diff --git a/.internal-ci/helm/fog-services/templates/supervisord-fog-view-configmap.yaml b/.internal-ci/helm/fog-services/templates/supervisord-fog-view-configmap.yaml index b9b87a0fe5..369e1caf34 100644 --- a/.internal-ci/helm/fog-services/templates/supervisord-fog-view-configmap.yaml +++ b/.internal-ci/helm/fog-services/templates/supervisord-fog-view-configmap.yaml @@ -11,7 +11,7 @@ data: priority=100 environment=MC_SENTRY_DSN="%(ENV_FOG_VIEW_SENTRY_DSN)s" command=/usr/bin/fog_view_server - --client-listen-uri insecure-fog-view://0.0.0.0:3225/ + --client-listen-uri insecure-fog-view-store://0.0.0.0:3225/ --client-responder-id "%(ENV_CLIENT_RESPONDER_ID)s" {{- if (include "fogServices.clientAuth" .) }} --client-auth-token-secret "%(ENV_CLIENT_AUTH_TOKEN_SECRET)s" diff --git a/Cargo.lock b/Cargo.lock index c1de39ea94..4d1ce98e3f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2303,9 +2303,10 @@ dependencies = [ "mc-util-test-helper", "mc-util-test-vector", "mc-util-test-with-data", + "mc-util-uri", "mc-util-zip-exact", "mc-watcher-api", - "pem", + "pem 2.0.0", "prost", "protobuf", "rand", @@ -2379,7 +2380,7 @@ dependencies = [ "mc-util-encodings", "mc-util-repr-bytes", "mc-util-serial", - "pem", + "pem 2.0.0", "prost", "rand", "rand_core", @@ -2400,6 +2401,7 @@ dependencies = [ "mc-attest-verifier", "mc-crypto-noise", "mc-sgx-compat", + "mc-util-serial", "serde", ] @@ -2415,7 +2417,7 @@ dependencies = [ "mc-common", "mc-sgx-build", "mc-util-encodings", - "pem", + "pem 2.0.0", "percent-encoding", "rand", "reqwest", @@ -2551,7 +2553,7 @@ dependencies = [ "mc-crypto-keys", "mc-util-from-random", "mc-util-test-helper", - "pem", + "pem 2.0.0", "serde", "serde_json", "tempfile", @@ -2757,7 +2759,7 @@ dependencies = [ "mc-util-from-random", "mc-util-serial", "once_cell", - "pem", + "pem 2.0.0", "prost", "rand", "rand_core", @@ -2829,7 +2831,7 @@ dependencies = [ "mc-util-grpc", "mc-util-parse", "mc-util-uri", - "pem", + "pem 2.0.0", "protobuf", "rand", "serde", @@ -2987,7 
+2989,7 @@ dependencies = [ "mc-util-parse", "mc-util-serial", "mc-util-uri", - "pem", + "pem 2.0.0", "serde", "serde_json", "serde_with", @@ -3066,6 +3068,7 @@ dependencies = [ "mc-sgx-build", "mc-sgx-compat", "mc-util-from-random", + "mc-util-serial", "sha2 0.10.6", ] @@ -3191,9 +3194,9 @@ dependencies = [ "mc-util-repr-bytes", "mc-util-serial", "mc-util-test-helper", - "pem", - "rand_core", - "rand_hc", + "pem 2.0.0", + "rand_core 0.6.4", + "rand_hc 0.3.1", "schnorrkel-og", "semver", "serde", @@ -3349,7 +3352,7 @@ dependencies = [ "clap 4.1.11", "mc-crypto-keys", "mc-util-build-script", - "pem", + "pem 2.0.0", "x509-signature", ] @@ -3360,7 +3363,7 @@ dependencies = [ "displaydoc", "mc-crypto-keys", "mc-crypto-x509-test-vectors", - "pem", + "pem 2.0.0", "x509-signature", ] @@ -3386,6 +3389,7 @@ dependencies = [ "mc-api", "mc-attest-api", "mc-attest-core", + "mc-attest-enclave-api", "mc-consensus-api", "mc-crypto-keys", "mc-fog-enclave-connection", @@ -4140,7 +4144,7 @@ dependencies = [ "mc-util-metrics", "mc-util-parse", "mc-util-uri", - "pem", + "pem 2.0.0", "prost", "rand", "serde", @@ -4247,9 +4251,9 @@ dependencies = [ "mc-fog-sig-authority", "mc-fog-sig-report", "mc-util-from-random", - "pem", - "rand_core", - "rand_hc", + "pem 2.0.0", + "rand_core 0.6.4", + "rand_hc 0.3.1", "signature", "x509-signature", ] @@ -4304,7 +4308,7 @@ dependencies = [ "mc-util-parse", "mc-util-repr-bytes", "mc-util-test-helper", - "pem", + "pem 2.0.0", "prost", "r2d2", "rand", @@ -4400,6 +4404,8 @@ dependencies = [ "crc", "displaydoc", "hex", + "mc-attest-enclave-api", + "mc-common", "mc-crypto-keys", "mc-fog-kex-rng", "mc-test-vectors-tx-out-records", @@ -4411,6 +4417,7 @@ dependencies = [ "mc-watcher-api", "prost", "serde", + "yare", ] [[package]] @@ -4425,11 +4432,17 @@ dependencies = [ name = "mc-fog-view-connection" version = "4.1.0-pre0" dependencies = [ + "aes-gcm", + "futures", "grpcio", + "mc-attest-ake", + "mc-attest-api", "mc-attest-core", "mc-attest-verifier", 
"mc-common", "mc-crypto-keys", + "mc-crypto-noise", + "mc-crypto-rand", "mc-fog-api", "mc-fog-enclave-connection", "mc-fog-types", @@ -4438,7 +4451,10 @@ dependencies = [ "mc-util-grpc", "mc-util-serial", "mc-util-telemetry", + "mc-util-uri", "retry", + "sha2 0.10.6", + "tokio", ] [[package]] @@ -4451,6 +4467,7 @@ dependencies = [ "mc-attest-enclave-api", "mc-attest-verifier", "mc-common", + "mc-crypto-ake-enclave", "mc-crypto-keys", "mc-enclave-boundary", "mc-fog-ocall-oram-storage-edl", @@ -4483,7 +4500,9 @@ dependencies = [ "mc-attest-core", "mc-attest-enclave-api", "mc-common", + "mc-crypto-ake-enclave", "mc-crypto-keys", + "mc-crypto-noise", "mc-fog-recovery-db-iface", "mc-fog-types", "mc-sgx-compat", @@ -4505,7 +4524,10 @@ dependencies = [ name = "mc-fog-view-enclave-impl" version = "4.1.0-pre0" dependencies = [ + "aes-gcm", "aligned-cmov", + "itertools", + "mc-attest-ake", "mc-attest-core", "mc-attest-enclave-api", "mc-common", @@ -4521,6 +4543,7 @@ dependencies = [ "mc-sgx-compat", "mc-sgx-report-cache-api", "mc-util-serial", + "static_assertions", ] [[package]] @@ -4586,9 +4609,13 @@ dependencies = [ "displaydoc", "futures", "grpcio", + "hex", + "itertools", "lazy_static", + "mc-api", "mc-attest-api", "mc-attest-core", + "mc-attest-enclave-api", "mc-attest-net", "mc-attest-verifier", "mc-blockchain-types", @@ -4607,6 +4634,7 @@ dependencies = [ "mc-fog-view-enclave-api", "mc-fog-view-enclave-measurement", "mc-fog-view-protocol", + "mc-fog-view-server-test-utils", "mc-sgx-report-cache-untrusted", "mc-transaction-core", "mc-util-cli", @@ -4620,12 +4648,41 @@ dependencies = [ "mc-util-telemetry", "mc-util-test-helper", "mc-util-uri", - "pem", + "pem 1.1.1", "portpicker", "rand", "rand_core", "serde", "serde_json", + "tempdir", + "yare", +] + +[[package]] +name = "mc-fog-view-server-test-utils" +version = "2.0.0" +dependencies = [ + "grpcio", + "mc-attest-net", + "mc-attest-verifier", + "mc-blockchain-types", + "mc-common", + "mc-fog-api", + 
"mc-fog-recovery-db-iface", + "mc-fog-sql-recovery-db", + "mc-fog-test-infra", + "mc-fog-types", + "mc-fog-uri", + "mc-fog-view-connection", + "mc-fog-view-enclave", + "mc-fog-view-enclave-measurement", + "mc-fog-view-protocol", + "mc-fog-view-server", + "mc-transaction-core", + "mc-util-from-random", + "mc-util-grpc", + "mc-util-uri", + "portpicker", ] [[package]] @@ -4793,7 +4850,7 @@ dependencies = [ "mc-watcher", "more-asserts", "num_cpus", - "pem", + "pem 2.0.0", "portpicker", "prost", "protobuf", @@ -5565,7 +5622,7 @@ dependencies = [ "mc-util-parse", "mc-util-serial", "mc-util-test-helper", - "pem", + "pem 2.0.0", "prost", "rand", "rand_core", @@ -5645,9 +5702,9 @@ dependencies = [ "mc-crypto-keys", "mc-util-from-random", "mc-util-parse", - "pem", - "rand", - "rand_hc", + "pem 2.0.0", + "rand 0.8.5", + "rand_hc 0.3.1", ] [[package]] @@ -6295,6 +6352,15 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" +[[package]] +name = "pem" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8835c273a76a90455d7344889b0964598e3316e2a79ede8e36f16bdcf2228b8" +dependencies = [ + "base64 0.13.1", +] + [[package]] name = "pem" version = "2.0.0" @@ -8689,6 +8755,26 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" +[[package]] +name = "yare" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df119f412cd2150ab3e88ebee015a615b00140ff81acd07746262df4a4678c47" +dependencies = [ + "yare-macro", +] + +[[package]] +name = "yare-macro" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9467ac0aa6668b663fa80addde47e40ac364dc9d9a243b4df26ba647e4605633" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] 
name = "yasna" version = "0.5.1" diff --git a/Cargo.toml b/Cargo.toml index 89bf4b9fe6..11d8067d8e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -106,6 +106,7 @@ members = [ "fog/view/load-test", "fog/view/protocol", "fog/view/server", + "fog/view/server/test-utils", "go-grpc-gateway/testing", "ledger/db", "ledger/distribution", diff --git a/api/Cargo.toml b/api/Cargo.toml index 2bd670b868..71ca381cc2 100644 --- a/api/Cargo.toml +++ b/api/Cargo.toml @@ -22,6 +22,7 @@ mc-transaction-core = { path = "../transaction/core" } mc-transaction-extra = { path = "../transaction/extra" } mc-util-repr-bytes = { path = "../util/repr-bytes" } mc-util-serial = { path = "../util/serial" } +mc-util-uri = { path = "../util/uri" } mc-watcher-api = { path = "../watcher/api" } bs58 = "0.4.0" diff --git a/api/src/convert/error.rs b/api/src/convert/error.rs index 5ed4896761..bdab713b6d 100644 --- a/api/src/convert/error.rs +++ b/api/src/convert/error.rs @@ -3,6 +3,7 @@ use mc_blockchain_types::{BlockVersionError, ConvertError}; use mc_crypto_keys::{KeyError, SignatureError}; use mc_transaction_core::ring_signature::Error as RingSigError; +use mc_util_uri::{UriConversionError, UriParseError}; use std::{ array::TryFromSliceError, convert::Infallible, @@ -22,6 +23,8 @@ pub enum ConversionError { KeyCastError, MissingField(String), NarrowingCastError, + UriParse(UriParseError), + UriConversion(UriConversionError), ObjectMissing, Other, } @@ -80,3 +83,15 @@ impl fmt::Display for ConversionError { write!(f, "ConversionError") } } + +impl From<UriParseError> for ConversionError { + fn from(error: UriParseError) -> Self { + Self::UriParse(error) + } +} + +impl From<UriConversionError> for ConversionError { + fn from(error: UriConversionError) -> Self { + Self::UriConversion(error) + } +} diff --git a/attest/ake/src/event.rs
b/attest/ake/src/event.rs index 39987b04c8..1651b00297 100644 --- a/attest/ake/src/event.rs +++ b/attest/ake/src/event.rs @@ -351,3 +351,50 @@ impl MealyInput for Ciphertext<'_, '_> {} /// Our outputs may be simple vectors for the proto-inside-grpc use case. impl MealyOutput for Vec {} + +/// A type similar to [`aead::Payload`] used to distinguish writer inputs from +/// outputs when there's an explicit nonce. +pub struct NoncePlaintext<'aad, 'msg>(Plaintext<'aad, 'msg>); + +impl<'aad, 'msg> NoncePlaintext<'aad, 'msg> { + /// Create a new NoncePlaintext object from the given slices. + pub fn new(aad: &'aad [u8], msg: &'msg [u8]) -> Self { + Self(Plaintext::new(aad, msg)) + } + + /// Grab a reference to the internal `aad` slice. + pub fn aad(&self) -> &[u8] { + self.0.aad + } + + /// Grab a reference to the internal `msg` slice. + pub fn msg(&self) -> &[u8] { + self.0.msg + } +} + +/// Plaintext may be provided to an FST for encryption into a vector +impl MealyInput for NoncePlaintext<'_, '_> {} + +/// A tuple of bytes and a u64 can be output from an FST for the +/// encrypt-for-explicit nonce case. +impl MealyOutput for (Vec, u64) {} + +/// A type similar to [`aead::Payload`] used to distinguish reader inputs from +/// outputs when there's an explicit nonce. +pub struct NonceCiphertext<'aad, 'msg> { + pub ciphertext: Ciphertext<'aad, 'msg>, + pub nonce: u64, +} + +impl<'aad, 'msg> NonceCiphertext<'aad, 'msg> { + pub fn new(aad: &'aad [u8], msg: &'msg [u8], nonce: u64) -> Self { + Self { + ciphertext: Ciphertext::new(aad, msg), + nonce, + } + } +} + +/// Plaintext may be provided to an FST for encryption into a vector +impl MealyInput for NonceCiphertext<'_, '_> {} diff --git a/attest/ake/src/shared.rs b/attest/ake/src/shared.rs index 37676bae6d..55aa65f0ec 100644 --- a/attest/ake/src/shared.rs +++ b/attest/ake/src/shared.rs @@ -3,7 +3,7 @@ //! Common transitions between initiator and responder. 
use crate::{ - event::{Ciphertext, Plaintext}, + event::{Ciphertext, NonceCiphertext, NoncePlaintext, Plaintext}, mealy::Transition, state::Ready, }; @@ -46,3 +46,40 @@ where Ok((retval, ciphertext)) } } + +/// Ready + NonceCiphertext => Ready + Vec +impl Transition, NonceCiphertext<'_, '_>, Vec> for Ready +where + Cipher: NoiseCipher, +{ + type Error = CipherError; + + fn try_next( + self, + _csprng: &mut R, + input: NonceCiphertext<'_, '_>, + ) -> Result<(Ready, Vec), Self::Error> { + let mut retval = self; + let plaintext = + retval.decrypt_with_nonce(input.ciphertext.aad, input.ciphertext.msg, input.nonce)?; + Ok((retval, plaintext)) + } +} + +/// Ready + NoncePlaintext => Ready + (Vec, u64) +impl Transition, NoncePlaintext<'_, '_>, (Vec, u64)> for Ready +where + Cipher: NoiseCipher, +{ + type Error = CipherError; + + fn try_next( + self, + _csprng: &mut R, + input: NoncePlaintext<'_, '_>, + ) -> Result<(Ready, (Vec, u64)), Self::Error> { + let mut retval = self; + let output = retval.encrypt_with_nonce(input.aad(), input.msg())?; + Ok((retval, output)) + } +} diff --git a/attest/ake/src/state.rs b/attest/ake/src/state.rs index 370afadfde..0178fda804 100644 --- a/attest/ake/src/state.rs +++ b/attest/ake/src/state.rs @@ -74,6 +74,7 @@ where pub fn binding(&self) -> &[u8] { self.binding.as_ref() } + /// Using the writer cipher, encrypt the given plaintext. pub fn encrypt(&mut self, aad: &[u8], plaintext: &[u8]) -> Result, CipherError> { self.writer.encrypt_with_ad(aad, plaintext) @@ -83,6 +84,29 @@ where pub fn decrypt(&mut self, aad: &[u8], ciphertext: &[u8]) -> Result, CipherError> { self.reader.decrypt_with_ad(aad, ciphertext) } + + /// Using the writer cipher, encrypt the given plaintext and return the + /// nonce. 
+ pub fn encrypt_with_nonce( + &mut self, + aad: &[u8], + plaintext: &[u8], + ) -> Result<(Vec, u64), CipherError> { + let nonce = self.writer.next_nonce(); + let ciphertext = self.encrypt(aad, plaintext)?; + Ok((ciphertext, nonce)) + } + + /// Using the reader cipher, decrypt the provided ciphertext for the nonce. + pub fn decrypt_with_nonce( + &mut self, + aad: &[u8], + ciphertext: &[u8], + nonce: u64, + ) -> Result, CipherError> { + self.reader.set_nonce(nonce); + self.decrypt(aad, ciphertext) + } } impl State for Ready where Cipher: NoiseCipher {} diff --git a/attest/api/proto/attest.proto b/attest/api/proto/attest.proto index 799f91c3fb..1461774086 100644 --- a/attest/api/proto/attest.proto +++ b/attest/api/proto/attest.proto @@ -48,3 +48,20 @@ message Message { /// for use in the enclave. bytes data = 3; } + +/// An AEAD message with an explicit nonce. +/// +/// This message is technically compatible with [`Message`], but exists to +// ensure generated code doesn't use Message. +message NonceMessage { + /// A byte array containing plaintext authenticated data. + bytes aad = 1; + /// An byte array containing the channel ID this message is + /// associated with. A zero-length channel ID is not valid. + bytes channel_id = 2; + /// A potentially encrypted bytestream containing opaque data intended + /// for use in the enclave. + bytes data = 3; + /// The explicit nonce. + fixed64 nonce = 4; +} diff --git a/attest/api/src/conversions.rs b/attest/api/src/conversions.rs index 2467b959c1..7763740c9d 100644 --- a/attest/api/src/conversions.rs +++ b/attest/api/src/conversions.rs @@ -2,11 +2,11 @@ //! Conversions from gRPC message types into consensus_enclave_api types. 
-use crate::attest::{AuthMessage, Message}; +use crate::attest::{AuthMessage, Message, NonceMessage}; use mc_attest_ake::{AuthRequestOutput, AuthResponseOutput}; use mc_attest_enclave_api::{ - ClientAuthRequest, ClientAuthResponse, EnclaveMessage, PeerAuthRequest, PeerAuthResponse, - Session, + ClientAuthRequest, ClientAuthResponse, EnclaveMessage, NonceAuthRequest, NonceAuthResponse, + NonceSession, PeerAuthRequest, PeerAuthResponse, Session, }; use mc_crypto_keys::Kex; use mc_crypto_noise::{HandshakePattern, NoiseCipher, NoiseDigest}; @@ -61,6 +61,34 @@ impl From for AuthMessage { } } +impl From for NonceAuthRequest { + fn from(src: AuthMessage) -> NonceAuthRequest { + src.data.into() + } +} + +impl From for AuthMessage { + fn from(src: NonceAuthRequest) -> AuthMessage { + let mut retval = AuthMessage::default(); + retval.set_data(src.into()); + retval + } +} + +impl From for NonceAuthResponse { + fn from(src: AuthMessage) -> NonceAuthResponse { + src.data.into() + } +} + +impl From for AuthMessage { + fn from(src: NonceAuthResponse) -> AuthMessage { + let mut retval = AuthMessage::default(); + retval.set_data(src.into()); + retval + } +} + impl From for PeerAuthResponse { fn from(src: AuthMessage) -> PeerAuthResponse { src.data.into() @@ -103,7 +131,31 @@ impl From> for Message { fn from(src: EnclaveMessage) -> Message { let mut retval = Message::default(); retval.set_aad(src.aad); - retval.set_channel_id(src.channel_id.clone().into()); + retval.set_channel_id(src.channel_id.into()); + retval.set_data(src.data); + retval + } +} + +impl From for EnclaveMessage { + fn from(src: NonceMessage) -> Self { + let channel_id = NonceSession::new(src.channel_id, src.nonce); + Self { + aad: src.aad, + channel_id, + data: src.data, + } + } +} + +impl From> for NonceMessage { + fn from(src: EnclaveMessage) -> NonceMessage { + let mut retval = NonceMessage::default(); + retval.set_aad(src.aad); + // it doesn't matter if we don't bump the nonce when retrieving it, + // 
src.channel_id will be discarded anyways. + retval.set_nonce(src.channel_id.nonce()); + retval.set_channel_id(src.channel_id.into()); retval.set_data(src.data); retval } diff --git a/attest/enclave-api/Cargo.toml b/attest/enclave-api/Cargo.toml index c4de73f7a4..099d191e20 100644 --- a/attest/enclave-api/Cargo.toml +++ b/attest/enclave-api/Cargo.toml @@ -16,6 +16,7 @@ mc-attest-core = { path = "../../attest/core", default-features = false } mc-attest-verifier = { path = "../../attest/verifier", default-features = false } mc-crypto-noise = { path = "../../crypto/noise", default-features = false } mc-sgx-compat = { path = "../../sgx/compat" } +mc-util-serial = { path = "../../util/serial" } displaydoc = { version = "0.2", default-features = false } serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } diff --git a/attest/enclave-api/src/error.rs b/attest/enclave-api/src/error.rs index 8128ffa5fd..f7d49edc56 100644 --- a/attest/enclave-api/src/error.rs +++ b/attest/enclave-api/src/error.rs @@ -2,10 +2,11 @@ //! 
Enclave API Errors +use alloc::string::{String, ToString}; use core::result::Result as StdResult; use displaydoc::Display; use mc_attest_ake::Error as AkeError; -use mc_attest_core::{NonceError, QuoteError, SgxError}; +use mc_attest_core::{IntelSealingError, NonceError, ParseSealedError, QuoteError, SgxError}; use mc_attest_verifier::Error as VerifierError; use mc_crypto_noise::CipherError; use mc_sgx_compat::sync::PoisonError; @@ -50,6 +51,12 @@ pub enum Error { /// Another thread crashed while holding a lock Poison, + /// An error occurred during a sealing operation + Seal(IntelSealingError), + + /// An error occurred during an unsealing operation + Unseal(ParseSealedError), + /** * Invalid state for call * @@ -64,6 +71,12 @@ pub enum Error { /// Too many IAS reports are already in-flight TooManyPendingReports, + /// Encoding error + Encode(String), + + /// Decoding error + Decode(String), + /// Connection not found by node ID or session NotFound, } @@ -109,3 +122,27 @@ impl From<VerifierError> for Error { Error::Verify(src) } } + +impl From<IntelSealingError> for Error { + fn from(src: IntelSealingError) -> Error { + Error::Seal(src) + } +} + +impl From<ParseSealedError> for Error { + fn from(src: ParseSealedError) -> Error { + Error::Unseal(src) + } +} + +impl From<mc_util_serial::encode::Error> for Error { + fn from(src: mc_util_serial::encode::Error) -> Self { + Error::Encode(src.to_string()) + } +} + +impl From<mc_util_serial::decode::Error> for Error { + fn from(src: mc_util_serial::decode::Error) -> Self { + Error::Decode(src.to_string()) + } +} diff --git a/attest/enclave-api/src/lib.rs b/attest/enclave-api/src/lib.rs index 848fe9d327..af72b58dd5 100644 --- a/attest/enclave-api/src/lib.rs +++ b/attest/enclave-api/src/lib.rs @@ -12,77 +12,61 @@ mod error; pub use error::{Error, Result}; use alloc::vec::Vec; -use core::hash::Hash; -use mc_attest_core::{QuoteNonce, Report}; +use core::hash::{Hash, Hasher}; +use mc_attest_core::{IntelSealed, QuoteNonce, Report}; use serde::{Deserialize, Serialize}; -/// The raw authentication request message, sent from an initiator to a
-/// responder -#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] -pub struct ClientAuthRequest(Vec); +macro_rules! impl_newtype_vec_inout { + ($($newtype:ident;)*) => {$( + impl From> for $newtype { + fn from(src: alloc::vec::Vec) -> $newtype { + $newtype(src) + } + } -impl From> for ClientAuthRequest { - fn from(src: Vec) -> Self { - Self(src) - } + impl From<$newtype> for alloc::vec::Vec { + fn from(src: $newtype) -> alloc::vec::Vec { + src.0 + } + } + )*} } -impl From for Vec { - fn from(src: ClientAuthRequest) -> Vec { - src.0 - } +impl_newtype_vec_inout! { + ClientAuthRequest; ClientAuthResponse; ClientSession; + PeerAuthRequest; PeerAuthResponse; PeerSession; + NonceAuthRequest; NonceAuthResponse; } +/// The raw authentication request message, sent from an initiator to a +/// responder +#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +pub struct ClientAuthRequest(Vec); + /// The raw authentication response message, sent from a responder to an /// initiator. #[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] pub struct ClientAuthResponse(Vec); -impl From for Vec { - fn from(src: ClientAuthResponse) -> Vec { - src.0 - } -} - -impl From> for ClientAuthResponse { - fn from(src: Vec) -> Self { - Self(src) - } -} - /// The raw authentication request message, sent from an initiator to a /// responder #[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] pub struct PeerAuthRequest(Vec); -impl From> for PeerAuthRequest { - fn from(src: Vec) -> Self { - Self(src) - } -} - -impl From for Vec { - fn from(src: PeerAuthRequest) -> Vec { - src.0 - } -} - /// The raw authentication response message, sent from a responder to an /// initiator. 
#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] pub struct PeerAuthResponse(Vec); -impl From for Vec { - fn from(src: PeerAuthResponse) -> Vec { - src.0 - } -} +/// The raw authentication request message, sent from an initiator to a +/// responder. +#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +pub struct NonceAuthRequest(Vec); -impl From> for PeerAuthResponse { - fn from(src: Vec) -> Self { - Self(src) - } -} +/// The raw authentication response message, sent from a responder to an +/// initiator. +#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +pub struct NonceAuthResponse(Vec); /// Inbound and outbound messages to/from an enclave. #[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] @@ -95,6 +79,27 @@ pub struct EnclaveMessage { pub data: Vec, } +/// An EnclaveMessage sealed for the current enclave +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct SealedClientMessage { + pub aad: Vec, + pub channel_id: ClientSession, + pub data: IntelSealed, +} + +/// Helper struct that is used in the enclave during the Intel +/// sealing process. Ensures that the data being sealed is not empty. +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] +pub struct PlaintextClientRequest { + /// The decrypted client request bytes + pub client_request_bytes: Vec, + + /// The channel_id associated with the QueryRequest. Since the channel_id + /// will never be 0, this struct will never serialize into an empty byte + /// array. + pub channel_id: ClientSession, +} + /// The response to a request for a new report. The enclave will expect the /// QuoteNonce to be used when the report is quoted, and both the quote and /// report to be returned to the enclave during the verify_quote() phase. 
@@ -106,39 +111,35 @@ pub struct NewEReportResponse { /// A helper trait to aid in generic implementation of enclave methods pub trait Session: - Clone + Default + Eq + Hash + for<'bytes> From<&'bytes [u8]> + Into> + Clone + Default + Hash + for<'bytes> From<&'bytes [u8]> + Into> + PartialEq + PartialOrd { type Request: Into>; type Response: From>; } -/// An opaque bytestream used as a client session -#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] -pub struct ClientSession(pub Vec); +macro_rules! impl_newtype_asref_and_from_bytes { + ($($newtype:ident;)*) => {$( + impl AsRef<[u8]> for $newtype { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } + } -impl AsRef<[u8]> for ClientSession { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - -impl<'bytes> From<&'bytes [u8]> for ClientSession { - fn from(src: &[u8]) -> ClientSession { - Self(Vec::from(src)) - } + impl<'bytes> From<&'bytes [u8]> for $newtype { + fn from(src: &[u8]) -> $newtype { + Self(alloc::vec::Vec::from(src)) + } + } + )*} } -impl From> for ClientSession { - fn from(src: Vec) -> ClientSession { - ClientSession(src) - } +impl_newtype_asref_and_from_bytes! { + ClientSession; PeerSession; } -impl From for Vec { - fn from(src: ClientSession) -> Vec { - src.0 - } -} +/// An opaque bytestream used as a client session +#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +pub struct ClientSession(Vec); impl Session for ClientSession { type Request = ClientAuthRequest; @@ -149,31 +150,75 @@ impl Session for ClientSession { #[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] pub struct PeerSession(Vec); -impl AsRef<[u8]> for PeerSession { +impl Session for PeerSession { + type Request = PeerAuthRequest; + type Response = PeerAuthResponse; +} + +/// An opaque bytestream used as a session ID for a session which uses explicit +/// nonces. 
+#[derive(Clone, Debug, Default, Eq, Deserialize, PartialOrd, Serialize)] +pub struct NonceSession { + channel_id: Vec, + nonce: u64, +} + +impl NonceSession { + /// Create a new nonce session from a vector and nonce. + /// + /// This takes a pre-created Vec in order to remove an extra allocation that + /// would be required when converting from a NonceMessage to an + /// [`EnclaveMessage`]`<`[`NonceSession`]`>`. + pub fn new(channel_id: Vec, nonce: u64) -> Self { + Self { channel_id, nonce } + } + + /// Retrieves the nonce for this session + pub fn nonce(&self) -> u64 { + self.nonce + } +} + +impl AsRef<[u8]> for NonceSession { fn as_ref(&self) -> &[u8] { - self.0.as_ref() + self.channel_id.as_ref() } } -impl<'bytes> From<&'bytes [u8]> for PeerSession { - fn from(src: &[u8]) -> PeerSession { - Self(Vec::from(src)) +impl<'bytes> From<&'bytes [u8]> for NonceSession { + fn from(src: &'bytes [u8]) -> Self { + Self::from(Vec::from(src)) } } -impl From> for PeerSession { - fn from(src: Vec) -> PeerSession { - PeerSession(src) +impl From> for NonceSession { + fn from(channel_id: Vec) -> Self { + NonceSession { + channel_id, + nonce: 0, + } } } -impl From for Vec { - fn from(src: PeerSession) -> Vec { - src.0 +impl From for Vec { + fn from(src: NonceSession) -> Self { + src.channel_id } } -impl Session for PeerSession { - type Request = PeerAuthRequest; - type Response = PeerAuthResponse; +impl Hash for NonceSession { + fn hash(&self, state: &mut H) { + self.channel_id.hash(state) + } +} + +impl PartialEq for NonceSession { + fn eq(&self, other: &Self) -> bool { + self.channel_id == other.channel_id + } +} + +impl Session for NonceSession { + type Request = NonceAuthRequest; + type Response = NonceAuthResponse; } diff --git a/connection/Cargo.toml b/connection/Cargo.toml index 0c8998692f..19243142fe 100644 --- a/connection/Cargo.toml +++ b/connection/Cargo.toml @@ -8,6 +8,7 @@ readme = "README.md" rust-version = { workspace = true } [dependencies] + mc-attest-ake = { path 
= "../attest/ake" } mc-attest-api = { path = "../attest/api" } mc-attest-core = { path = "../attest/core" } diff --git a/consensus/enclave/trusted/Cargo.lock b/consensus/enclave/trusted/Cargo.lock index 217862e2d2..c083379fe9 100644 --- a/consensus/enclave/trusted/Cargo.lock +++ b/consensus/enclave/trusted/Cargo.lock @@ -709,6 +709,7 @@ dependencies = [ "mc-attest-verifier", "mc-crypto-noise", "mc-sgx-compat", + "mc-util-serial", "serde", ] @@ -955,6 +956,7 @@ dependencies = [ "mc-sgx-build", "mc-sgx-compat", "mc-util-from-random", + "mc-util-serial", "sha2", ] diff --git a/crypto/ake/enclave/Cargo.toml b/crypto/ake/enclave/Cargo.toml index 38f9ba6f81..bd7c4d81a8 100644 --- a/crypto/ake/enclave/Cargo.toml +++ b/crypto/ake/enclave/Cargo.toml @@ -16,6 +16,7 @@ mc-common = { path = "../../../common", default-features = false } mc-crypto-keys = { path = "../../../crypto/keys", default-features = false } mc-crypto-rand = { path = "../../../crypto/rand", default-features = false } mc-util-from-random = { path = "../../../util/from-random" } +mc-util-serial = { path = "../../../util/serial" } mc-sgx-compat = { path = "../../../sgx/compat", default-features = false } aes-gcm = "0.10.1" diff --git a/crypto/ake/enclave/src/lib.rs b/crypto/ake/enclave/src/lib.rs index 7258bcdc03..e5455f4fd7 100644 --- a/crypto/ake/enclave/src/lib.rs +++ b/crypto/ake/enclave/src/lib.rs @@ -5,21 +5,22 @@ extern crate alloc; use aes_gcm::Aes256Gcm; -use alloc::{string::ToString, vec::Vec}; +use alloc::{borrow::ToOwned, string::ToString, vec::Vec}; use digest::Digest; use mc_attest_ake::{ AuthPending, AuthRequestOutput, AuthResponseInput, AuthResponseOutput, ClientAuthRequestInput, - NodeAuthRequestInput, NodeInitiate, Ready, Start, Transition, + ClientInitiate, NodeAuthRequestInput, NodeInitiate, Ready, Start, Transition, }; use mc_attest_core::{ - IasNonce, Nonce, NonceError, Quote, QuoteNonce, Report, ReportData, TargetInfo, + IasNonce, IntelSealed, Nonce, NonceError, Quote, QuoteNonce, 
Report, ReportData, TargetInfo, VerificationReport, }; use mc_attest_enclave_api::{ - ClientAuthRequest, ClientAuthResponse, ClientSession, EnclaveMessage, Error, PeerAuthRequest, - PeerAuthResponse, PeerSession, Result, + ClientAuthRequest, ClientAuthResponse, ClientSession, EnclaveMessage, Error, NonceAuthRequest, + NonceAuthResponse, NonceSession, PeerAuthRequest, PeerAuthResponse, PeerSession, + PlaintextClientRequest, Result, SealedClientMessage, }; -use mc_attest_trusted::EnclaveReport; +use mc_attest_trusted::{EnclaveReport, SealAlgo}; use mc_attest_verifier::{MrEnclaveVerifier, Verifier, DEBUG_ENCLAVE}; use mc_common::{LruCache, ResponderId}; use mc_crypto_keys::{X25519Private, X25519Public, X25519}; @@ -31,19 +32,29 @@ use sha2::{Sha256, Sha512}; /// Max number of pending quotes. const MAX_PENDING_QUOTES: usize = 64; -// Max number of auth pending requests. +/// Max number of pending authentication requests. const MAX_AUTH_PENDING_REQUESTS: usize = 64; /// Max number of peer sessions. const MAX_PEER_SESSIONS: usize = 64; +/// Maximum number of concurrent sessions to this enclave from router enclaves. +const MAX_FRONTEND_SESSIONS: usize = 500; + +/// Max number of backends that this enclave can connect to as a client. +const MAX_BACKEND_SESSIONS: usize = 10_000; + /// Max number of client sessions. -const MAX_CLIENT_SESSIONS: usize = 10000; +const MAX_CLIENT_SESSIONS: usize = 10_000; + +/// Max number of auth requests for enclave backends. +const MAX_BACKEND_AUTH_PENDING_REQUESTS: usize = 10_000; /// Any additional "identities" (e.g. key material) for a given enclave that /// needs to become a part of the report. 
We provide some simple identities, and /// a trait to allow extensions mod identity; + pub use identity::{EnclaveIdentity, NullIdentity}; /// State associated to Attested Authenticated Key Exchange held by an enclave, @@ -52,6 +63,7 @@ pub use identity::{EnclaveIdentity, NullIdentity}; pub struct AkeEnclaveState { /// ResponderId used for peer connections peer_self_id: Mutex>, + /// ResponderId used for client connections client_self_id: Mutex>, @@ -76,6 +88,10 @@ pub struct AkeEnclaveState { /// A map of responder-ID to incomplete, outbound, AKE state. initiator_auth_pending: Mutex>>, + /// A map of responder-ID to incomplete, outbound AKE state for connections + /// to enclaves that serve as backends to the current enclave. + backend_auth_pending: Mutex>>, + /// A map of channel ID outbound connection state. peer_outbound: Mutex>>, @@ -84,6 +100,14 @@ pub struct AkeEnclaveState { /// A map of channel ID to connection state clients: Mutex>>, + + /// A map of inbound session IDs to connection states, for use by a + /// store/router backend + frontends: Mutex>>, + + /// A map of ResponderIds for each enclave that serves as a backend to the + /// current enclave. 
+ backends: Mutex>>, } impl Default for AkeEnclaveState { @@ -104,9 +128,12 @@ impl AkeEnclaveState { ias_pending: Mutex::new(LruCache::new(MAX_PENDING_QUOTES)), current_ias_report: Mutex::new(None), initiator_auth_pending: Mutex::new(LruCache::new(MAX_AUTH_PENDING_REQUESTS)), + backend_auth_pending: Mutex::new(LruCache::new(MAX_BACKEND_AUTH_PENDING_REQUESTS)), peer_outbound: Mutex::new(LruCache::new(MAX_PEER_SESSIONS)), peer_inbound: Mutex::new(LruCache::new(MAX_PEER_SESSIONS)), clients: Mutex::new(LruCache::new(MAX_CLIENT_SESSIONS)), + frontends: Mutex::new(LruCache::new(MAX_FRONTEND_SESSIONS)), + backends: Mutex::new(LruCache::new(MAX_BACKEND_SESSIONS)), } } @@ -179,6 +206,125 @@ impl AkeEnclaveState { } } + pub fn frontend_encrypt( + &self, + session_id: &NonceSession, + aad: &[u8], + data: &[u8], + ) -> Result> { + let mut frontends = self.frontends.lock()?; + let session = frontends.get_mut(session_id).ok_or(Error::NotFound)?; + let (data, nonce) = session.encrypt_with_nonce(aad, data)?; + let channel_id = NonceSession::new(session.binding().to_owned(), nonce); + + // Return message + Ok(EnclaveMessage { + aad: aad.to_vec(), + channel_id, + data, + }) + } + + pub fn frontend_decrypt(&self, msg: EnclaveMessage) -> Result> { + let mut frontends = self.frontends.lock()?; + frontends + .get_mut(&msg.channel_id) + .ok_or(Error::NotFound) + .and_then(|session| { + Ok(session.decrypt_with_nonce(&msg.aad, &msg.data, msg.channel_id.nonce())?) + }) + } + + /// Accept an explicit-nonce session from a frontend service (router) to + /// ourselves (acting as a store). 
+ pub fn frontend_accept( + &self, + req: NonceAuthRequest, + ) -> Result<(NonceAuthResponse, NonceSession)> { + let local_identity = self.kex_identity.clone(); + let ias_report = self.get_ias_report()?; + + // Create the state machine + let responder = Start::new(self.get_client_self_id()?.to_string()); + + // Massage the request message into state machine input + let auth_request = { + let req: Vec = req.into(); + ClientAuthRequestInput::::new( + AuthRequestOutput::from(req), + local_identity, + ias_report, + ) + }; + + // Advance the state machine + let mut csprng = McRng::default(); + let (responder, auth_response) = responder.try_next(&mut csprng, auth_request)?; + // For the first message, nonce is a zero + let session_id = NonceSession::new(responder.binding().to_owned(), 0); + + // This session is established as far as we are concerned. + self.frontends.lock()?.put(session_id.clone(), responder); + + // Massage the state machine output into the response message + let auth_response: Vec = auth_response.into(); + + Ok((NonceAuthResponse::from(auth_response), session_id)) + } + + /// Drop the given session from the list of known frontend router sessions. + pub fn frontend_close(&self, channel_id: NonceSession) -> Result<()> { + self.frontends.lock()?.pop(&channel_id); + Ok(()) + } + + /// Constructs a NonceAuthRequest to be sent to an enclave backend. + /// + /// Differs from peer_init in that this enclave does not establish a peer + /// connection to the enclave described by `backend_id`. Rather, this + /// enclave serves as a client to this other backend enclave. 
+ pub fn backend_init(&self, backend_id: ResponderId) -> Result { + let mut csprng = McRng::default(); + + let initiator = Start::new(backend_id.to_string()); + + let init_input = ClientInitiate::::default(); + let (initiator, auth_request_output) = initiator.try_next(&mut csprng, init_input)?; + self.backend_auth_pending.lock()?.put(backend_id, initiator); + let client_auth_request_data: Vec = auth_request_output.into(); + Ok(client_auth_request_data.into()) + } + + /// Connect to an enclave backend as a client. + /// + /// This establishes the client to backend enclave connection, see + /// `backend_init` for more details on how this differs from a peer + /// connection. + pub fn backend_connect( + &self, + backend_id: ResponderId, + backend_auth_response: NonceAuthResponse, + ) -> Result<()> { + let initiator = self + .backend_auth_pending + .lock()? + .pop(&backend_id) + .ok_or(Error::NotFound)?; + + let mut csprng = McRng::default(); + + let auth_response_output_bytes: Vec = backend_auth_response.into(); + let auth_response_event = + AuthResponseInput::new(auth_response_output_bytes.into(), self.get_verifier()?); + let (initiator, _verification_report) = + initiator.try_next(&mut csprng, auth_response_event)?; + + let mut backends = self.backends.lock()?; + backends.put(backend_id, initiator); + + Ok(()) + } + /// Accept a client connection pub fn client_accept( &self, @@ -388,6 +534,85 @@ impl AkeEnclaveState { }) } + /// Transforms an incoming client message, i.e. a message sent from a client + /// to the current enclave, into a sealed message which can be decrypted + /// later for use by this enclave without advancing the Noise nonce. 
+ pub fn decrypt_client_message_for_enclave( + &self, + incoming_client_message: EnclaveMessage, + ) -> Result { + let aad = incoming_client_message.aad.clone(); + let channel_id = incoming_client_message.channel_id.clone(); + let client_query_bytes = self.client_decrypt(incoming_client_message)?; + let sealed_client_query = PlaintextClientRequest { + client_request_bytes: client_query_bytes, + channel_id: channel_id.clone(), + }; + let sealed_client_query_bytes = mc_util_serial::serialize(&sealed_client_query)?; + let sealed_data = IntelSealed::seal_raw(&sealed_client_query_bytes, &[])?; + + Ok(SealedClientMessage { + channel_id, + aad, + data: sealed_data, + }) + } + + /// Unseals the data component of a sealed client message and returns the + /// plaintext + pub fn unseal(&self, sealed_message: &SealedClientMessage) -> Result> { + let (sealed_client_request_bytes, _) = sealed_message.data.unseal_raw()?; + let sealed_client_request: PlaintextClientRequest = + mc_util_serial::deserialize(&sealed_client_request_bytes)?; + + Ok(sealed_client_request.client_request_bytes) + } + + /// Transforms a sealed client message, i.e. a message sent from a client + /// to the current enclave which has been sealed for this enclave, into a + /// list of outbound messages for other enclaves that serve as backends to + /// the current enclave. 
+ /// / --> Backend Enclave 1 + /// Client -> Current Enclave ---> Backend Enclave 2 + /// \ --> Backend Enclave N + pub fn reencrypt_sealed_message_for_backends( + &self, + sealed_client_message: &SealedClientMessage, + ) -> Result>> { + let client_request_bytes = self.unseal(sealed_client_message)?; + let mut backends = self.backends.lock()?; + let backend_messages = backends + .iter_mut() + .map(|(_, encryptor)| { + let aad = sealed_client_message.aad.clone(); + let (data, nonce) = encryptor.encrypt_with_nonce(&aad, &client_request_bytes)?; + let channel_id = NonceSession::new(encryptor.binding().into(), nonce); + Ok(EnclaveMessage { + aad, + channel_id, + data, + }) + }) + .collect::>()?; + + Ok(backend_messages) + } + + pub fn backend_decrypt( + &self, + responder_id: &ResponderId, + msg: &EnclaveMessage, + ) -> Result> { + // Ensure lock gets released as soon as we're done decrypting. + let mut backends = self.backends.lock()?; + backends + .get_mut(responder_id) + .ok_or(Error::NotFound) + .and_then(|session| { + Ok(session.decrypt_with_nonce(&msg.aad, &msg.data, msg.channel_id.nonce())?) + }) + } + // // IAS related // diff --git a/crypto/noise/src/cipher_state.rs b/crypto/noise/src/cipher_state.rs index 90de9ba1b7..4e7019981b 100644 --- a/crypto/noise/src/cipher_state.rs +++ b/crypto/noise/src/cipher_state.rs @@ -161,12 +161,19 @@ impl CipherState { self.cipher.is_some() } + /// Retrieve the nonce value which will be used in the next operation. + /// + /// This is an extension of the noise protocol to allow for implicit-nonce + /// writers with explicit-nonce readers to co-exist in the same stream. + pub fn next_nonce(&self) -> u64 { + self.nonce + } + /// The noise protocol `SetNonce()` operation. /// /// This will irrevocably override the current nonce value. pub fn set_nonce(&mut self, nonce: u64) { self.nonce = nonce; - // TODO: return current nonce? We don't provide any access otherwise... } /// The noise protocol `EncryptWithAd()` operation. 
@@ -393,4 +400,14 @@ mod test { assert_eq!(encryptor.nonce, 2); assert_eq!(encryptor.bytes_sent, key.len() as u64); } + + /// Try to set the nonce, and retrieve it. + #[test] + fn set_nonce() { + let mut encryptor = CipherState::::default(); + let expected = 1234; + encryptor.set_nonce(expected); + let actual = encryptor.next_nonce(); + assert_eq!(expected, actual); + } } diff --git a/fog/api/Cargo.toml b/fog/api/Cargo.toml index 073ccfa9cb..e7e033719d 100644 --- a/fog/api/Cargo.toml +++ b/fog/api/Cargo.toml @@ -17,6 +17,7 @@ protobuf = "2.27.1" mc-api = { path = "../../api" } mc-attest-api = { path = "../../attest/api" } mc-attest-core = { path = "../../attest/core" } +mc-attest-enclave-api = { path = "../../attest/enclave-api" } mc-consensus-api = { path = "../../consensus/api" } mc-crypto-keys = { path = "../../crypto/keys" } mc-fog-enclave-connection = { path = "../enclave_connection" } diff --git a/fog/api/proto/view.proto b/fog/api/proto/view.proto index 023299b21e..0241ec6d29 100644 --- a/fog/api/proto/view.proto +++ b/fog/api/proto/view.proto @@ -10,6 +10,85 @@ import "external.proto"; import "kex_rng.proto"; import "fog_common.proto"; +import "google/protobuf/empty.proto"; + +/// A single Duplex streaming API that allows clients to authorize with Fog View and +/// query it for TxOuts. +service FogViewRouterAPI { + rpc request(stream FogViewRouterRequest) returns (stream FogViewRouterResponse) {} +} + +service FogViewRouterAdminAPI { + // Adds a shard to the Fog View Router's list of shards to query. + rpc addShard(AddShardRequest) returns (google.protobuf.Empty) {} +} + +message AddShardRequest { + // The shard's URI in string format. + string shard_uri = 1; +} + +message FogViewRouterRequest { + oneof request_data { + /// This is called to perform IX key exchange + /// with the enclave before making a query call. 
+ attest.AuthMessage auth = 1; + /// Input should be an encrypted QueryRequest + attest.Message query = 2; + } +} + +message FogViewRouterResponse { + oneof response_data { + /// Returned for an auth request. + attest.AuthMessage auth = 1; + /// Returned for a query request. + /// The data is an encrypted QueryResponse. + attest.Message query = 2; + } +} + +message MultiViewStoreQueryRequest { + /// A list of queries encrypted for Fog View Stores. + repeated attest.NonceMessage queries = 1; +} + +/// The status associated with a MultiViewStoreQueryResponse +enum MultiViewStoreQueryResponseStatus { + /// Default status. Shouldn't be set explicitly. + UNKNOWN = 0; + /// The Fog View Store successfully fulfilled the request. + SUCCESS = 1; + /// The Fog View Store is unable to decrypt a query within the MultiViewStoreQuery. It needs to be authenticated + /// by the router. + AUTHENTICATION_ERROR = 2; + /// The Fog View Store is not ready to service a MultiViewStoreQueryRequest. This might be because the store has + /// not loaded enough blocks yet. + NOT_READY = 3; +} + +message MultiViewStoreQueryResponse { + /// Optional field that gets set when the Fog View Store is able to decrypt a query + /// included in the MultiViewStoreQueryRequest and create a query response for that + // query. + attest.NonceMessage query_response = 1; + + /// The FogViewStoreUri for the specific Fog View Store that + /// tried to decrypt the MultiViewStoreQueryRequest and failed. + /// The client should subsequently authenticate with the machine + /// described by this URI. + string store_uri = 2; + + /// Status that gets returned when the Fog View Store services a MultiViewStoreQueryRequest. + MultiViewStoreQueryResponseStatus status = 3; + + /// The block range that this view store is responsible for based on the store's sharding strategy. Note that this + /// doesn't mean the block ranges that this store has processed. 
Rather, this is the range of blocks that this + /// store is configured to serve once they become available. + fog_common.BlockRange block_range = 4; +} + +/// Fulfills requests sent directly by a Fog client, e.g. a mobile phone using the SDK. service FogViewAPI { /// This is called to perform IX key exchange with the enclave before calling GetOutputs. rpc Auth(attest.AuthMessage) returns (attest.AuthMessage) {} @@ -17,6 +96,14 @@ service FogViewAPI { rpc Query(attest.Message) returns (attest.Message) {} } +/// Fulfills requests sent by the Fog View Router. This is not meant to fulfill requests sent directly by the client. +service FogViewStoreAPI { + /// This is called to perform IX key exchange with the enclave before calling GetOutputs. + rpc Auth(attest.AuthMessage) returns (attest.AuthMessage) {} + /// Input should be an encrypted MultiViewStoreQueryRequest, result is an encrypted QueryResponse. + rpc MultiViewStoreQuery(MultiViewStoreQueryRequest) returns (MultiViewStoreQueryResponse) {} +} + /// There are several kinds of records returned by the fog view API /// - RngRecords, which a user can use with their private key to construct KexRng's /// - TxOutSearchResults, which the user can decrypt with their private key to obtain TxOutRecords @@ -134,6 +221,8 @@ message QueryResponse { repeated DecommissionedIngestInvocation decommissioned_ingest_invocations = 6; /// Any TxOutSearchResults from the get_txos in the request. + /// TODO: Deprecate this field once clients have been given enough time to upgrade to the new + /// fixed_tx_out_search_result field. repeated TxOutSearchResult tx_out_search_results = 7; /// Extra data: The index of the last known block. @@ -145,6 +234,10 @@ message QueryResponse { /// This can be used by the client as a hint when choosing cryptonote mixin indices. /// This field doesn't have the same "cursor" semantics as the other fields. 
uint64 last_known_block_cumulative_txo_count = 9; + + /// Any FixedTxOutSearchResults from the get_txos in the request. Will be filled alongside the tx_out_search_result + /// field and contains the same payload data, but in a different format. + repeated FixedTxOutSearchResult fixed_tx_out_search_results = 10; } /// A record of an Rng created by a fog ingest enclave. @@ -186,6 +279,33 @@ message TxOutSearchResult { /// It is be zero-padding in the other cases. /// FIXME: MC-1491 ensure this happens either in enclave or db, or wait for ORAM bytes ciphertext = 3; + + /// Unused padding that needs to be returned to maintain internal obliviousness. + bytes padding = 4; + +} + +/// Contains the same payload data as a TxOutSearchResult, but the payload is contained within a ciphertext of fixed +// length. +message FixedTxOutSearchResult { + /// The search key associated to this result + bytes search_key = 1; + /// The result code for the query. + /// This is logically an enum, but should not be an enum because protobuf + /// requires that enums are encoded using the "varint" encoding which is not fixed size. + /// We want that e.g. "Found" and "NotFound" have the same length on the wire to avoid leaking that. + /// So it is a fixed32 in protobuf, and the 0 (default) value is intentionally unused. + fixed32 result_code = 2; + + /// A ciphertext, which is a view-key encrypted TxOutRecord in case result_code == 1. + /// It is be zero-padding in the other cases. + /// FIXME: MC-1491 ensure this happens either in enclave or db, or wait for ORAM + bytes ciphertext = 3; + + /// The length of the payload that is encrypted in the ciphertext. Ciphertexts will always be of fixed length, but + /// the contained payload may be less than this length, so the rest of the ciphertext is zeroed out. These + /// zeroed bytes should not be interpreted by the client, and this value tells the client which bytes to interpret. 
+ fixed32 payload_length = 4; } /// Corresponds to and documents values of TxOutSearchResult.result_code diff --git a/fog/api/src/conversions.rs b/fog/api/src/conversions.rs index 95f9a124da..6733b0be10 100644 --- a/fog/api/src/conversions.rs +++ b/fog/api/src/conversions.rs @@ -2,10 +2,33 @@ // // Contains helper methods that enable conversions for Fog Api types. -use crate::{fog_common, ingest_common}; +use crate::{fog_common, ingest_common, view::MultiViewStoreQueryRequest}; use mc_api::ConversionError; +use mc_attest_api::attest; +use mc_attest_enclave_api::{EnclaveMessage, NonceSession}; use mc_crypto_keys::CompressedRistrettoPublic; -use mc_fog_types::common; +use mc_fog_types::{common, common::BlockRange, view::MultiViewStoreQueryResponseStatus}; +use mc_fog_uri::{ConnectionUri, FogViewStoreUri}; +use std::str::FromStr; + +impl From>> for MultiViewStoreQueryRequest { + fn from(enclave_messages: Vec>) -> MultiViewStoreQueryRequest { + enclave_messages + .into_iter() + .map(|enclave_message| enclave_message.into()) + .collect::>() + .into() + } +} + +impl From> for MultiViewStoreQueryRequest { + fn from(attested_query_messages: Vec) -> MultiViewStoreQueryRequest { + let mut multi_view_store_query_request = MultiViewStoreQueryRequest::new(); + multi_view_store_query_request.set_queries(attested_query_messages.into()); + + multi_view_store_query_request + } +} impl From<&common::BlockRange> for fog_common::BlockRange { fn from(common_block_range: &common::BlockRange) -> fog_common::BlockRange { @@ -17,6 +40,12 @@ impl From<&common::BlockRange> for fog_common::BlockRange { } } +impl From for common::BlockRange { + fn from(proto_block_range: fog_common::BlockRange) -> common::BlockRange { + common::BlockRange::new(proto_block_range.start_block, proto_block_range.end_block) + } +} + impl TryFrom<&ingest_common::IngestSummary> for mc_fog_types::ingest_common::IngestSummary { type Error = ConversionError; fn try_from(proto_ingest_summary: 
&ingest_common::IngestSummary) -> Result { @@ -45,3 +74,42 @@ impl TryFrom<&ingest_common::IngestSummary> for mc_fog_types::ingest_common::Ing Ok(result) } } + +impl TryFrom + for mc_fog_types::view::MultiViewStoreQueryResponse +{ + type Error = ConversionError; + fn try_from( + mut proto_response: crate::view::MultiViewStoreQueryResponse, + ) -> Result { + let store_responder_id = + FogViewStoreUri::from_str(proto_response.get_store_uri())?.responder_id()?; + let result = mc_fog_types::view::MultiViewStoreQueryResponse { + encrypted_query_response: proto_response.take_query_response().into(), + store_responder_id, + store_uri: proto_response.get_store_uri().to_string(), + status: proto_response.get_status().into(), + block_range: BlockRange::from(proto_response.take_block_range()), + }; + Ok(result) + } +} + +impl From for MultiViewStoreQueryResponseStatus { + fn from(proto_status: crate::view::MultiViewStoreQueryResponseStatus) -> Self { + match proto_status { + crate::view::MultiViewStoreQueryResponseStatus::UNKNOWN => { + MultiViewStoreQueryResponseStatus::Unknown + } + crate::view::MultiViewStoreQueryResponseStatus::SUCCESS => { + MultiViewStoreQueryResponseStatus::Success + } + crate::view::MultiViewStoreQueryResponseStatus::AUTHENTICATION_ERROR => { + MultiViewStoreQueryResponseStatus::AuthenticationError + } + crate::view::MultiViewStoreQueryResponseStatus::NOT_READY => { + MultiViewStoreQueryResponseStatus::NotReady + } + } + } +} diff --git a/fog/api/tests/fog_types.rs b/fog/api/tests/fog_types.rs index 49a9be107d..ac7c955ea1 100644 --- a/fog/api/tests/fog_types.rs +++ b/fog/api/tests/fog_types.rs @@ -129,11 +129,12 @@ fn fog_view_query_response_round_trip() { .map(|_| mc_fog_types::view::DecommissionedIngestInvocation::sample(&mut rng)) .collect(), missed_block_ranges: Default::default(), - tx_out_search_results: (0..40) - .map(|_| mc_fog_types::view::TxOutSearchResult::sample(&mut rng)) + fixed_tx_out_search_results: (0..40) + .map(|_| 
mc_fog_types::view::FixedTxOutSearchResult::sample(&mut rng)) .collect(), last_known_block_count: rng.next_u32() as u64, last_known_block_cumulative_txo_count: rng.next_u32() as u64, + tx_out_search_results: vec![], }; round_trip_message::( &test_val, @@ -152,11 +153,12 @@ fn fog_view_query_response_round_trip() { .map(|_| mc_fog_types::view::DecommissionedIngestInvocation::sample(&mut rng)) .collect(), missed_block_ranges: Default::default(), - tx_out_search_results: (0..40) - .map(|_| mc_fog_types::view::TxOutSearchResult::sample(&mut rng)) + fixed_tx_out_search_results: (0..40) + .map(|_| mc_fog_types::view::FixedTxOutSearchResult::sample(&mut rng)) .collect(), last_known_block_count: rng.next_u32() as u64, last_known_block_cumulative_txo_count: rng.next_u32() as u64, + tx_out_search_results: vec![], }; round_trip_message::( &test_val, @@ -182,11 +184,12 @@ fn fog_view_query_response_round_trip() { ) }) .collect(), - tx_out_search_results: (0..40) - .map(|_| mc_fog_types::view::TxOutSearchResult::sample(&mut rng)) + fixed_tx_out_search_results: (0..40) + .map(|_| mc_fog_types::view::FixedTxOutSearchResult::sample(&mut rng)) .collect(), last_known_block_count: rng.next_u32() as u64, last_known_block_cumulative_txo_count: rng.next_u32() as u64, + tx_out_search_results: vec![], }; round_trip_message::( &test_val, @@ -427,12 +430,13 @@ impl Sample for mc_fog_types::view::DecommissionedIngestInvocation { } } -impl Sample for mc_fog_types::view::TxOutSearchResult { +impl Sample for mc_fog_types::view::FixedTxOutSearchResult { fn sample(rng: &mut T) -> Self { Self { search_key: <[u8; 32]>::sample(rng).to_vec(), ciphertext: <[u8; 32]>::sample(rng).to_vec(), result_code: 1, + payload_length: 32, } } } diff --git a/fog/ingest/enclave/trusted/Cargo.lock b/fog/ingest/enclave/trusted/Cargo.lock index eaced896c0..f2baf8facc 100644 --- a/fog/ingest/enclave/trusted/Cargo.lock +++ b/fog/ingest/enclave/trusted/Cargo.lock @@ -739,6 +739,7 @@ dependencies = [ "mc-attest-verifier", 
"mc-crypto-noise", "mc-sgx-compat", + "mc-util-serial", "serde", ] @@ -892,6 +893,7 @@ dependencies = [ "mc-sgx-build", "mc-sgx-compat", "mc-util-from-random", + "mc-util-serial", "sha2", ] @@ -1252,6 +1254,8 @@ version = "4.1.0-pre0" dependencies = [ "crc", "displaydoc", + "mc-attest-enclave-api", + "mc-common", "mc-crypto-keys", "mc-fog-kex-rng", "mc-transaction-core", diff --git a/fog/ledger/enclave/trusted/Cargo.lock b/fog/ledger/enclave/trusted/Cargo.lock index 2165a00cec..733cc5afc8 100644 --- a/fog/ledger/enclave/trusted/Cargo.lock +++ b/fog/ledger/enclave/trusted/Cargo.lock @@ -733,6 +733,7 @@ dependencies = [ "mc-attest-verifier", "mc-crypto-noise", "mc-sgx-compat", + "mc-util-serial", "serde", ] @@ -851,6 +852,7 @@ dependencies = [ "mc-sgx-build", "mc-sgx-compat", "mc-util-from-random", + "mc-util-serial", "sha2", ] @@ -1190,6 +1192,8 @@ version = "4.1.0-pre0" dependencies = [ "crc", "displaydoc", + "mc-attest-enclave-api", + "mc-common", "mc-crypto-keys", "mc-fog-kex-rng", "mc-transaction-core", diff --git a/fog/recovery_db_iface/src/lib.rs b/fog/recovery_db_iface/src/lib.rs index df7d67c536..1d4b4ab82c 100644 --- a/fog/recovery_db_iface/src/lib.rs +++ b/fog/recovery_db_iface/src/lib.rs @@ -14,7 +14,7 @@ use chrono::NaiveDateTime; use core::fmt::{Debug, Display}; use mc_crypto_keys::CompressedRistrettoPublic; use mc_fog_kex_rng::KexRngPubkey; -use mc_fog_types::view::TxOutSearchResult; +use mc_fog_types::view::FixedTxOutSearchResult; pub use mc_blockchain_types::Block; pub use mc_fog_types::{common::BlockRange, ETxOutRecord}; @@ -226,13 +226,13 @@ pub trait RecoveryDb { /// * search_keys: A list of fog tx_out search keys to search for. /// /// Returns: - /// * Exactly one TxOutSearchResult object for every search key, or an + /// * Exactly one FixedTxOutSearchResult object for every search key, or an /// internal database error description. 
fn get_tx_outs( &self, start_block: u64, search_keys: &[Vec], - ) -> Result, Self::Error>; + ) -> Result, Self::Error>; /// Mark a given ingest invocation as still being alive. fn update_last_active_at( @@ -264,8 +264,7 @@ pub trait RecoveryDb { /// /// Arguments: /// * ingress_key: The ingress key we need ETxOutRecords from - /// * block_index: The first block we need ETxOutRecords from - /// * block_count: How many consecutive blocks to also request data for. + /// * block_range: The range of blocks to get ETxOutRecords from. /// /// Returns: /// * The sequence of ETxOutRecord's, from consecutive blocks starting from @@ -273,8 +272,7 @@ pub trait RecoveryDb { fn get_tx_outs_by_block_range_and_key( &self, ingress_key: CompressedRistrettoPublic, - block_index: u64, - block_count: usize, + block_range: &BlockRange, ) -> Result>, Self::Error>; /// Get the invocation id that published this block with this key. diff --git a/fog/recovery_db_iface/src/types.rs b/fog/recovery_db_iface/src/types.rs index 1285e1591f..e1cb8b5969 100644 --- a/fog/recovery_db_iface/src/types.rs +++ b/fog/recovery_db_iface/src/types.rs @@ -72,32 +72,47 @@ impl IngressPublicKeyRecord { /// /// The function computes whether the view server needs to try to load a /// block connected to this ingress key. + pub fn covers_block_index(&self, block_index: u64) -> bool { + self.get_block_range().contains(block_index) + } + + /// Computes the block range for which this record is responsible for. /// - /// Behavior: /// Intuitively an IngressPublicKeyRecord corresponds to a range of /// consecutive block indices, that the view server needs to try to load /// for the users to make progress correctly. 
/// - /// If the key is not retired or lost, the range is - /// [ start_block , infinity ) + /// If the key is lost, the range is + /// [ start_block, last_scanned_block ] + /// /// If the key is retired and not lost, the range is /// [ start_block , pubkey_expiry ) /// - /// If the key is lost, the range is - /// [ start_block, last_scanned_block ] + /// If the key is neither retired nor lost, the range is + /// [ start_block , infinity ) + /// /// and if the key is lost and there is no last scanned block, the range is /// empty. - pub fn covers_block_index(&self, block_index: u64) -> bool { + pub fn get_block_range(&self) -> BlockRange { if self.status.lost { - self.status.start_block <= block_index - && self - .last_scanned_block - .map(|idx| block_index <= idx) - .unwrap_or(false) - } else { - self.status.start_block <= block_index - && (!self.status.retired || block_index < self.status.pubkey_expiry) + return self.get_lost_block_range(); } + if self.status.retired { + return BlockRange::new(self.status.start_block, self.status.pubkey_expiry); + } + + BlockRange::new(self.status.start_block, u64::MAX) + } + + fn get_lost_block_range(&self) -> BlockRange { + if let Some(last_scanned_block) = self.last_scanned_block { + // Add 1 because the last_scanned_block is inclusive. + return BlockRange::new(self.status.start_block, last_scanned_block + 1); + } + + // If we lost this key before it scanned anything, then return a + // default "empty" block range. + BlockRange::new(0, 0) } /// The next block index that needs to be scanned with this key. 
diff --git a/fog/sql_recovery_db/src/lib.rs b/fog/sql_recovery_db/src/lib.rs index 1d927096e5..62fd2257cb 100644 --- a/fog/sql_recovery_db/src/lib.rs +++ b/fog/sql_recovery_db/src/lib.rs @@ -41,7 +41,7 @@ use mc_fog_recovery_db_iface::{ }; use mc_fog_types::{ common::BlockRange, - view::{TxOutSearchResult, TxOutSearchResultCode}, + view::{FixedTxOutSearchResult, TxOutSearchResultCode}, ETxOutRecord, }; use mc_util_parse::parse_duration_in_seconds; @@ -882,13 +882,13 @@ impl SqlRecoveryDb { /// * search_keys: A list of fog tx_out search keys to search for. /// /// Returns: - /// * Exactly one TxOutSearchResult object for every search key, or an + /// * Exactly one FixedTxOutSearchResult object for every search key, or an /// internal database error description. fn get_tx_outs_retriable( &self, start_block: u64, search_keys: &[Vec], - ) -> Result, Error> { + ) -> Result, Error> { let conn = self.pool.get()?; let query = schema::ingested_blocks::dsl::ingested_blocks @@ -906,17 +906,12 @@ impl SqlRecoveryDb { let mut results = Vec::new(); for search_key in search_keys { results.push(match search_key_to_payload.get(search_key) { - Some(payload) => TxOutSearchResult { - search_key: search_key.clone(), - result_code: TxOutSearchResultCode::Found as u32, - ciphertext: payload.clone(), - }, - - None => TxOutSearchResult { - search_key: search_key.clone(), - result_code: TxOutSearchResultCode::NotFound as u32, - ciphertext: Default::default(), - }, + Some(payload) => FixedTxOutSearchResult::new( + search_key.clone(), + payload, + TxOutSearchResultCode::Found, + ), + None => FixedTxOutSearchResult::new_not_found(search_key.clone()), }); } @@ -976,8 +971,7 @@ impl SqlRecoveryDb { /// /// Arguments: /// * ingress_key: The ingress key we need ETxOutRecords from - /// * block_index: The first block we need ETxOutRecords from - /// * block_count: How many consecutive blocks to also request data for. + /// * block_range: The range of blocks to get ETxOutRecords from. 
/// /// Returns: /// * The sequence of ETxOutRecord's, from consecutive blocks starting from @@ -985,8 +979,7 @@ impl SqlRecoveryDb { fn get_tx_outs_by_block_range_and_key_retriable( &self, ingress_key: CompressedRistrettoPublic, - block_index: u64, - block_count: usize, + block_range: &BlockRange, ) -> Result>, Error> { let conn = self.pool.get()?; @@ -1002,8 +995,8 @@ impl SqlRecoveryDb { use schema::ingested_blocks::dsl; dsl::ingested_blocks .filter(dsl::ingress_public_key.eq(key_bytes)) - .filter(dsl::block_number.ge(block_index as i64)) - .limit(block_count as i64) + .filter(dsl::block_number.ge(block_range.start_block as i64)) + .limit(block_range.len() as i64) .select((dsl::block_number, dsl::proto_ingested_block_data)) .order(dsl::block_number.asc()) }; @@ -1011,12 +1004,12 @@ impl SqlRecoveryDb { // We will get one row for each hit in the table we found let rows: Vec<(i64, Vec)> = query.load(&conn)?; - if rows.len() > block_count { + if (rows.len() as u64) > block_range.len() { log::warn!( self.logger, "When querying, more responses than expected: {} > {}", rows.len(), - block_count + block_range.len(), ); } @@ -1030,11 +1023,11 @@ impl SqlRecoveryDb { let mut result = Vec::new(); for (idx, (block_number, proto)) in rows.into_iter().enumerate() { - if block_index + idx as u64 == block_number as u64 { + if block_range.start_block + (idx as u64) == block_number as u64 { let proto = ProtoIngestedBlockData::decode(&*proto)?; result.push(proto.e_tx_out_records); } else { - log::warn!(self.logger, "When querying for block index {} and up to {} blocks on, the {}'th response has block_number {} which is not expected. Gaps in the data?", block_index, block_count, idx, block_number); + log::warn!(self.logger, "When querying for block index {} and up to {} blocks on, the {}'th response has block_number {} which is not expected. 
Gaps in the data?", block_range.start_block, block_range.len(), idx, block_number); break; } } @@ -1420,13 +1413,13 @@ impl RecoveryDb for SqlRecoveryDb { /// * search_keys: A list of fog tx_out search keys to search for. /// /// Returns: - /// * Exactly one TxOutSearchResult object for every search key, or an + /// * Exactly one FixedTxOutSearchResult object for every search key, or an /// internal database error description. fn get_tx_outs( &self, start_block: u64, search_keys: &[Vec], - ) -> Result, Self::Error> { + ) -> Result, Self::Error> { our_retry(self.get_retries(), || { self.get_tx_outs_retriable(start_block, search_keys) }) @@ -1469,8 +1462,7 @@ impl RecoveryDb for SqlRecoveryDb { /// /// Arguments: /// * ingress_key: The ingress key we need ETxOutRecords from - /// * block_index: The first block we need ETxOutRecords from - /// * block_count: How many consecutive blocks to also request data for. + /// * block_range: The range of blocks to get ETxOutRecords from. /// /// Returns: /// * The sequence of ETxOutRecord's, from consecutive blocks starting from @@ -1478,11 +1470,10 @@ impl RecoveryDb for SqlRecoveryDb { fn get_tx_outs_by_block_range_and_key( &self, ingress_key: CompressedRistrettoPublic, - block_index: u64, - block_count: usize, + block_range: &BlockRange, ) -> Result>, Self::Error> { our_retry(self.get_retries(), || { - self.get_tx_outs_by_block_range_and_key_retriable(ingress_key, block_index, block_count) + self.get_tx_outs_by_block_range_and_key_retriable(ingress_key, block_range) }) } @@ -1630,6 +1621,7 @@ mod tests { }; use mc_crypto_keys::RistrettoPublic; use mc_fog_test_infra::db_tests::{random_block, random_kex_rng_pubkey}; + use mc_fog_types::view::FixedTxOutSearchResult; use mc_util_from_random::FromRandom; use pem::Pem; use rand::{rngs::StdRng, thread_rng, SeedableRng}; @@ -2238,11 +2230,7 @@ mod tests { results, test_case .iter() - .map(|search_key| TxOutSearchResult { - search_key: search_key.clone(), - result_code: 
TxOutSearchResultCode::NotFound as u32, - ciphertext: vec![] - }) + .map(|search_key| FixedTxOutSearchResult::new_not_found(search_key.clone())) .collect::>() ); } @@ -2259,31 +2247,23 @@ mod tests { assert_eq!( results, vec![ - TxOutSearchResult { - search_key: test_case[0].clone(), - result_code: TxOutSearchResultCode::NotFound as u32, - ciphertext: vec![] - }, - TxOutSearchResult { - search_key: test_case[1].clone(), - result_code: TxOutSearchResultCode::Found as u32, - ciphertext: records1[0].payload.clone(), - }, - TxOutSearchResult { - search_key: test_case[2].clone(), - result_code: TxOutSearchResultCode::Found as u32, - ciphertext: records1[5].payload.clone(), - }, - TxOutSearchResult { - search_key: test_case[3].clone(), - result_code: TxOutSearchResultCode::Found as u32, - ciphertext: records2[3].payload.clone(), - }, - TxOutSearchResult { - search_key: test_case[4].clone(), - result_code: TxOutSearchResultCode::NotFound as u32, - ciphertext: vec![] - }, + FixedTxOutSearchResult::new_not_found(test_case[0].clone()), + FixedTxOutSearchResult::new( + test_case[1].clone(), + &records1[0].payload, + TxOutSearchResultCode::Found + ), + FixedTxOutSearchResult::new( + test_case[2].clone(), + &records1[5].payload, + TxOutSearchResultCode::Found + ), + FixedTxOutSearchResult::new( + test_case[3].clone(), + &records2[3].payload, + TxOutSearchResultCode::Found + ), + FixedTxOutSearchResult::new_not_found(test_case[4].clone()), ] ); @@ -2291,31 +2271,23 @@ mod tests { assert_eq!( results, vec![ - TxOutSearchResult { - search_key: test_case[0].clone(), - result_code: TxOutSearchResultCode::NotFound as u32, - ciphertext: vec![] - }, - TxOutSearchResult { - search_key: test_case[1].clone(), - result_code: TxOutSearchResultCode::Found as u32, - ciphertext: records1[0].payload.clone(), - }, - TxOutSearchResult { - search_key: test_case[2].clone(), - result_code: TxOutSearchResultCode::Found as u32, - ciphertext: records1[5].payload.clone(), - }, - TxOutSearchResult { - 
search_key: test_case[3].clone(), - result_code: TxOutSearchResultCode::Found as u32, - ciphertext: records2[3].payload.clone(), - }, - TxOutSearchResult { - search_key: test_case[4].clone(), - result_code: TxOutSearchResultCode::NotFound as u32, - ciphertext: vec![] - }, + FixedTxOutSearchResult::new_not_found(test_case[0].clone()), + FixedTxOutSearchResult::new( + test_case[1].clone(), + &records1[0].payload, + TxOutSearchResultCode::Found + ), + FixedTxOutSearchResult::new( + test_case[2].clone(), + &records1[5].payload, + TxOutSearchResultCode::Found + ), + FixedTxOutSearchResult::new( + test_case[3].clone(), + &records2[3].payload, + TxOutSearchResultCode::Found + ), + FixedTxOutSearchResult::new_not_found(test_case[4].clone()), ] ); @@ -2325,31 +2297,11 @@ mod tests { assert_eq!( results, vec![ - TxOutSearchResult { - search_key: test_case[0].clone(), - result_code: TxOutSearchResultCode::NotFound as u32, - ciphertext: vec![] - }, - TxOutSearchResult { - search_key: test_case[1].clone(), - result_code: TxOutSearchResultCode::NotFound as u32, - ciphertext: vec![] - }, - TxOutSearchResult { - search_key: test_case[2].clone(), - result_code: TxOutSearchResultCode::NotFound as u32, - ciphertext: vec![] - }, - TxOutSearchResult { - search_key: test_case[3].clone(), - result_code: TxOutSearchResultCode::NotFound as u32, - ciphertext: vec![] - }, - TxOutSearchResult { - search_key: test_case[4].clone(), - result_code: TxOutSearchResultCode::NotFound as u32, - ciphertext: vec![] - }, + FixedTxOutSearchResult::new_not_found(test_case[0].clone()), + FixedTxOutSearchResult::new_not_found(test_case[1].clone()), + FixedTxOutSearchResult::new_not_found(test_case[2].clone()), + FixedTxOutSearchResult::new_not_found(test_case[3].clone()), + FixedTxOutSearchResult::new_not_found(test_case[4].clone()), ] ); @@ -2357,31 +2309,15 @@ mod tests { assert_eq!( results, vec![ - TxOutSearchResult { - search_key: test_case[0].clone(), - result_code: TxOutSearchResultCode::NotFound as 
u32, - ciphertext: vec![] - }, - TxOutSearchResult { - search_key: test_case[1].clone(), - result_code: TxOutSearchResultCode::NotFound as u32, - ciphertext: vec![] - }, - TxOutSearchResult { - search_key: test_case[2].clone(), - result_code: TxOutSearchResultCode::NotFound as u32, - ciphertext: vec![] - }, - TxOutSearchResult { - search_key: test_case[3].clone(), - result_code: TxOutSearchResultCode::Found as u32, - ciphertext: records2[3].payload.clone(), - }, - TxOutSearchResult { - search_key: test_case[4].clone(), - result_code: TxOutSearchResultCode::NotFound as u32, - ciphertext: vec![] - }, + FixedTxOutSearchResult::new_not_found(test_case[0].clone()), + FixedTxOutSearchResult::new_not_found(test_case[1].clone()), + FixedTxOutSearchResult::new_not_found(test_case[2].clone()), + FixedTxOutSearchResult::new( + test_case[3].clone(), + &records2[3].payload, + TxOutSearchResultCode::Found + ), + FixedTxOutSearchResult::new_not_found(test_case[4].clone()), ] ); } @@ -2463,69 +2399,78 @@ mod tests { // Get tx outs for a key we're not aware of or a block id we're not aware of // should return empty vec + let block_range = BlockRange::new_from_length(124, 2); let batch_result = db - .get_tx_outs_by_block_range_and_key(ingress_key, 124, 2) + .get_tx_outs_by_block_range_and_key(ingress_key, &block_range) .unwrap(); assert_eq!(batch_result.len(), 0); + let block_range = BlockRange::new_from_length(123, 2); let batch_result = db .get_tx_outs_by_block_range_and_key( CompressedRistrettoPublic::from_random(&mut rng), - 123, - 2, + &block_range, ) .unwrap(); assert_eq!(batch_result.len(), 0); // Getting tx outs in a batch should work as expected when requesting things // that exist + let block_range = BlockRange::new_from_length(block1.index, 1); let batch_results = db - .get_tx_outs_by_block_range_and_key(ingress_key, block1.index, 1) + .get_tx_outs_by_block_range_and_key(ingress_key, &block_range) .unwrap(); assert_eq!(batch_results.len(), 1); assert_eq!(batch_results[0], 
records1); + let block_range = BlockRange::new_from_length(block2.index, 1); let batch_results = db - .get_tx_outs_by_block_range_and_key(ingress_key, block2.index, 1) + .get_tx_outs_by_block_range_and_key(ingress_key, &block_range) .unwrap(); assert_eq!(batch_results.len(), 1); assert_eq!(batch_results[0], records2); + let block_range = BlockRange::new_from_length(block1.index, 2); let batch_results = db - .get_tx_outs_by_block_range_and_key(ingress_key, block1.index, 2) + .get_tx_outs_by_block_range_and_key(ingress_key, &block_range) .unwrap(); assert_eq!(batch_results.len(), 2); assert_eq!(batch_results[0], records1); assert_eq!(batch_results[1], records2); + let block_range = BlockRange::new_from_length(block2.index, 2); let batch_results = db - .get_tx_outs_by_block_range_and_key(ingress_key, block2.index, 2) + .get_tx_outs_by_block_range_and_key(ingress_key, &block_range) .unwrap(); assert_eq!(batch_results.len(), 1); assert_eq!(batch_results[0], records2); + let block_range = BlockRange::new_from_length(block1.index, 3); let batch_results = db - .get_tx_outs_by_block_range_and_key(ingress_key, block1.index, 3) + .get_tx_outs_by_block_range_and_key(ingress_key, &block_range) .unwrap(); assert_eq!(batch_results.len(), 2); assert_eq!(batch_results[0], records1); assert_eq!(batch_results[1], records2); + let block_range = BlockRange::new_from_length(block2.index, 3); let batch_results = db - .get_tx_outs_by_block_range_and_key(ingress_key, block2.index, 3) + .get_tx_outs_by_block_range_and_key(ingress_key, &block_range) .unwrap(); assert_eq!(batch_results.len(), 1); assert_eq!(batch_results[0], records2); // When there is a gap in the data, the gap should suppress any further results // even if there are hits later in the range. 
+ let block_range = BlockRange::new_from_length(block1.index - 1, 2); let batch_results = db - .get_tx_outs_by_block_range_and_key(ingress_key, block1.index - 1, 2) + .get_tx_outs_by_block_range_and_key(ingress_key, &block_range) .unwrap(); assert_eq!(batch_results.len(), 0); + let block_range = BlockRange::new_from_length(block1.index - 2, 3); let batch_results = db - .get_tx_outs_by_block_range_and_key(ingress_key, block1.index - 2, 3) + .get_tx_outs_by_block_range_and_key(ingress_key, &block_range) .unwrap(); assert_eq!(batch_results.len(), 0); } diff --git a/fog/test_infra/src/db_tests.rs b/fog/test_infra/src/db_tests.rs index 117a607dd2..fdf8334ce7 100644 --- a/fog/test_infra/src/db_tests.rs +++ b/fog/test_infra/src/db_tests.rs @@ -625,10 +625,17 @@ fn test_recovery_db_txs_new_apis( let results = db.get_tx_outs(0, &search_keys[..]).unwrap(); for row in test_rows { - assert!(results.iter().any(|res| &res.search_key[..] - == AsRef::<[u8]>::as_ref(&row.search_key) - && res.ciphertext == row.payload - && res.result_code == TxOutSearchResultCode::Found as u32)); + assert!(results.iter().any(|fixed_result| { + let search_keys_match = + &fixed_result.search_key[..] 
== AsRef::<[u8]>::as_ref(&row.search_key); + let payload_length = fixed_result.payload_length as usize; + let result_payload = &fixed_result.ciphertext[0..payload_length]; + let payloads_match = result_payload == row.payload; + let result_codes_match = + fixed_result.result_code == TxOutSearchResultCode::Found as u32; + + search_keys_match && payloads_match && result_codes_match + })); } assert!(results.iter().any(|res| res.search_key == random_search_key diff --git a/fog/test_infra/src/mock_client.rs b/fog/test_infra/src/mock_client.rs index feb123b55b..884889002a 100644 --- a/fog/test_infra/src/mock_client.rs +++ b/fog/test_infra/src/mock_client.rs @@ -75,9 +75,18 @@ impl FogViewConnection for PassThroughViewClient { tx_out_search_results: Default::default(), last_known_block_count: highest_known_block_count, last_known_block_cumulative_txo_count: cumulative_txo_count, + fixed_tx_out_search_results: Default::default(), }; - resp.tx_out_search_results = self.db.get_tx_outs(start_from_block_index, &search_keys)?; + resp.fixed_tx_out_search_results = + self.db.get_tx_outs(start_from_block_index, &search_keys)?; + + resp.tx_out_search_results = resp + .fixed_tx_out_search_results + .iter() + .cloned() + .map(|result| result.into()) + .collect(); Ok(resp) } diff --git a/fog/types/Cargo.toml b/fog/types/Cargo.toml index 0ab93aacd5..1acd6e5f0a 100644 --- a/fog/types/Cargo.toml +++ b/fog/types/Cargo.toml @@ -9,6 +9,8 @@ rust-version = { workspace = true } [dependencies] # mobilecoin +mc-attest-enclave-api = { path = "../../attest/enclave-api" } +mc-common = { path = "../../common/", default-features = false } mc-crypto-keys = { path = "../../crypto/keys", default-features = false } mc-transaction-core = { path = "../../transaction/core" } mc-watcher-api = { path = "../../watcher/api" } @@ -32,3 +34,4 @@ mc-util-test-with-data = { path = "../../util/test-with-data" } # third-party hex = "0.4" +yare = "1.0.2" diff --git a/fog/types/src/common.rs b/fog/types/src/common.rs 
index 8d1bb7ce22..a7fe504f41 100644 --- a/fog/types/src/common.rs +++ b/fog/types/src/common.rs @@ -1,8 +1,14 @@ // Copyright (c) 2018-2022 The MobileCoin Foundation +use alloc::{format, string::String, vec::Vec}; +use core::str::FromStr; use prost::Message; use serde::{Deserialize, Serialize}; +/// The string that delimits the start and end blocks in a string that +/// represents a BlockRange. +pub const BLOCK_RANGE_DELIMITER: &str = "-"; + /// A half-open [a, b) range of blocks #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Message, Serialize, Deserialize)] pub struct BlockRange { @@ -23,6 +29,14 @@ impl BlockRange { } } + /// Create a new block range from length + pub fn new_from_length(start_block: u64, length: u64) -> Self { + Self { + start_block, + end_block: start_block + length, + } + } + /// Test if a block index is in the range pub fn contains(&self, block: u64) -> bool { block >= self.start_block && block < self.end_block @@ -37,6 +51,16 @@ impl BlockRange { pub fn overlaps(&self, other: &BlockRange) -> bool { self.start_block < other.end_block && other.start_block < self.end_block } + + /// Returns the length of the BlockRange, i.e. the number of blocks. + pub fn len(&self) -> u64 { + self.end_block - self.start_block + } + + /// Returns true if the BlockRange length is 0. 
+ pub fn is_empty(&self) -> bool { + self.len() == 0 + } } impl core::fmt::Display for BlockRange { @@ -45,9 +69,31 @@ impl core::fmt::Display for BlockRange { } } +impl FromStr for BlockRange { + type Err = String; + + fn from_str(s: &str) -> Result { + let block_indices: Vec = s + .split(BLOCK_RANGE_DELIMITER) + .map(|index_str| index_str.trim().parse()) + .collect::, _>>() + .map_err(|_| "BlockRange index is not a number.")?; + if block_indices.len() != 2 { + return Err(format!( + "Block range is composed of two indices, found {} indices", + block_indices.len() + )); + } + let result = BlockRange::new(block_indices[0], block_indices[1]); + + Ok(result) + } +} + #[cfg(test)] mod tests { use super::*; + #[test] fn test_contains() { let range = BlockRange::new(10, 13); @@ -87,4 +133,57 @@ mod tests { assert!(!range.overlaps(&BlockRange::new(0, 10))); assert!(!range.overlaps(&BlockRange::new(13, 100))); } + + #[test] + fn from_string_well_formatted_creates_block_range() { + let start_block = 0; + let end_block = 10; + let block_range_str = format!("{start_block}{BLOCK_RANGE_DELIMITER}{end_block}"); + + let result = BlockRange::from_str(&block_range_str); + + assert!(result.is_ok()); + let block_range = result.unwrap(); + assert_eq!(block_range.start_block, start_block); + assert_eq!(block_range.end_block, end_block); + } + + #[test] + fn from_string_well_formatted_with_whitespace_creates_block_range() { + let start_block = 0; + let end_block = 10; + let block_range_str = format!("{start_block} {BLOCK_RANGE_DELIMITER} {end_block}"); + + let result = BlockRange::from_str(&block_range_str); + + assert!(result.is_ok()); + let block_range = result.unwrap(); + assert_eq!(block_range.start_block, start_block); + assert_eq!(block_range.end_block, end_block); + } + + #[test] + fn from_string_multiple_indices_errors() { + let start_block = 0; + let end_block = 10; + let third_block = 10; + let block_range_str = format!( + 
"{start_block}{BLOCK_RANGE_DELIMITER}{end_block}{BLOCK_RANGE_DELIMITER}{third_block}" + ); + + let result = BlockRange::from_str(&block_range_str); + + assert!(result.is_err()); + } + + #[test] + fn from_string_non_numbers_errors() { + let start_block = 'a'; + let end_block = 'b'; + let block_range_str = format!("{start_block}{BLOCK_RANGE_DELIMITER}{end_block}"); + + let result = BlockRange::from_str(&block_range_str); + + assert!(result.is_err()); + } } diff --git a/fog/types/src/view.rs b/fog/types/src/view.rs index fe1c258ab2..0837b45964 100644 --- a/fog/types/src/view.rs +++ b/fog/types/src/view.rs @@ -1,9 +1,11 @@ // Copyright (c) 2018-2022 The MobileCoin Foundation use crate::common::BlockRange; -use alloc::vec::Vec; +use alloc::{string::String, vec, vec::Vec}; use crc::Crc; use displaydoc::Display; +use mc_attest_enclave_api::{EnclaveMessage, NonceSession}; +use mc_common::ResponderId; use mc_crypto_keys::{CompressedRistrettoPublic, KeyError, RistrettoPrivate, RistrettoPublic}; use mc_transaction_core::{ encrypted_fog_hint::{EncryptedFogHint, ENCRYPTED_FOG_HINT_LEN}, @@ -15,6 +17,9 @@ use serde::{Deserialize, Serialize}; pub use mc_fog_kex_rng::KexRngPubkey; +/// The length of the ciphertext in the FixedTxOutSearchResult. +pub const FIXED_CIPHERTEXT_LENGTH: usize = 255; + // User <-> enclave proto schema types // These are synced with types in fog_api view.proto, and tests enforce that // they round trip These are NOT expected to be synced with Db schema types @@ -93,6 +98,45 @@ pub struct QueryResponse { /// clients sample for mixins. #[prost(uint64, tag = "9")] pub last_known_block_cumulative_txo_count: u64, + + /// The results of each tx out search query + #[prost(message, repeated, tag = "10")] + pub fixed_tx_out_search_results: Vec, +} + +/// Internal representation of the `MultiViewStoreQueryResponseStatus` proto +/// enum. +#[derive(Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] +pub enum MultiViewStoreQueryResponseStatus { + /// Default status.
This shouldn't be set explicitly. + Unknown, + /// The Fog View Store successfully fulfilled the request. + Success, + /// The Fog View Store is unable to decrypt a query within the + /// MultiViewStoreQuery. It needs to be authenticated by the router. + AuthenticationError, + /// The Fog View Store is not ready to service a MultiViewStoreQueryRequest. + /// This might be because the store has not loaded enough blocks yet. + NotReady, +} + +/// Internal representation of the `MultiViewStoreQueryResponse` proto struct. +#[derive(Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] +pub struct MultiViewStoreQueryResponse { + /// The encrypted QueryResponse. + pub encrypted_query_response: EnclaveMessage, + + /// The `ResponderId` for the store that fulfilled the request. + pub store_responder_id: ResponderId, + + /// The `FogViewStoreUri` for the store that fulfilled the request. + pub store_uri: String, + + /// The block ranges that the shard is responsible for. + pub block_range: BlockRange, + + /// The response status set by the store. + pub status: MultiViewStoreQueryResponseStatus, } /// A record that can be used by the user to produce an Rng shared with fog @@ -180,6 +224,49 @@ pub struct TxOutSearchResult { /// The ciphertext payload #[prost(bytes, tag = "3")] pub ciphertext: Vec, + /// The padding bytes + #[prost(bytes, tag = "4")] + pub padding: Vec, +} + +/// A struct representing the result of a fog view Txo query with a fixed +/// ciphertext length. The view stores return this to the router.
+#[derive(Clone, Eq, Hash, PartialEq, Message, Serialize, Deserialize)] +pub struct FixedTxOutSearchResult { + /// The search key that yielded this result + #[prost(bytes, tag = "1")] + pub search_key: Vec, + /// This is a TxOutSearchResultCode + #[prost(fixed32, tag = "2")] + pub result_code: u32, + /// The ciphertext payload + #[prost(bytes, tag = "3")] + pub ciphertext: Vec, + /// The payload length + #[prost(fixed32, tag = "4")] + pub payload_length: u32, +} + +impl FixedTxOutSearchResult { + /// Creates a new [FixedTxOutSearchResult] + pub fn new(search_key: Vec, payload: &[u8], result_code: TxOutSearchResultCode) -> Self { + let mut ciphertext = vec![0u8; FIXED_CIPHERTEXT_LENGTH]; + let payload_length = payload.len(); + ciphertext[0..payload_length].clone_from_slice(payload); + + FixedTxOutSearchResult { + search_key, + result_code: result_code as u32, + ciphertext, + payload_length: payload_length as u32, + } + } + + /// Creates a new [FixedTxOutSearchResult] with + /// [TxOutSearchResult::NotFound] + pub fn new_not_found(search_key: Vec) -> Self { + Self::new(search_key, &[], TxOutSearchResultCode::NotFound) + } } /// An enum capturing the Oneof in the proto file around masked token id bytes @@ -225,6 +312,27 @@ impl From<&MaskedAmount> for TxOutAmountMaskedTokenId { } } +/// This conversion must be constant time. +impl From for TxOutSearchResult { + fn from(src: FixedTxOutSearchResult) -> Self { + // The ciphertext field's length will always be FIXED_CIPHERTEXT_LENGTH, so this + // is constant time. 
+ let mut ciphertext = src.ciphertext.clone(); + let mut padding = vec![0; FIXED_CIPHERTEXT_LENGTH]; + + ciphertext.truncate(src.payload_length as usize); + let padding_length = FIXED_CIPHERTEXT_LENGTH - (src.payload_length as usize); + padding.truncate(padding_length); + + TxOutSearchResult { + search_key: src.search_key, + result_code: src.result_code, + ciphertext, + padding, + } + } +} + /// TxOutRecord is what information the fog service preserves for a user about /// their TxOut. These are created by the ingest server and then encrypted. The /// encrypted blobs are eventually returned to the user, who must deserialize @@ -570,3 +678,50 @@ mod view_tests { assert!(result.is_err()); } } + +#[cfg(test)] +mod tests { + use crate::view::{ + FixedTxOutSearchResult, TxOutSearchResult, TxOutSearchResultCode, FIXED_CIPHERTEXT_LENGTH, + }; + use alloc::{vec, vec::Vec}; + use yare::parameterized; + + #[parameterized( + payload_length_is_0 = { 0 }, + payload_length_is_1 = { 1 }, + payload_length_smaller_than_ciphertext = { 232 }, + payload_length_is_254 = { 254 }, + payload_length_equals_ciphertext = { 255 }, + )] + fn tx_out_search_result_conversion(payload_length: usize) { + let fixed_tx_out_search_results = (0..10) + .map(|i| { + let payload = vec![i; payload_length]; + FixedTxOutSearchResult::new(vec![i], &payload, TxOutSearchResultCode::Found) + }) + .collect::>(); + + let tx_out_search_results = fixed_tx_out_search_results + .iter() + .map(|result| result.clone().into()) + .collect::>(); + + for (i, result) in tx_out_search_results.iter().enumerate() { + assert_eq!(result.search_key, fixed_tx_out_search_results[i].search_key); + assert_eq!( + result.result_code, + fixed_tx_out_search_results[i].result_code + ); + let fixed_result_payload_length = + fixed_tx_out_search_results[i].payload_length as usize; + assert_eq!( + result.ciphertext, + fixed_tx_out_search_results[i].ciphertext[0..fixed_result_payload_length] + ); + let padding_length = + FIXED_CIPHERTEXT_LENGTH 
- (fixed_tx_out_search_results[i].payload_length as usize); + assert_eq!(result.padding, vec![0; padding_length]) + } + } +} diff --git a/fog/uri/src/lib.rs b/fog/uri/src/lib.rs index 4669bea5e8..b5634bc213 100644 --- a/fog/uri/src/lib.rs +++ b/fog/uri/src/lib.rs @@ -4,6 +4,20 @@ use mc_util_uri::{Uri, UriScheme}; pub use mc_util_uri::{ConnectionUri, FogUri, UriParseError}; +/// Fog View Router Scheme +#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct FogViewRouterScheme {} + +impl UriScheme for FogViewRouterScheme { + /// The part before the '://' of a URL. + const SCHEME_SECURE: &'static str = "fog-view-router"; + const SCHEME_INSECURE: &'static str = "insecure-fog-view-router"; + + /// Default port numbers + const DEFAULT_SECURE_PORT: u16 = 443; + const DEFAULT_INSECURE_PORT: u16 = 3225; +} + /// Fog View Uri Scheme #[derive(Debug, Hash, Ord, PartialOrd, Eq, PartialEq, Clone)] pub struct FogViewScheme {} @@ -18,6 +32,20 @@ impl UriScheme for FogViewScheme { const DEFAULT_INSECURE_PORT: u16 = 3225; } +/// Fog View Store Scheme +#[derive(Debug, Hash, Ord, PartialOrd, Eq, PartialEq, Clone)] +pub struct FogViewStoreScheme {} + +impl UriScheme for FogViewStoreScheme { + /// The part before the '://' of a URL. + const SCHEME_SECURE: &'static str = "fog-view-store"; + const SCHEME_INSECURE: &'static str = "insecure-fog-view-store"; + + /// Default port numbers + const DEFAULT_SECURE_PORT: u16 = 443; + const DEFAULT_INSECURE_PORT: u16 = 3225; +} + /// Fog Ledger Uri Scheme #[derive(Debug, Hash, Ord, PartialOrd, Eq, PartialEq, Clone)] pub struct FogLedgerScheme {} @@ -59,22 +87,25 @@ impl UriScheme for IngestPeerScheme { const DEFAULT_INSECURE_PORT: u16 = 8090; } -/// Uri used when talking to fog-view service, with the right default ports and -/// scheme. -pub type FogViewUri = Uri; -/// Uri used when talking to fog-ledger service, with the right default ports -/// and scheme. 
-pub type FogLedgerUri = Uri; /// Uri used when talking to fog-ingest service, with the right default ports /// and scheme. pub type FogIngestUri = Uri; -/// Usi used when talking to fog-ingest-peer service. +/// Uri used when talking to fog-ledger service, with the right default ports +/// and scheme. +pub type FogLedgerUri = Uri; +/// Uri used when talking to fog view router service. +pub type FogViewRouterUri = Uri; +/// Uri used when talking to fog view store service. +pub type FogViewStoreUri = Uri; +/// Uri used when talking to fog-view service, with the right default ports and +/// scheme. +pub type FogViewUri = Uri; +/// Uri used when talking to fog-ingest-peer service. pub type IngestPeerUri = Uri; #[cfg(test)] mod tests { - use super::{FogLedgerUri, FogViewUri}; - use crate::ConnectionUri; + use super::*; use core::str::FromStr; use mc_common::ResponderId; @@ -129,6 +160,27 @@ mod tests { ResponderId::from_str("node1.test.mobilecoin.com:666").unwrap() ); assert!(!uri.use_tls()); + + let uri = FogViewRouterUri::from_str( + "insecure-fog-view-router://node1.test.mobilecoin.com:3225/", + ) + .unwrap(); + assert_eq!(uri.addr(), "node1.test.mobilecoin.com:3225"); + assert_eq!( + uri.responder_id().unwrap(), + ResponderId::from_str("node1.test.mobilecoin.com:3225").unwrap() + ); + assert!(!uri.use_tls()); + + let uri = + FogViewStoreUri::from_str("insecure-fog-view-store://node1.test.mobilecoin.com:3225/") + .unwrap(); + assert_eq!(uri.addr(), "node1.test.mobilecoin.com:3225"); + assert_eq!( + uri.responder_id().unwrap(), + ResponderId::from_str("node1.test.mobilecoin.com:3225").unwrap() + ); + assert!(!uri.use_tls()); } #[test] diff --git a/fog/view/connection/Cargo.toml b/fog/view/connection/Cargo.toml index 03084f30db..9733bc7131 100644 --- a/fog/view/connection/Cargo.toml +++ b/fog/view/connection/Cargo.toml @@ -8,14 +8,20 @@ readme = "README.md" rust-version = { workspace = true } [dependencies] + # mobilecoin +mc-attest-ake = { path = 
"../../../attest/ake" } +mc-attest-api = { path = "../../../attest/api" } mc-attest-core = { path = "../../../attest/core" } mc-attest-verifier = { path = "../../../attest/verifier" } mc-common = { path = "../../../common", features = ["log"] } mc-crypto-keys = { path = "../../../crypto/keys" } +mc-crypto-noise = { path = "../../../crypto/noise" } +mc-crypto-rand = { path = "../../../crypto/rand" } mc-util-grpc = { path = "../../../util/grpc" } mc-util-serial = { path = "../../../util/serial" } mc-util-telemetry = { path = "../../../util/telemetry" } +mc-util-uri = { path = "../../../util/uri" } # fog mc-fog-api = { path = "../../api" } @@ -25,5 +31,9 @@ mc-fog-uri = { path = "../../uri" } mc-fog-view-protocol = { path = "../protocol" } # third-party +aes-gcm = "0.10.1" +futures = "0.3" grpcio = "0.12.1" retry = "2.0" +sha2 = { version = "0.10", default-features = false } +tokio = "1.16.1" diff --git a/fog/view/connection/src/fog_view_router_client.rs b/fog/view/connection/src/fog_view_router_client.rs new file mode 100644 index 0000000000..c341aa57d6 --- /dev/null +++ b/fog/view/connection/src/fog_view_router_client.rs @@ -0,0 +1,256 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +//! 
Makes requests to the fog view router service + +use aes_gcm::Aes256Gcm; +use futures::{executor::block_on, SinkExt, TryStreamExt}; +use grpcio::{ChannelBuilder, ClientDuplexReceiver, ClientDuplexSender, Environment}; +use mc_attest_ake::{ + AuthResponseInput, ClientInitiate, Error as AttestAkeError, Ready, Start, Transition, +}; +use mc_attest_api::attest::{AuthMessage, Message}; +use mc_attest_core::VerificationReport; +use mc_attest_verifier::Verifier; +use mc_common::logger::{log, o, Logger}; +use mc_crypto_keys::X25519; +use mc_crypto_noise::CipherError; +use mc_crypto_rand::McRng; +use mc_fog_api::{ + view::{FogViewRouterRequest, FogViewRouterResponse}, + view_grpc::FogViewRouterApiClient, +}; +use mc_fog_types::view::{QueryRequest, QueryRequestAAD, QueryResponse}; +use mc_fog_uri::{ConnectionUri, FogViewRouterUri}; +use mc_util_grpc::ConnectionUriGrpcioChannel; +use mc_util_serial::DecodeError; +use mc_util_uri::UriConversionError; +use sha2::Sha512; +use std::sync::Arc; + +/// A high-level object mediating requests to the fog view router service +pub struct FogViewRouterGrpcClient { + /// A logger object + logger: Logger, + + /// The AKE state machine object, if one is available. + attest_cipher: Option>, + + _fog_view_router_client: FogViewRouterApiClient, + + /// Sends requests to the fog view router + request_sender: ClientDuplexSender, + + /// Receives responses from the fog view router + response_receiver: ClientDuplexReceiver, + + uri: FogViewRouterUri, + + /// An object which can verify a fog node's provided IAS report + verifier: Verifier, +} + +impl FogViewRouterGrpcClient { + /// Creates a new fog view router grpc client and opens a streaming + /// connection to the fog view router service. 
+ /// + /// Arguments: + /// * uri: The Uri to connect to + /// * verifier: The attestation verifier + /// * env: A grpc environment (thread pool) to use for this connection + /// * logger: For logging + pub fn new( + uri: FogViewRouterUri, + verifier: Verifier, + env: Arc, + logger: Logger, + ) -> Self { + let logger = logger.new(o!("mc.fog.view.router.uri" => uri.to_string())); + + let ch = ChannelBuilder::default_channel_builder(env).connect_to_uri(&uri, &logger); + let fog_view_router_client = FogViewRouterApiClient::new(ch); + let (request_sender, response_receiver) = fog_view_router_client + .request() + .expect("Could not retrieve grpc sender and receiver."); + + Self { + logger, + attest_cipher: None, + _fog_view_router_client: fog_view_router_client, + request_sender, + response_receiver, + uri, + verifier, + } + } + + fn is_attested(&self) -> bool { + self.attest_cipher.is_some() + } + + async fn attest(&mut self) -> Result { + // If we have an existing attestation, nuke it. + self.deattest(); + + let mut csprng = McRng::default(); + + let initiator = Start::new(self.uri.responder_id()?.to_string()); + + let init_input = ClientInitiate::::default(); + let (initiator, auth_request_output) = initiator.try_next(&mut csprng, init_input)?; + + let attested_message: AuthMessage = auth_request_output.into(); + let mut request = FogViewRouterRequest::new(); + request.set_auth(attested_message); + self.request_sender + .send((request.clone(), grpcio::WriteFlags::default())) + .await?; + + let mut response = self + .response_receiver + .try_next() + .await? 
+ .ok_or(Error::ResponseNotReceived)?; + let auth_response_msg = response.take_auth(); + + // Process server response, check if key exchange is successful + let auth_response_event = + AuthResponseInput::new(auth_response_msg.into(), self.verifier.clone()); + let (initiator, verification_report) = + initiator.try_next(&mut csprng, auth_response_event)?; + + self.attest_cipher = Some(initiator); + + Ok(verification_report) + } + + fn deattest(&mut self) { + if self.is_attested() { + log::trace!(self.logger, "Tearing down existing attested connection."); + self.attest_cipher = None; + } + } + + /// Makes streaming requests to the fog view router service. + pub async fn query( + &mut self, + start_from_user_event_id: i64, + start_from_block_index: u64, + search_keys: Vec>, + ) -> Result { + log::trace!(self.logger, "Query was called"); + if !self.is_attested() { + let verification_report = self.attest().await; + verification_report?; + } + + let plaintext_request = QueryRequest { + get_txos: search_keys, + }; + + let req_aad = QueryRequestAAD { + start_from_user_event_id, + start_from_block_index, + }; + + let aad = mc_util_serial::encode(&req_aad); + + let msg = { + let attest_cipher = self + .attest_cipher + .as_mut() + .expect("no enclave_connection even though attest succeeded"); + + let mut msg = Message::new(); + msg.set_channel_id(Vec::from(attest_cipher.binding())); + msg.set_aad(aad.clone()); + + let plaintext_bytes = mc_util_serial::encode(&plaintext_request); + + let request_ciphertext = attest_cipher.encrypt(&aad, &plaintext_bytes)?; + msg.set_data(request_ciphertext); + msg + }; + let mut request = FogViewRouterRequest::new(); + request.set_query(msg); + + self.request_sender + .send((request.clone(), grpcio::WriteFlags::default())) + .await?; + + let message = self + .response_receiver + .try_next() + .await? + .ok_or(Error::ResponseNotReceived)? 
+ .take_query(); + + { + let attest_cipher = self + .attest_cipher + .as_mut() + .expect("no enclave_connection even though attest succeeded"); + + let plaintext_bytes = attest_cipher.decrypt(message.get_aad(), message.get_data())?; + let plaintext_response: QueryResponse = mc_util_serial::decode(&plaintext_bytes)?; + Ok(plaintext_response) + } + } +} + +impl Drop for FogViewRouterGrpcClient { + fn drop(&mut self) { + block_on(self.request_sender.close()).expect("Couldn't close the router request sender"); + } +} + +/// Errors related to the Fog View Router Client. +#[derive(Debug)] +pub enum Error { + /// Decode errors. + Decode(DecodeError), + + /// Uri conversion errors. + UriConversion(UriConversionError), + + /// Cipher errors. + Cipher(CipherError), + + /// Attestation errors. + Attestation(AttestAkeError), + + /// Grpc errors. + Grpc(grpcio::Error), + + /// Response not received + ResponseNotReceived, +} + +impl From for Error { + fn from(err: DecodeError) -> Self { + Self::Decode(err) + } +} + +impl From for Error { + fn from(err: CipherError) -> Self { + Self::Cipher(err) + } +} + +impl From for Error { + fn from(err: grpcio::Error) -> Self { + Self::Grpc(err) + } +} + +impl From for Error { + fn from(err: UriConversionError) -> Self { + Self::UriConversion(err) + } +} + +impl From for Error { + fn from(err: AttestAkeError) -> Self { + Self::Attestation(err) + } +} diff --git a/fog/view/connection/src/lib.rs b/fog/view/connection/src/lib.rs index eae30212b0..2868c783c4 100644 --- a/fog/view/connection/src/lib.rs +++ b/fog/view/connection/src/lib.rs @@ -5,6 +5,8 @@ #![deny(missing_docs)] +pub mod fog_view_router_client; + use grpcio::{ChannelBuilder, Environment}; use mc_attest_verifier::Verifier; use mc_common::{ diff --git a/fog/view/enclave/Cargo.toml b/fog/view/enclave/Cargo.toml index 84276d76ff..c39db07cdc 100644 --- a/fog/view/enclave/Cargo.toml +++ b/fog/view/enclave/Cargo.toml @@ -12,6 +12,7 @@ mc-attest-core = { path = "../../../attest/core" } 
mc-attest-enclave-api = { path = "../../../attest/enclave-api" } mc-attest-verifier = { path = "../../../attest/verifier" } mc-common = { path = "../../../common", features = ["log"] } +mc-crypto-ake-enclave = { path = "../../../crypto/ake/enclave" } mc-crypto-keys = { path = "../../../crypto/keys" } mc-enclave-boundary = { path = "../../../enclave-boundary" } mc-sgx-debug-edl = { path = "../../../sgx/debug-edl" } diff --git a/fog/view/enclave/api/Cargo.toml b/fog/view/enclave/api/Cargo.toml index f26b5fa27d..fa4e5bdacb 100644 --- a/fog/view/enclave/api/Cargo.toml +++ b/fog/view/enclave/api/Cargo.toml @@ -11,7 +11,9 @@ rust-version = { workspace = true } mc-attest-core = { path = "../../../../attest/core", default-features = false } mc-attest-enclave-api = { path = "../../../../attest/enclave-api", default-features = false } mc-common = { path = "../../../../common", default-features = false } +mc-crypto-ake-enclave = { path = "../../../../crypto/ake/enclave", default-features = false } mc-crypto-keys = { path = "../../../../crypto/keys", default-features = false } +mc-crypto-noise = { path = "../../../../crypto/noise", default-features = false } mc-sgx-compat = { path = "../../../../sgx/compat", default-features = false } mc-sgx-report-cache-api = { path = "../../../../sgx/report-cache/api" } mc-sgx-types = { path = "../../../../sgx/types", default-features = false } diff --git a/fog/view/enclave/api/src/lib.rs b/fog/view/enclave/api/src/lib.rs index 9a4ec82dd8..fcee56655e 100644 --- a/fog/view/enclave/api/src/lib.rs +++ b/fog/view/enclave/api/src/lib.rs @@ -15,12 +15,14 @@ use displaydoc::Display; use mc_attest_core::{Quote, Report, SgxError, TargetInfo, VerificationReport}; use mc_attest_enclave_api::{ ClientAuthRequest, ClientAuthResponse, ClientSession, EnclaveMessage, - Error as AttestEnclaveError, + Error as AttestEnclaveError, NonceAuthRequest, NonceAuthResponse, NonceSession, + SealedClientMessage, }; use mc_common::ResponderId; use 
mc_crypto_keys::X25519Public; +use mc_crypto_noise::CipherError; use mc_fog_recovery_db_iface::FogUserEvent; -use mc_fog_types::ETxOutRecord; +use mc_fog_types::{view::MultiViewStoreQueryResponse, ETxOutRecord}; use mc_sgx_compat::sync::PoisonError; use mc_sgx_report_cache_api::ReportableEnclave; use mc_sgx_types::{sgx_enclave_id_t, sgx_status_t}; @@ -82,8 +84,28 @@ pub enum ViewEnclaveRequest { /// An encrypted fog_types::view::QueryRequest /// Respond with fog_types::view::QueryResponse Query(EnclaveMessage, UntrustedQueryResponse), + /// An encrypted fog_types::view::QueryRequest + /// Respond with fog_types::view::QueryResponse. + QueryStore(EnclaveMessage, UntrustedQueryResponse), /// Request from untrusted to add encrypted tx out records to ORAM AddRecords(Vec), + /// Takes a client query message and returns a SealedClientMessage + /// sealed for the current enclave. + DecryptAndSealQuery(EnclaveMessage), + /// Takes a sealed fog_types::view::QueryRequest and returns a list of + /// fog_types::view::QueryRequest. + CreateMultiViewStoreQuery(SealedClientMessage), + /// Begin a client connection to a Fog View Store discovered after + /// initialization. + ViewStoreInit(ResponderId), + /// Complete the client connection to a Fog View store that accepted our + /// client auth request. This is meant to be called after [ViewStoreInit]. + ViewStoreConnect(ResponderId, NonceAuthResponse), + /// Accept a connection to a frontend. + FrontendAccept(NonceAuthRequest), + /// Collates shard query responses into a single query response for the + /// client. + CollateQueryResponses(SealedClientMessage, Vec), } /// The parameters needed to initialize the view enclave @@ -123,6 +145,23 @@ pub trait ViewEnclaveApi: ReportableEnclave { /// Destroy a peer association fn client_close(&self, channel_id: ClientSession) -> Result<()>; + /// Begin a connection to a Fog View Store. The enclave calling this method + /// will act as a client to the Fog View Store. 
+ fn view_store_init(&self, view_store_id: ResponderId) -> Result; + + /// Accept a connection to a Fog View Router instance acting as a frontend + /// to the Fog View Store. + fn frontend_accept(&self, req: NonceAuthRequest) -> Result<(NonceAuthResponse, NonceSession)>; + + /// Complete the connection to a Fog View Store that has accepted our + /// ClientAuthRequest. This is meant to be called after the enclave has + /// initialized and discovers a new Fog View Store. + fn view_store_connect( + &self, + view_store_id: ResponderId, + view_store_auth_response: NonceAuthResponse, + ) -> Result<()>; + /// Service a user's encrypted QueryRequest fn query( &self, @@ -130,11 +169,44 @@ pub trait ViewEnclaveApi: ReportableEnclave { untrusted_query_response: UntrustedQueryResponse, ) -> Result>; + /// Service a frontend's query request. Intended to be used by a Fog View + /// Store. + fn query_store( + &self, + payload: EnclaveMessage, + untrusted_query_response: UntrustedQueryResponse, + ) -> Result>; + /// SERVER-FACING /// Add encrypted tx out records from the fog recovery db to the view /// enclave's ORAM fn add_records(&self, records: Vec) -> Result<()>; + + /// Decrypts a client query message and converts it into a + /// SealedClientMessage which can be unsealed multiple times to + /// construct the MultiViewStoreQuery. + fn decrypt_and_seal_query( + &self, + client_query: EnclaveMessage, + ) -> Result; + + /// Transforms a client query request into a list of query request data. + /// + /// The returned list is meant to be used to construct the + /// MultiViewStoreQuery, which is sent to each shard. + fn create_multi_view_store_query_data( + &self, + sealed_query: SealedClientMessage, + ) -> Result>>; + + /// Receives all of the shards' query responses and collates them into one + /// query response for the client. 
+ fn collate_shard_query_responses( + &self, + sealed_query: SealedClientMessage, + shard_query_responses: Vec, + ) -> Result>; } /// Helper trait which reduces boiler-plate in untrusted side @@ -188,6 +260,10 @@ pub enum Error { Poison, /// Enclave not initialized EnclaveNotInitialized, + /// Cipher encryption failed: {0} + Cipher(CipherError), + /// Fog View Shard query response collation error. + QueryResponseCollation, } impl From for Error { @@ -243,3 +319,9 @@ impl From for Error { Error::AddRecords(src) } } + +impl From for Error { + fn from(src: CipherError) -> Self { + Error::Cipher(src) + } +} diff --git a/fog/view/enclave/impl/Cargo.toml b/fog/view/enclave/impl/Cargo.toml index b367cc025c..d140abec6b 100644 --- a/fog/view/enclave/impl/Cargo.toml +++ b/fog/view/enclave/impl/Cargo.toml @@ -7,27 +7,27 @@ license = "GPL-3.0" rust-version = { workspace = true } [dependencies] -# mobilecoin +aes-gcm = "0.10.1" +aligned-cmov = "2.2" +static_assertions = "1.1.0" + +mc-attest-ake = { path = "../../../../attest/ake", default-features = false } mc-attest-core = { path = "../../../../attest/core", default-features = false } mc-attest-enclave-api = { path = "../../../../attest/enclave-api", default-features = false } mc-common = { path = "../../../../common", default-features = false } mc-crypto-ake-enclave = { path = "../../../../crypto/ake/enclave" } mc-crypto-keys = { path = "../../../../crypto/keys", default-features = false } mc-crypto-rand = { path = "../../../../crypto/rand", default-features = false } -mc-sgx-compat = { path = "../../../../sgx/compat", default-features = false } -mc-sgx-report-cache-api = { path = "../../../../sgx/report-cache/api" } -mc-util-serial = { path = "../../../../util/serial", default-features = false } - -# mc-oblivious -aligned-cmov = "2.2" -mc-oblivious-map = "2.2" -mc-oblivious-ram = "2.2" -mc-oblivious-traits = "2.2" - -# fog mc-fog-recovery-db-iface = { path = "../../../recovery_db_iface" } mc-fog-types = { path = 
"../../../types" } mc-fog-view-enclave-api = { path = "../api" } +mc-oblivious-map = "2.2" +mc-oblivious-ram = "2.2" +mc-oblivious-traits = "2.2" +mc-sgx-compat = { path = "../../../../sgx/compat", default-features = false } +mc-sgx-report-cache-api = { path = "../../../../sgx/report-cache/api" } +mc-util-serial = { path = "../../../../util/serial", default-features = false } [dev-dependencies] +itertools = "0.10.3" mc-common = { path = "../../../../common", features = ["loggers"] } diff --git a/fog/view/enclave/impl/src/e_tx_out_store.rs b/fog/view/enclave/impl/src/e_tx_out_store.rs index 1a0f5fcee9..5c541e902f 100644 --- a/fog/view/enclave/impl/src/e_tx_out_store.rs +++ b/fog/view/enclave/impl/src/e_tx_out_store.rs @@ -14,7 +14,7 @@ use aligned_cmov::{ use alloc::boxed::Box; use mc_common::logger::Logger; use mc_crypto_rand::McRng; -use mc_fog_types::view::{TxOutSearchResult, TxOutSearchResultCode}; +use mc_fog_types::view::{FixedTxOutSearchResult, TxOutSearchResultCode, FIXED_CIPHERTEXT_LENGTH}; use mc_fog_view_enclave_api::AddRecordsError; use mc_oblivious_map::CuckooHashTableCreator; use mc_oblivious_ram::PathORAM4096Z4Creator; @@ -134,11 +134,14 @@ impl> ETxOutStore Ok(()) } - pub fn find_record(&mut self, search_key: &[u8]) -> TxOutSearchResult { - let mut result = TxOutSearchResult { + // Should this return a FixedTxOutSearchResult or just a TxOutSearchResult? + pub fn find_record(&mut self, search_key: &[u8]) -> FixedTxOutSearchResult { + let mut result = FixedTxOutSearchResult { search_key: search_key.to_vec(), result_code: TxOutSearchResultCode::InternalError as u32, - ciphertext: vec![0u8; ValueSize::USIZE - 1 - self.last_ciphertext_size_byte as usize], + ciphertext: vec![0u8; FIXED_CIPHERTEXT_LENGTH], + // Use zero as the default. This value will be updated in every scenario. + payload_length: 0, }; // Early return for bad search key @@ -181,12 +184,15 @@ impl> ETxOutStore ); } - // Copy the data in value[1..] 
to result.ciphertext, resizing if needed - result - .ciphertext - .resize(ValueSize::USIZE - 1 - value[0] as usize, 0u8); - let data_end = ValueSize::USIZE - value[0] as usize; - result.ciphertext.copy_from_slice(&value[1..data_end]); + // To preserve constant time execution, we always copy `ValueSize::USIZE - 1` + // bytes. To ensure the copy doesn't panic, assert that the length to + // copy is less than the maximum length that ciphertext can be, which is + // `FIXED_CIPHERTEXT_LENGTH`. + const LENGTH_TO_COPY: usize = ValueSize::USIZE - 1; + static_assertions::const_assert!(LENGTH_TO_COPY < FIXED_CIPHERTEXT_LENGTH); + + result.ciphertext[..LENGTH_TO_COPY].copy_from_slice(&value[1..(LENGTH_TO_COPY + 1)]); + result.payload_length = (ValueSize::USIZE - 1 - (value[0] as usize)) as u32; result } diff --git a/fog/view/enclave/impl/src/lib.rs b/fog/view/enclave/impl/src/lib.rs index f6578c2fa0..fd9087e2de 100644 --- a/fog/view/enclave/impl/src/lib.rs +++ b/fog/view/enclave/impl/src/lib.rs @@ -8,17 +8,29 @@ extern crate alloc; mod e_tx_out_store; -use e_tx_out_store::{ETxOutStore, StorageDataSize, StorageMetaSize}; +mod oblivious_utils; +mod types; +use crate::types::get_block_data; use alloc::vec::Vec; +use e_tx_out_store::{ETxOutStore, StorageDataSize, StorageMetaSize}; use mc_attest_core::{IasNonce, Quote, QuoteNonce, Report, TargetInfo, VerificationReport}; -use mc_attest_enclave_api::{ClientAuthRequest, ClientAuthResponse, ClientSession, EnclaveMessage}; -use mc_common::logger::{log, Logger}; +use mc_attest_enclave_api::{ + ClientAuthRequest, ClientAuthResponse, ClientSession, EnclaveMessage, NonceAuthRequest, + NonceAuthResponse, NonceSession, SealedClientMessage, +}; +use mc_common::{ + logger::{log, Logger}, + ResponderId, +}; use mc_crypto_ake_enclave::{AkeEnclaveState, NullIdentity}; use mc_crypto_keys::X25519Public; use mc_fog_recovery_db_iface::FogUserEvent; use mc_fog_types::{ - view::{QueryRequest, QueryResponse}, + view::{ + FixedTxOutSearchResult, 
MultiViewStoreQueryResponse, QueryRequest, QueryResponse, + TxOutSearchResult, + }, ETxOutRecord, }; use mc_fog_view_enclave_api::{ @@ -27,6 +39,7 @@ use mc_fog_view_enclave_api::{ use mc_oblivious_traits::ORAMStorageCreator; use mc_sgx_compat::sync::Mutex; use mc_sgx_report_cache_api::{ReportableEnclave, Result as ReportableEnclaveResult}; +use types::{CommonShardData, DecryptedMultiViewStoreQueryResponse, LastKnownData}; pub struct ViewEnclave where @@ -53,6 +66,64 @@ where logger, } } + + fn query_impl( + &self, + plaintext_request: &[u8], + untrusted_query_response: UntrustedQueryResponse, + ) -> Result> { + let req: QueryRequest = mc_util_serial::decode(plaintext_request).map_err(|e| { + log::error!(self.logger, "Could not decode user request: {}", e); + Error::ProstDecode + })?; + + // Prepare the untrusted part of the response. + let mut missed_block_ranges = Vec::new(); + let mut rng_records = Vec::new(); + let mut decommissioned_ingest_invocations = Vec::new(); + + for event in untrusted_query_response.user_events.into_iter() { + match event { + FogUserEvent::NewRngRecord(rng_record) => rng_records.push(rng_record), + + FogUserEvent::DecommissionIngestInvocation(decommissioned_ingest_invocation) => { + decommissioned_ingest_invocations.push(decommissioned_ingest_invocation) + } + + FogUserEvent::MissingBlocks(range) => missed_block_ranges.push(range), + } + } + + let mut resp = QueryResponse { + highest_processed_block_count: untrusted_query_response.highest_processed_block_count, + highest_processed_block_signature_timestamp: untrusted_query_response + .highest_processed_block_signature_timestamp, + next_start_from_user_event_id: untrusted_query_response.next_start_from_user_event_id, + missed_block_ranges, + rng_records, + decommissioned_ingest_invocations, + tx_out_search_results: Default::default(), + last_known_block_count: untrusted_query_response.last_known_block_count, + last_known_block_cumulative_txo_count: untrusted_query_response + 
.last_known_block_cumulative_txo_count, + fixed_tx_out_search_results: Default::default(), + }; + + // Do the txos part, scope lock of e_tx_out_store + { + let mut lk = self.e_tx_out_store.lock()?; + let store = lk.as_mut().ok_or(Error::EnclaveNotInitialized)?; + + resp.fixed_tx_out_search_results = req + .get_txos + .iter() + .map(|key| store.find_record(&key[..])) + .collect(); + } + + let response_plaintext_bytes = mc_util_serial::encode(&resp); + Ok(response_plaintext_bytes) + } } impl ReportableEnclave for ViewEnclave @@ -119,70 +190,180 @@ where ) -> Result> { let channel_id = msg.channel_id.clone(); let user_plaintext = self.ake.client_decrypt(msg)?; + let response_plaintext_bytes = + self.query_impl(&user_plaintext, untrusted_query_response)?; + let response = self + .ake + .client_encrypt(&channel_id, &[], &response_plaintext_bytes)?; - let req: QueryRequest = mc_util_serial::decode(&user_plaintext).map_err(|e| { - log::error!(self.logger, "Could not decode user request: {}", e); - Error::ProstDecode - })?; - - // Prepare the untrusted part of the response. 
- let mut missed_block_ranges = Vec::new(); - let mut rng_records = Vec::new(); - let mut decommissioned_ingest_invocations = Vec::new(); + Ok(response.data) + } - for event in untrusted_query_response.user_events.into_iter() { - match event { - FogUserEvent::NewRngRecord(rng_record) => rng_records.push(rng_record), + fn query_store( + &self, + msg: EnclaveMessage, + untrusted_query_response: UntrustedQueryResponse, + ) -> Result> { + let channel_id = msg.channel_id.clone(); + let user_plaintext = self.ake.frontend_decrypt(msg)?; + let response_plaintext_bytes = + self.query_impl(&user_plaintext, untrusted_query_response)?; + let response = self + .ake + .frontend_encrypt(&channel_id, &[], &response_plaintext_bytes)?; - FogUserEvent::DecommissionIngestInvocation(decommissioned_ingest_invocation) => { - decommissioned_ingest_invocations.push(decommissioned_ingest_invocation) - } + Ok(response) + } - FogUserEvent::MissingBlocks(range) => missed_block_ranges.push(range), - } + fn add_records(&self, records: Vec) -> Result<()> { + let mut lk = self.e_tx_out_store.lock()?; + let store = lk.as_mut().ok_or(Error::EnclaveNotInitialized)?; + for rec in records { + store.add_record(&rec.search_key, &rec.payload)?; } - let mut resp = QueryResponse { - highest_processed_block_count: untrusted_query_response.highest_processed_block_count, - highest_processed_block_signature_timestamp: untrusted_query_response - .highest_processed_block_signature_timestamp, - next_start_from_user_event_id: untrusted_query_response.next_start_from_user_event_id, - missed_block_ranges, - rng_records, - decommissioned_ingest_invocations, - tx_out_search_results: Default::default(), - last_known_block_count: untrusted_query_response.last_known_block_count, - last_known_block_cumulative_txo_count: untrusted_query_response - .last_known_block_cumulative_txo_count, - }; + Ok(()) + } - // Do the txos part, scope lock of e_tx_out_store - { - let mut lk = self.e_tx_out_store.lock()?; - let store = 
lk.as_mut().ok_or(Error::EnclaveNotInitialized)?; + /// Decrypts a client query message and converts it into a + /// SealedClientMessage which can be unsealed multiple times to + /// construct the MultiViewStoreQuery. + fn decrypt_and_seal_query( + &self, + client_query: EnclaveMessage, + ) -> Result { + Ok(self.ake.decrypt_client_message_for_enclave(client_query)?) + } - resp.tx_out_search_results = req - .get_txos - .iter() - .map(|key| store.find_record(&key[..])) - .collect(); - } + /// Takes in a client's query request and returns a list of query requests + /// to be sent off to each Fog View Store shard. + fn create_multi_view_store_query_data( + &self, + sealed_query: SealedClientMessage, + ) -> Result>> { + Ok(self + .ake + .reencrypt_sealed_message_for_backends(&sealed_query)?) + } - let response_plaintext_bytes = mc_util_serial::encode(&resp); + fn view_store_init(&self, view_store_id: ResponderId) -> Result { + Ok(self.ake.backend_init(view_store_id)?) + } - let response = self + fn view_store_connect( + &self, + view_store_id: ResponderId, + view_store_auth_response: NonceAuthResponse, + ) -> Result<()> { + Ok(self .ake - .client_encrypt(&channel_id, &[], &response_plaintext_bytes)?; + .backend_connect(view_store_id, view_store_auth_response)?) + } - Ok(response.data) + fn frontend_accept(&self, req: NonceAuthRequest) -> Result<(NonceAuthResponse, NonceSession)> { + Ok(self.ake.frontend_accept(req)?) 
} - fn add_records(&self, records: Vec) -> Result<()> { - let mut lk = self.e_tx_out_store.lock()?; - let store = lk.as_mut().ok_or(Error::EnclaveNotInitialized)?; - for rec in records { - store.add_record(&rec.search_key, &rec.payload)?; + fn collate_shard_query_responses( + &self, + sealed_query: SealedClientMessage, + shard_query_responses: Vec, + ) -> Result> { + if shard_query_responses.is_empty() { + return Ok(EnclaveMessage::default()); } - Ok(()) + let channel_id = sealed_query.channel_id.clone(); + let client_query_plaintext = self.ake.unseal(&sealed_query)?; + let client_query_request: QueryRequest = mc_util_serial::decode(&client_query_plaintext) + .map_err(|e| { + log::error!(self.logger, "Could not decode client query request: {}", e); + Error::ProstDecode + })?; + + let client_query_response = + self.create_client_query_response(client_query_request, shard_query_responses)?; + let response_plaintext_bytes = mc_util_serial::encode(&client_query_response); + let response = + self.ake + .client_encrypt(&channel_id, &sealed_query.aad, &response_plaintext_bytes)?; + + Ok(response) + } +} + +impl ViewEnclave +where + OSC: ORAMStorageCreator, +{ + fn create_client_query_response( + &self, + client_query_request: QueryRequest, + shard_query_responses: Vec, + ) -> Result { + let shard_query_responses = shard_query_responses + .into_iter() + .map(|multi_view_store_query_response| { + let plaintext_bytes = self.ake.backend_decrypt( + &multi_view_store_query_response.store_responder_id, + &multi_view_store_query_response.encrypted_query_response, + )?; + let query_response: QueryResponse = mc_util_serial::decode(&plaintext_bytes)?; + + Ok(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range: multi_view_store_query_response.block_range, + }) + }) + .collect::>>()?; + + Self::create_response(client_query_request, shard_query_responses) + } + + fn create_response( + client_query_request: QueryRequest, + mut responses: Vec, + ) -> Result { + let mut 
result: QueryResponse = QueryResponse::default(); + let last_known_data = LastKnownData::from(responses.as_slice()); + result.last_known_block_count = last_known_data.last_known_block_count; + result.last_known_block_cumulative_txo_count = + last_known_data.last_known_block_cumulative_txo_count; + + let shared_data: CommonShardData = CommonShardData::from(responses.as_slice()); + result.missed_block_ranges = shared_data.missed_block_ranges; + result.rng_records = shared_data.rng_records; + result.decommissioned_ingest_invocations = shared_data.decommissioned_ingest_invocations; + result.next_start_from_user_event_id = shared_data.next_start_from_user_event_id; + + let block_data = get_block_data(responses.as_mut_slice(), &result.missed_block_ranges); + result.highest_processed_block_count = block_data.highest_processed_block_count; + result.highest_processed_block_signature_timestamp = + block_data.highest_processed_block_signature_timestamp; + + result.fixed_tx_out_search_results = + Self::get_collated_tx_out_search_results(client_query_request, &responses)?; + result.tx_out_search_results = result + .fixed_tx_out_search_results + .iter() + .cloned() + .map(TxOutSearchResult::from) + .collect(); + + Ok(result) + } + + fn get_collated_tx_out_search_results( + client_query_request: QueryRequest, + responses: &[DecryptedMultiViewStoreQueryResponse], + ) -> Result> { + let plaintext_search_results = responses + .iter() + .flat_map(|response| response.query_response.fixed_tx_out_search_results.clone()) + .collect::>(); + + oblivious_utils::collate_shard_tx_out_search_results( + client_query_request.get_txos, + plaintext_search_results, + ) } } diff --git a/fog/view/enclave/impl/src/oblivious_utils.rs b/fog/view/enclave/impl/src/oblivious_utils.rs new file mode 100644 index 0000000000..eb44ddb183 --- /dev/null +++ b/fog/view/enclave/impl/src/oblivious_utils.rs @@ -0,0 +1,627 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +//! 
Contains methods that allow a Fog View Router enclave to combine all of the +//! Fog View Shard's query responses into one query response that'll be returned +//! for the client. + +use crate::Result; + +use aligned_cmov::{ + subtle::{Choice, ConditionallySelectable, ConstantTimeEq}, + CMov, +}; +use alloc::vec::Vec; +use mc_fog_types::view::{FixedTxOutSearchResult, TxOutSearchResultCode}; + +pub fn collate_shard_tx_out_search_results( + client_search_keys: Vec>, + shard_tx_out_search_results: Vec, +) -> Result> { + // The default [FixedTxOutSearchResult] has a [TxOutSearchResultCode::NotFound] + // `result_code`. + // + // Warning: Do not change this without careful thought because the logic in the + // [should_over_write_tx_out_search_result] method assumes that + // the default code is NotFound. + let mut client_tx_out_search_results: Vec = client_search_keys + .into_iter() + .map(FixedTxOutSearchResult::new_not_found) + .collect(); + + for shard_tx_out_search_result in shard_tx_out_search_results.iter() { + for client_tx_out_search_result in client_tx_out_search_results.iter_mut() { + maybe_overwrite_tx_out_search_result( + client_tx_out_search_result, + shard_tx_out_search_result, + ); + } + } + + Ok(client_tx_out_search_results) +} + +fn maybe_overwrite_tx_out_search_result( + client_tx_out_search_result: &mut FixedTxOutSearchResult, + shard_tx_out_search_result: &FixedTxOutSearchResult, +) { + let should_overwrite_tx_out_search_result = should_overwrite_tx_out_search_result( + client_tx_out_search_result, + shard_tx_out_search_result, + ); + client_tx_out_search_result + .payload_length + .conditional_assign( + &(shard_tx_out_search_result.payload_length), + should_overwrite_tx_out_search_result, + ); + + for idx in 0..shard_tx_out_search_result.ciphertext.len() { + client_tx_out_search_result.ciphertext[idx].conditional_assign( + &shard_tx_out_search_result.ciphertext[idx], + should_overwrite_tx_out_search_result, + ); + } + 
client_tx_out_search_result.result_code.cmov( + should_overwrite_tx_out_search_result, + &shard_tx_out_search_result.result_code, + ); +} + +fn should_overwrite_tx_out_search_result( + client_tx_out_search_result: &FixedTxOutSearchResult, + shard_tx_out_search_result: &FixedTxOutSearchResult, +) -> Choice { + let do_search_keys_match = client_tx_out_search_result + .search_key + .ct_eq(&shard_tx_out_search_result.search_key); + + let client_tx_out_search_result_code = client_tx_out_search_result.result_code; + let shard_tx_out_search_result_code = shard_tx_out_search_result.result_code; + + let client_code_is_found: Choice = + client_tx_out_search_result_code.ct_eq(&(TxOutSearchResultCode::Found as u32)); + let client_code_is_not_found: Choice = + client_tx_out_search_result_code.ct_eq(&(TxOutSearchResultCode::NotFound as u32)); + + let shard_code_is_found: Choice = + shard_tx_out_search_result_code.ct_eq(&(TxOutSearchResultCode::Found as u32)); + + let shard_code_is_retryable_error = + is_code_retryable_error(shard_tx_out_search_result.result_code); + let shard_code_is_bad_search_key = + shard_tx_out_search_result_code.ct_eq(&(TxOutSearchResultCode::BadSearchKey as u32)); + + // We make the same query to several shards and get several responses, and + // this logic determines how we fill the one client response. + // At a high level, we want to prioritize "found" responses, and then "bad + // search key" responses, which means the argument was invalid. After that + // the other two responses are "retriable" errors that the client will retry + // after a backoff. The "not found" response is the default response and + // gets overwritten by any other response. 
+ do_search_keys_match + // Always write a Found code + & (shard_code_is_found + // Write a BadSearchKey code IFF the client code is + // -InternalError, + // -RateLimitedError + // -NotFound + // -BadSearchKey + | (shard_code_is_bad_search_key & !client_code_is_found)) + // Write an InternalError OR RateLimited code IFF the code is NotFound. + | (shard_code_is_retryable_error & client_code_is_not_found) +} + +fn is_code_retryable_error(result_code: u32) -> Choice { + let is_internal_error = result_code.ct_eq(&(TxOutSearchResultCode::InternalError as u32)); + let is_rate_limited = result_code.ct_eq(&(TxOutSearchResultCode::RateLimited as u32)); + + is_internal_error | is_rate_limited +} + +#[cfg(test)] +mod tests { + extern crate std; + + use super::*; + use itertools::Itertools; + use mc_fog_types::view::FIXED_CIPHERTEXT_LENGTH; + use std::{collections::HashSet, vec}; + + fn create_test_tx_out_search_result( + search_key: Vec, + ciphertext_number: u8, + ciphertext_length: usize, + result_code: TxOutSearchResultCode, + ) -> FixedTxOutSearchResult { + FixedTxOutSearchResult { + search_key, + result_code: result_code as u32, + ciphertext: vec![ciphertext_number; ciphertext_length], + payload_length: ciphertext_length as u32, + } + } + + #[test] + fn should_overwrite_tx_out_search_result_client_not_found_shard_has_tx_out_returns_true() { + let search_key = vec![0u8; 10]; + let client_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::NotFound, + ); + let shard_tx_out_search_result = create_test_tx_out_search_result( + search_key, + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::Found, + ); + + let result: bool = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + + assert!(result); + } + + #[test] + fn should_overwrite_tx_out_search_result_client_bad_search_key_shard_has_tx_out_returns_true() { + let 
search_key = vec![0u8; 10]; + let client_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::BadSearchKey, + ); + let shard_tx_out_search_result = create_test_tx_out_search_result( + search_key, + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::Found, + ); + + let result: bool = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + + assert!(result); + } + + #[test] + fn should_overwrite_tx_out_search_result_client_has_internal_error_shard_has_tx_out_returns_true( + ) { + let search_key = vec![0u8; 10]; + let client_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::InternalError, + ); + let shard_tx_out_search_result = create_test_tx_out_search_result( + search_key, + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::Found, + ); + + let result: bool = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + + assert!(result); + } + + #[test] + fn should_overwrite_tx_out_search_result_client_has_rate_limited_error_shard_has_tx_out_returns_true( + ) { + let search_key = vec![0u8; 10]; + let client_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::RateLimited, + ); + let shard_tx_out_search_result = create_test_tx_out_search_result( + search_key, + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::Found, + ); + + let result: bool = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + + assert!(result); + } + + #[test] + fn should_overwrite_tx_out_client_has_found_never_overwritten_returns_false_unless_shard_finds() + { + let search_key = vec![0u8; 10]; + let client_tx_out_search_result = 
create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::Found, + ); + + let mut shard_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::NotFound, + ); + let mut result: bool = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + assert!(!result); + + shard_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::BadSearchKey, + ); + result = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + assert!(!result); + + shard_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::InternalError, + ); + result = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + assert!(!result); + + shard_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::RateLimited, + ); + result = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + assert!(!result); + + shard_tx_out_search_result = create_test_tx_out_search_result( + search_key, + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::Found, + ); + result = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + assert!(result); + } + + #[test] + fn should_overwrite_tx_out_search_result_client_has_not_found_shard_has_retryable_error_returns_true( + ) { + let search_key = vec![0u8; 10]; + let client_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH 
- 1, + TxOutSearchResultCode::NotFound, + ); + + let mut shard_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::InternalError, + ); + let mut result: bool = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + assert!(result); + + shard_tx_out_search_result = create_test_tx_out_search_result( + search_key, + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::RateLimited, + ); + result = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + assert!(result); + } + + #[test] + fn should_overwrite_tx_out_search_result_client_has_bad_search_key_shard_has_retryable_error_returns_false( + ) { + let search_key = vec![0u8; 10]; + let client_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::BadSearchKey, + ); + + let mut shard_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::InternalError, + ); + let mut result: bool = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + assert!(!result); + + shard_tx_out_search_result = create_test_tx_out_search_result( + search_key, + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::RateLimited, + ); + result = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + assert!(!result); + } + + #[test] + fn should_overwrite_tx_out_search_result_client_has_bad_search_key() { + let search_key = vec![0u8; 10]; + let shard_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::BadSearchKey, + ); + + let mut 
client_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::RateLimited, + ); + let mut result: bool = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + assert!(result); + + client_tx_out_search_result = create_test_tx_out_search_result( + search_key, + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::RateLimited, + ); + result = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + assert!(result); + } + + #[test] + fn should_overwrite_tx_out_search_result_client_has_retryable_error_shard_has_not_found_returns_true( + ) { + let search_key = vec![0u8; 10]; + let client_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::BadSearchKey, + ); + + let mut shard_tx_out_search_result = create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::InternalError, + ); + let mut result: bool = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + assert!(!result); + + shard_tx_out_search_result = create_test_tx_out_search_result( + search_key, + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::RateLimited, + ); + result = should_overwrite_tx_out_search_result( + &client_tx_out_search_result, + &shard_tx_out_search_result, + ) + .into(); + assert!(!result); + } + + #[test] + fn collate_shard_query_responses_shards_find_all_tx_outs() { + let client_search_keys: Vec> = (0..10).map(|num| vec![num; 10]).collect(); + let shard_tx_out_search_results: Vec = client_search_keys + .iter() + .map(|search_key| { + create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + TxOutSearchResultCode::Found, + ) + }) + 
.collect(); + + let result = collate_shard_tx_out_search_results( + client_search_keys.clone(), + shard_tx_out_search_results, + ) + .unwrap(); + + let all_tx_out_found = result.iter().all(|tx_out_search_result| { + tx_out_search_result.result_code == TxOutSearchResultCode::Found as u32 + }); + assert!(all_tx_out_found); + + let result_client_search_keys: HashSet> = HashSet::from_iter( + result + .iter() + .map(|tx_out_search_result| tx_out_search_result.search_key.clone()), + ); + assert_eq!( + result_client_search_keys, + HashSet::from_iter(client_search_keys) + ); + } + + #[test] + fn collate_shard_query_responses_shards_one_not_found() { + let client_search_keys: Vec> = (0..10).map(|num| vec![num; 10]).collect(); + let shard_tx_out_search_results: Vec = client_search_keys + .iter() + .enumerate() + .map(|(i, search_key)| { + let result_code = match i { + 0 => TxOutSearchResultCode::NotFound, + _ => TxOutSearchResultCode::Found, + }; + create_test_tx_out_search_result( + search_key.clone(), + 0, + FIXED_CIPHERTEXT_LENGTH - 1, + result_code, + ) + }) + .collect(); + + let result = collate_shard_tx_out_search_results( + client_search_keys.clone(), + shard_tx_out_search_results, + ) + .unwrap(); + + let result_client_search_keys: HashSet> = HashSet::from_iter( + result + .iter() + .map(|tx_out_search_result| tx_out_search_result.search_key.clone()), + ); + assert_eq!( + result_client_search_keys, + HashSet::from_iter(client_search_keys) + ); + + let not_found_count = result + .iter() + .filter(|tx_out_search_result| { + tx_out_search_result.result_code == TxOutSearchResultCode::NotFound as u32 + }) + .count(); + assert_eq!(not_found_count, 1); + } + + #[test] + fn collate_shard_query_responses_ciphertext_is_greater_than_client_ciphertext_length_panics() { + let client_search_keys: Vec> = (0..10).map(|num| vec![num; 10]).collect(); + let shard_tx_out_search_results: Vec = client_search_keys + .iter() + .map(|search_key| FixedTxOutSearchResult { + search_key: 
search_key.clone(), + result_code: TxOutSearchResultCode::NotFound as u32, + ciphertext: vec![0u8; FIXED_CIPHERTEXT_LENGTH + 1], + payload_length: (FIXED_CIPHERTEXT_LENGTH + 1) as u32, + }) + .collect(); + + let result = std::panic::catch_unwind(|| { + collate_shard_tx_out_search_results( + client_search_keys.clone(), + shard_tx_out_search_results, + ) + }); + + assert!(result.is_err()); + } + + #[test] + fn collate_shard_query_responses_different_ciphertext_lengths_returns_correct_client_ciphertexts( + ) { + let client_search_keys: Vec> = (0..3).map(|num| vec![num; 10]).collect(); + let ciphertext_values = [5u8, 28u8, 128u8]; + let ciphertext_lengths: [u32; 3] = [1, 2, 3]; + let shard_tx_out_search_results: Vec = client_search_keys + .iter() + .enumerate() + .map(|(idx, search_key)| { + let ciphertext = vec![ciphertext_values[idx]; ciphertext_lengths[idx] as usize]; + let payload_length = ciphertext.len() as u32; + FixedTxOutSearchResult { + search_key: search_key.clone(), + result_code: TxOutSearchResultCode::Found as u32, + ciphertext, + payload_length, + } + }) + .collect(); + + let results: Vec = + collate_shard_tx_out_search_results(client_search_keys, shard_tx_out_search_results) + .unwrap() + .into_iter() + // Sort by ciphertext length (ascending) in order to know what each expected result + // should be. 
+ .sorted_by(|a, b| Ord::cmp(&a.ciphertext[0], &b.ciphertext[0])) + .collect(); + + let mut expected_first_result = [0u8; FIXED_CIPHERTEXT_LENGTH]; + expected_first_result[0] = ciphertext_values[0]; + assert_eq!(results[0].ciphertext, expected_first_result); + assert_eq!(results[0].payload_length, ciphertext_lengths[0]); + + let mut expected_second_result = [0u8; FIXED_CIPHERTEXT_LENGTH]; + expected_second_result[0] = ciphertext_values[1]; + expected_second_result[1] = ciphertext_values[1]; + assert_eq!(results[1].ciphertext, expected_second_result); + assert_eq!(results[1].payload_length, ciphertext_lengths[1]); + + let mut expected_third_result = [0u8; FIXED_CIPHERTEXT_LENGTH]; + expected_third_result[0] = ciphertext_values[2]; + expected_third_result[1] = ciphertext_values[2]; + expected_third_result[2] = ciphertext_values[2]; + assert_eq!(results[2].ciphertext, expected_third_result); + assert_eq!(results[2].payload_length, ciphertext_lengths[2]); + } +} diff --git a/fog/view/enclave/impl/src/types.rs b/fog/view/enclave/impl/src/types.rs new file mode 100644 index 0000000000..ce572b4e0b --- /dev/null +++ b/fog/view/enclave/impl/src/types.rs @@ -0,0 +1,1510 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +//! Helper structs for client `QueryResponse` collation. + +use alloc::vec::Vec; +use mc_common::HashSet; +use mc_fog_types::{ + common::BlockRange, + view::{DecommissionedIngestInvocation, QueryResponse, RngRecord}, +}; + +/// Helper struct that contains the decrypted `QueryResponse` and the +/// `BlockRange` the shard is responsible for. +#[derive(Clone)] +pub(crate) struct DecryptedMultiViewStoreQueryResponse { + /// Decrypted `QueryResponse` + pub(crate) query_response: QueryResponse, + /// The `BlockRange` that the shard is meant to process. 
+ pub(crate) block_range: BlockRange, +} + +/// Helper struct that contains block data for the client `QueryResponse` +#[derive(Clone)] +pub(crate) struct BlockData { + /// The highest processed block count that will be returned to the client. + pub(crate) highest_processed_block_count: u64, + /// The timestamp for the highest processed block count + pub(crate) highest_processed_block_signature_timestamp: u64, +} + +/// Helper struct that contains data associated with the "last known" fields in +/// the `QueryResponse`. +#[derive(Default)] +pub(crate) struct LastKnownData { + /// The globally maximum block count that any store has seen but not + /// necessarily processed. + pub(crate) last_known_block_count: u64, + /// The cumulative TxOut count associated with the last known block count. + pub(crate) last_known_block_cumulative_txo_count: u64, +} + +/// Helper struct that contains `QueryResponse` fields that should be shared +/// across all shards, but might not be do to distributed system latencies. +pub(crate) struct CommonShardData { + /// Blocks that Fog Ingest was unable to process. + pub(crate) missed_block_ranges: Vec, + /// All RNG records for a given user. + pub(crate) rng_records: Vec, + /// Any records of decommissioned ingest invocations, which implies that an + /// RNG will no longer be used. + pub(crate) decommissioned_ingest_invocations: Vec, + /// The index of the next user id event that the user should query. 
+ pub(crate) next_start_from_user_event_id: i64, +} + +impl BlockData { + pub(crate) fn new( + highest_processed_block_count: u64, + highest_processed_block_signature_timestamp: u64, + ) -> Self { + Self { + highest_processed_block_count, + highest_processed_block_signature_timestamp, + } + } +} +impl Default for BlockData { + fn default() -> Self { + Self { + highest_processed_block_count: u64::MIN, + highest_processed_block_signature_timestamp: u64::MIN, + } + } +} + +impl LastKnownData { + pub(crate) fn new( + last_known_block_count: u64, + last_known_block_cumulative_txo_count: u64, + ) -> Self { + Self { + last_known_block_count, + last_known_block_cumulative_txo_count, + } + } +} + +impl CommonShardData { + pub(crate) fn new( + missed_block_ranges: Vec, + rng_records: Vec, + decommissioned_ingest_invocations: Vec, + next_start_from_user_event_id: i64, + ) -> Self { + Self { + missed_block_ranges, + rng_records, + decommissioned_ingest_invocations, + next_start_from_user_event_id, + } + } +} + +pub(crate) fn get_block_data( + responses: &mut [DecryptedMultiViewStoreQueryResponse], + missed_block_ranges: &[BlockRange], +) -> BlockData { + responses.sort_unstable_by_key(|response| response.block_range.start_block); + + let mut result = BlockData::default(); + for response in responses.iter() { + let shard_highest_processed_block_count = + response.query_response.highest_processed_block_count; + if shard_highest_processed_block_count > result.highest_processed_block_count { + result = BlockData::new( + shard_highest_processed_block_count, + response + .query_response + .highest_processed_block_signature_timestamp, + ); + } + + // In this case, the shard hasn't processed all the blocks it's responsible for, + // and, as such, those blocks might not be processed so we should return this + // number. + // TODO: Consider implementing logic that accounts for overlapping block ranges. 
+ // If ranges overlap, then the next server might have processed those blocks + // that this shard did not process (but is responsible for). + if !is_shard_complete( + shard_highest_processed_block_count, + response.block_range.end_block, + missed_block_ranges, + ) { + return result; + } + } + + result +} + +fn is_shard_complete( + highest_processed_block_count: u64, + shard_end_block_index: u64, + missed_block_ranges: &[BlockRange], +) -> bool { + if highest_processed_block_count == shard_end_block_index { + return true; + } + + let remaining_block_range = + BlockRange::new(highest_processed_block_count, shard_end_block_index); + are_all_remaining_blocks_missing(remaining_block_range, missed_block_ranges) +} + +/// Given a set of remaining blocks, return true if all of them are missing. +fn are_all_remaining_blocks_missing( + remaining_block_range: BlockRange, + missed_block_ranges: &[BlockRange], +) -> bool { + (remaining_block_range.start_block..remaining_block_range.end_block).all(|block_index| { + missed_block_ranges + .iter() + .any(|missed_block_range| missed_block_range.contains(block_index)) + }) +} + +impl From<&[DecryptedMultiViewStoreQueryResponse]> for LastKnownData { + fn from(responses: &[DecryptedMultiViewStoreQueryResponse]) -> Self { + responses + .iter() + .max_by_key(|response| response.query_response.last_known_block_count) + .map_or_else(LastKnownData::default, |response| { + LastKnownData::new( + response.query_response.last_known_block_count, + response + .query_response + .last_known_block_cumulative_txo_count, + ) + }) + } +} + +impl From<&[DecryptedMultiViewStoreQueryResponse]> for CommonShardData { + fn from(responses: &[DecryptedMultiViewStoreQueryResponse]) -> Self { + let mut missed_block_ranges = HashSet::default(); + let mut rng_records = HashSet::default(); + let mut decommissioned_ingest_invocations = HashSet::default(); + let mut next_start_from_user_event_id = i64::MIN; + + for response in responses { + 
missed_block_ranges.extend(response.query_response.missed_block_ranges.clone()); + rng_records.extend(response.query_response.rng_records.clone()); + decommissioned_ingest_invocations.extend( + response + .query_response + .decommissioned_ingest_invocations + .clone(), + ); + next_start_from_user_event_id = core::cmp::max( + response.query_response.next_start_from_user_event_id, + next_start_from_user_event_id, + ); + } + + let missed_block_ranges = missed_block_ranges.into_iter().collect::>(); + let rng_records = rng_records.into_iter().collect::>(); + let decommissioned_ingest_invocations = decommissioned_ingest_invocations + .into_iter() + .collect::>(); + + CommonShardData::new( + missed_block_ranges, + rng_records, + decommissioned_ingest_invocations, + next_start_from_user_event_id, + ) + } +} + +#[cfg(test)] +mod last_known_data_tests { + use crate::{DecryptedMultiViewStoreQueryResponse, LastKnownData}; + use alloc::{vec, vec::Vec}; + use mc_fog_types::{common::BlockRange, view::QueryResponse}; + + fn create_query_response( + last_known_block_count: u64, + last_known_block_cumulative_txo_count: u64, + ) -> QueryResponse { + QueryResponse { + highest_processed_block_count: 0, + highest_processed_block_signature_timestamp: 0, + next_start_from_user_event_id: 0, + missed_block_ranges: vec![], + rng_records: vec![], + decommissioned_ingest_invocations: vec![], + tx_out_search_results: vec![], + last_known_block_count, + last_known_block_cumulative_txo_count, + fixed_tx_out_search_results: vec![], + } + } + + #[test] + fn different_last_known_block_counts() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + for i in 0..STORE_COUNT { + let last_known_block_count = ((i + 1) * 10) as u64; + let last_known_block_cumulative_txo_count = last_known_block_count * 2; + let query_response = create_query_response( + last_known_block_count, + last_known_block_cumulative_txo_count, + ); + let block_range = 
BlockRange::new(i as u64, last_known_block_count); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + } + + let last_response = decrypted_query_responses + .last() + .expect("Couldn't get last decrypted query response"); + let expected_last_known_block_count = last_response.query_response.last_known_block_count; + let expected_last_known_block_cumulative_txo_count = last_response + .query_response + .last_known_block_cumulative_txo_count; + + let result = LastKnownData::from(decrypted_query_responses.as_slice()); + + assert_eq!( + result.last_known_block_count, + expected_last_known_block_count + ); + assert_eq!( + result.last_known_block_cumulative_txo_count, + expected_last_known_block_cumulative_txo_count + ); + } + + #[test] + fn same_last_known_block_counts() { + const STORE_COUNT: usize = 4; + const LAST_KNOWN_BLOCK_COUNT: u64 = 100; + const LAST_KNOWN_BLOCK_CUMULATIVE_TXO_COUNT: u64 = 1000; + + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + for i in 0..STORE_COUNT { + let end_block_count = ((i + 1) * 25) as u64; + let query_response = create_query_response( + LAST_KNOWN_BLOCK_COUNT, + LAST_KNOWN_BLOCK_CUMULATIVE_TXO_COUNT, + ); + let block_range = BlockRange::new(i as u64, end_block_count); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + } + + let result = LastKnownData::from(decrypted_query_responses.as_slice()); + + assert_eq!(result.last_known_block_count, LAST_KNOWN_BLOCK_COUNT); + assert_eq!( + result.last_known_block_cumulative_txo_count, + LAST_KNOWN_BLOCK_CUMULATIVE_TXO_COUNT + ); + } +} + +#[cfg(test)] +mod shared_data_tests { + extern crate std; + use crate::{CommonShardData, DecryptedMultiViewStoreQueryResponse}; + use alloc::{vec, vec::Vec}; + use mc_fog_types::{ + 
common::BlockRange, + view::{DecommissionedIngestInvocation, KexRngPubkey, QueryResponse, RngRecord}, + }; + use std::collections::HashSet; + + fn create_query_response( + missed_block_ranges: Vec, + rng_records: Vec, + decommissioned_ingest_invocations: Vec, + next_start_from_user_event_id: i64, + ) -> QueryResponse { + QueryResponse { + highest_processed_block_count: 0, + highest_processed_block_signature_timestamp: 0, + next_start_from_user_event_id, + missed_block_ranges, + rng_records, + decommissioned_ingest_invocations, + tx_out_search_results: vec![], + last_known_block_count: 0, + last_known_block_cumulative_txo_count: 0, + fixed_tx_out_search_results: vec![], + } + } + + #[test] + fn responses_have_same_values() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + let missed_block_ranges = vec![ + BlockRange::new(0, 1), + BlockRange::new(10, 12), + BlockRange::new(33, 100), + BlockRange::new(100, 200), + ]; + + let mut rng_records = Vec::with_capacity(STORE_COUNT); + for i in 0..STORE_COUNT { + let egress_public_key = KexRngPubkey { + public_key: vec![i as u8; 32], + version: i as u32, + }; + let rng_record = RngRecord { + ingest_invocation_id: i as i64, + pubkey: egress_public_key, + start_block: 0, + }; + rng_records.push(rng_record); + } + + let mut decommissioned_ingest_invocations = Vec::with_capacity(STORE_COUNT); + for i in 0..STORE_COUNT { + let decommissioned_ingest_invocation = DecommissionedIngestInvocation { + ingest_invocation_id: i as i64, + last_ingested_block: 10, + }; + decommissioned_ingest_invocations.push(decommissioned_ingest_invocation); + } + + const NEXT_START_FROM_USER_EVENT_ID: i64 = 100; + + for i in 0..STORE_COUNT { + let query_response = create_query_response( + missed_block_ranges.clone(), + rng_records.clone(), + decommissioned_ingest_invocations.clone(), + NEXT_START_FROM_USER_EVENT_ID, + ); + let block_range = BlockRange::new(i as u64, (i + 1) as u64); + let 
decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + } + + let shared_data: CommonShardData = decrypted_query_responses.as_slice().into(); + + let actual_missed_block_ranges = + HashSet::<_>::from_iter(shared_data.missed_block_ranges.iter()); + let actual_rng_records = HashSet::<_>::from_iter(shared_data.rng_records.iter()); + let actual_decommissioned_ingest_invocations = + HashSet::<_>::from_iter(shared_data.decommissioned_ingest_invocations.iter()); + + let expected_missed_block_ranges = HashSet::<_>::from_iter(missed_block_ranges.iter()); + let expected_rng_records = HashSet::<_>::from_iter(rng_records.iter()); + let expected_decommissioned_ingest_invocations = + HashSet::<_>::from_iter(decommissioned_ingest_invocations.iter()); + + assert_eq!(actual_missed_block_ranges, expected_missed_block_ranges); + assert_eq!(actual_rng_records, expected_rng_records); + assert_eq!( + actual_decommissioned_ingest_invocations, + expected_decommissioned_ingest_invocations + ); + assert_eq!( + shared_data.next_start_from_user_event_id, + NEXT_START_FROM_USER_EVENT_ID + ); + } + + #[test] + fn responses_have_different_values() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + let missed_block_ranges = vec![ + BlockRange::new(0, 1), + BlockRange::new(10, 12), + BlockRange::new(33, 100), + BlockRange::new(100, 200), + ]; + + let mut rng_records = Vec::with_capacity(STORE_COUNT); + for i in 0..STORE_COUNT { + let egress_public_key = KexRngPubkey { + public_key: vec![i as u8; 32], + version: i as u32, + }; + let rng_record = RngRecord { + ingest_invocation_id: i as i64, + pubkey: egress_public_key, + start_block: 0, + }; + rng_records.push(rng_record); + } + + let mut decommissioned_ingest_invocations = Vec::with_capacity(STORE_COUNT); + for i in 0..STORE_COUNT { + let decommissioned_ingest_invocation = 
DecommissionedIngestInvocation { + ingest_invocation_id: i as i64, + last_ingested_block: 10, + }; + decommissioned_ingest_invocations.push(decommissioned_ingest_invocation); + } + + for i in 0..STORE_COUNT { + let missed_block_ranges = vec![missed_block_ranges[i].clone()]; + let rng_records = vec![rng_records[i].clone()]; + let decommissioned_ingest_invocations = + vec![decommissioned_ingest_invocations[i].clone()]; + + let query_response = create_query_response( + missed_block_ranges, + rng_records, + decommissioned_ingest_invocations, + (i + 1) as i64, + ); + let block_range = BlockRange::new(i as u64, (i + 1) as u64); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + } + + let shared_data: CommonShardData = decrypted_query_responses.as_slice().into(); + + let actual_missed_block_ranges = + HashSet::<_>::from_iter(shared_data.missed_block_ranges.iter()); + let actual_rng_records = HashSet::<_>::from_iter(shared_data.rng_records.iter()); + let actual_decommissioned_ingest_invocations = + HashSet::<_>::from_iter(shared_data.decommissioned_ingest_invocations.iter()); + + let expected_missed_block_ranges = HashSet::<_>::from_iter(missed_block_ranges.iter()); + let expected_rng_records = HashSet::<_>::from_iter(rng_records.iter()); + let expected_decommissioned_ingest_invocations = + HashSet::<_>::from_iter(decommissioned_ingest_invocations.iter()); + + assert_eq!(actual_missed_block_ranges, expected_missed_block_ranges); + assert_eq!(actual_rng_records, expected_rng_records); + assert_eq!( + actual_decommissioned_ingest_invocations, + expected_decommissioned_ingest_invocations + ); + assert_eq!( + shared_data.next_start_from_user_event_id, + STORE_COUNT as i64 + ); + } +} + +#[cfg(test)] +mod get_block_data_tests { + use crate::{types::get_block_data, DecryptedMultiViewStoreQueryResponse}; + use alloc::{vec, vec::Vec}; + use 
mc_fog_types::{common::BlockRange, view::QueryResponse}; + + fn create_query_response( + highest_processed_block_count: u64, + highest_processed_block_signature_timestamp: u64, + ) -> QueryResponse { + QueryResponse { + highest_processed_block_count, + highest_processed_block_signature_timestamp, + next_start_from_user_event_id: 0, + missed_block_ranges: vec![], + rng_records: vec![], + decommissioned_ingest_invocations: vec![], + tx_out_search_results: vec![], + last_known_block_count: highest_processed_block_count, + last_known_block_cumulative_txo_count: 0, + fixed_tx_out_search_results: vec![], + } + } + + #[test] + fn all_responses_fully_processed() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + for i in 0..STORE_COUNT { + let query_response = create_query_response((i + 1) as u64, i as u64); + let block_range = BlockRange::new(i as u64, (i + 1) as u64); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + } + + let result = get_block_data(decrypted_query_responses.as_mut(), &[]); + + let last_response = decrypted_query_responses.last().unwrap(); + assert_eq!( + result.highest_processed_block_count, + last_response.query_response.highest_processed_block_count + ); + assert_eq!( + result.highest_processed_block_signature_timestamp, + last_response + .query_response + .highest_processed_block_signature_timestamp + ); + } + + #[test] + fn first_response_incomplete() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response "incomplete"- i.e. it hasn't processed all of its + // blocks. 
+ let incomplete_query_response = create_query_response(2, 2); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response: incomplete_query_response.clone(), + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the second response fully processed. + let query_response = create_query_response(6, 6); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the third response fully processed. + let query_response = create_query_response(9, 9); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the fourth response fully processed. + let query_response = create_query_response(12, 12); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[]); + + assert_eq!( + result.highest_processed_block_count, + incomplete_query_response.highest_processed_block_count + ); + assert_eq!( + result.highest_processed_block_signature_timestamp, + incomplete_query_response.highest_processed_block_signature_timestamp + ); + } + + #[test] + fn second_response_zero_processed_blocks() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response fully processed. 
+ let fully_processed_block_count = 3; + let fully_processed_timestamp = 3; + let query_response = + create_query_response(fully_processed_block_count, fully_processed_timestamp); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the second response process zero blocks. + let query_response = create_query_response(0, 0); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the third response fully processed. + let query_response = create_query_response(9, 9); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the fourth response incomplete. + let query_response = create_query_response(10, 10); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[]); + + assert_eq!( + result.highest_processed_block_count, + fully_processed_block_count + ); + assert_eq!( + result.highest_processed_block_signature_timestamp, + fully_processed_timestamp + ); + } + + #[test] + fn second_response_incomplete() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response fully processed. + let query_response = create_query_response(3, 3); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the second response "incomplete"- i.e. it hasn't processed all of its + // blocks. 
+ let incomplete_block_count = 4; + let incomplete_timestamp = 4; + let incomplete_query_response = + create_query_response(incomplete_block_count, incomplete_block_count); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response: incomplete_query_response, + block_range, + }); + + // Make the third response fully processed. + let query_response = create_query_response(9, 9); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the fourth response incomplete. + let query_response = create_query_response(10, 10); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[]); + + assert_eq!(result.highest_processed_block_count, incomplete_block_count); + assert_eq!( + result.highest_processed_block_signature_timestamp, + incomplete_timestamp + ); + } + + #[test] + fn penultimate_response_incomplete() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response fully processed. + let query_response = create_query_response(3, 3); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the second response fully processed. + let incomplete_query_response = create_query_response(6, 6); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response: incomplete_query_response, + block_range, + }); + + // Make the third response incomplete. 
+ let incomplete_block_count = 8; + let incomplete_timestamp = 8; + let query_response = create_query_response(incomplete_block_count, incomplete_timestamp); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the fourth response fully processed. + let query_response = create_query_response(12, 12); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[]); + + assert_eq!(result.highest_processed_block_count, incomplete_block_count); + assert_eq!( + result.highest_processed_block_signature_timestamp, + incomplete_timestamp + ); + } + + #[test] + fn penultimate_response_zero_processed_blocks() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response fully processed. + let query_response = create_query_response(1, 1); + let block_range = BlockRange::new(0, 1); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the second response fully processed. + let second_response_highest_processed_block_count = 2; + let second_response_timestamp = 2; + let query_response = create_query_response( + second_response_highest_processed_block_count, + second_response_timestamp, + ); + let block_range = BlockRange::new(1, 2); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the third response process zero blocks. 
+ let incomplete_query_response = create_query_response(0, 0); + let block_range = BlockRange::new(2, 3); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response: incomplete_query_response, + block_range, + }); + + // Make the fourth response fully processed. + let query_response = create_query_response(4, 4); + let block_range = BlockRange::new(3, 4); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[]); + + assert_eq!( + result.highest_processed_block_count, + second_response_highest_processed_block_count, + ); + assert_eq!( + result.highest_processed_block_signature_timestamp, + second_response_timestamp + ); + } + + #[test] + fn final_response_incomplete() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response fully processed. + let query_response = create_query_response(3, 3); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the second response fully processed. + let incomplete_query_response = create_query_response(6, 6); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response: incomplete_query_response, + block_range, + }); + + // Make the third response fully processed. + let query_response = create_query_response(9, 9); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the fourth response incomplete. 
+        let incomplete_block_count = 10;
+        let incomplete_timestamp = 10;
+        // Use the named variables so the assertions below are tied to the
+        // values actually passed in, rather than duplicated literals.
+        let query_response = create_query_response(incomplete_block_count, incomplete_timestamp);
+        let block_range = BlockRange::new(9, 12);
+        decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse {
+            query_response,
+            block_range,
+        });
+
+        let result = get_block_data(decrypted_query_responses.as_mut(), &[]);
+
+        assert_eq!(result.highest_processed_block_count, incomplete_block_count);
+        assert_eq!(
+            result.highest_processed_block_signature_timestamp,
+            incomplete_timestamp
+        );
+    }
+
+    #[test]
+    fn final_response_zero_processed_blocks() {
+        const STORE_COUNT: usize = 4;
+        let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT);
+
+        // Make the first response fully processed.
+        let query_response = create_query_response(3, 3);
+        let block_range = BlockRange::new(0, 3);
+        let decrypted_query_response = DecryptedMultiViewStoreQueryResponse {
+            query_response,
+            block_range,
+        };
+        decrypted_query_responses.push(decrypted_query_response);
+
+        // Make the second response fully processed.
+        let query_response = create_query_response(6, 6);
+        let block_range = BlockRange::new(3, 6);
+        decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse {
+            query_response,
+            block_range,
+        });
+
+        // Make the third response fully processed.
+        let last_fully_processed_block_count = 9;
+        let last_fully_processed_timestamp = 9;
+        let query_response = create_query_response(
+            last_fully_processed_block_count,
+            last_fully_processed_timestamp,
+        );
+        let block_range = BlockRange::new(6, 9);
+        decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse {
+            query_response,
+            block_range,
+        });
+
+        // Make the fourth response process zero blocks.
+ let query_response = create_query_response(0, 0); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[]); + + assert_eq!( + result.highest_processed_block_count, + last_fully_processed_block_count + ); + assert_eq!( + result.highest_processed_block_signature_timestamp, + last_fully_processed_timestamp + ); + } + + #[test] + fn first_response_incomplete_missing_blocks_cover_it() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response only process block at index 0 (block count is 1). + // Make the missed_block_range includes blocks at index 1 and 2. Since this + // ingest is not responsible for them, it should not factor into the + // highest processed block count calculation. + let incomplete_block_count = 1; + let incomplete_timestamp = 1; + let missed_block_range = BlockRange::new(1, 3); + let incomplete_query_response = + create_query_response(incomplete_block_count, incomplete_timestamp); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response: incomplete_query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the second response fully processed. + let query_response = create_query_response(6, 6); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the third response fully processed. + let query_response = create_query_response(9, 9); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the fourth response complete. 
+ let highest_processed_block_count = 12; + let highest_processed_block_signature_timestamp = 12; + let query_response = create_query_response( + highest_processed_block_count, + highest_processed_block_signature_timestamp, + ); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[missed_block_range]); + + assert_eq!( + result.highest_processed_block_count, + highest_processed_block_count + ); + assert_eq!( + result.highest_processed_block_signature_timestamp, + highest_processed_block_signature_timestamp + ); + } + + #[test] + fn first_response_incomplete_missing_blocks_do_not_cover_it() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response only process block at index 0 (block count is 1). + // Make the missed_block_range includes blocks at index 1 and 2. Since this + // ingest is not responsible for them, it should not factor into the + // highest processed block count calculation. + let incomplete_block_count = 1; + let incomplete_timestamp = 1; + let missed_block_range = BlockRange::new(1, 2); + let incomplete_query_response = + create_query_response(incomplete_block_count, incomplete_timestamp); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response: incomplete_query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the second response fully processed. + let query_response = create_query_response(6, 6); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the third response fully processed. 
+ let query_response = create_query_response(9, 9); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the fourth response complete. + let highest_processed_block_count = 12; + let highest_processed_block_signature_timestamp = 12; + let query_response = create_query_response( + highest_processed_block_count, + highest_processed_block_signature_timestamp, + ); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[missed_block_range]); + + assert_eq!(result.highest_processed_block_count, incomplete_block_count); + assert_eq!( + result.highest_processed_block_signature_timestamp, + incomplete_timestamp + ); + } + + #[test] + fn second_response_incomplete_missing_blocks_cover_it() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response fully processed. + let query_response = create_query_response(3, 3); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the second response only process block at index 3 (block count is 4). + // Make the missed_block_range includes blocks at index 4 and 5. Since this + // ingest is not responsible for them, it should not factor into the + // highest processed block count calculation. 
+ let incomplete_block_count = 4; + let incomplete_timestamp = 4; + let missed_block_range = BlockRange::new(4, 6); + let incomplete_query_response = + create_query_response(incomplete_block_count, incomplete_timestamp); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response: incomplete_query_response, + block_range, + }); + + // Make the third response fully processed. + let query_response = create_query_response(9, 9); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the fourth response complete. + let highest_processed_block_count = 12; + let highest_processed_block_signature_timestamp = 12; + let query_response = create_query_response( + highest_processed_block_count, + highest_processed_block_signature_timestamp, + ); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[missed_block_range]); + + assert_eq!( + result.highest_processed_block_count, + highest_processed_block_count + ); + assert_eq!( + result.highest_processed_block_signature_timestamp, + highest_processed_block_signature_timestamp + ); + } + + #[test] + fn second_response_incomplete_missing_blocks_do_not_cover_it() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response fully processed. + let query_response = create_query_response(3, 3); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the second response only process block at index 3 (block count is 4). 
+ // Make the missed_block_range includes blocks at index 4 and 5. Since this + // ingest is not responsible for them, it should not factor into the + // highest processed block count calculation. + let incomplete_block_count = 4; + let incomplete_timestamp = 4; + let missed_block_range = BlockRange::new(4, 5); + let incomplete_query_response = + create_query_response(incomplete_block_count, incomplete_timestamp); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response: incomplete_query_response, + block_range, + }); + + // Make the third response fully processed. + let query_response = create_query_response(9, 9); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the fourth response complete. + let highest_processed_block_count = 12; + let highest_processed_block_signature_timestamp = 12; + let query_response = create_query_response( + highest_processed_block_count, + highest_processed_block_signature_timestamp, + ); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[missed_block_range]); + + assert_eq!(result.highest_processed_block_count, incomplete_block_count); + assert_eq!( + result.highest_processed_block_signature_timestamp, + incomplete_timestamp + ); + } + + #[test] + fn penultimate_response_incomplete_missing_blocks_cover_it() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response fully processed. 
+ let query_response = create_query_response(3, 3); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the second response fully processed. + let query_response = create_query_response(6, 6); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the third response only process block at index 6(block count is 7). + // Make the missed_block_range includes blocks at index 7 and 8. Since this + // ingest is not responsible for them, it should not factor into the + // highest processed block count calculation. + let incomplete_block_count = 7; + let incomplete_timestamp = 7; + let missed_block_range = BlockRange::new(7, 9); + let incomplete_query_response = + create_query_response(incomplete_block_count, incomplete_timestamp); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response: incomplete_query_response, + block_range, + }); + + // Make the fourth response complete. 
+ let highest_processed_block_count = 12; + let highest_processed_block_signature_timestamp = 12; + let query_response = create_query_response( + highest_processed_block_count, + highest_processed_block_signature_timestamp, + ); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[missed_block_range]); + + assert_eq!( + result.highest_processed_block_count, + highest_processed_block_count + ); + assert_eq!( + result.highest_processed_block_signature_timestamp, + highest_processed_block_signature_timestamp + ); + } + + #[test] + fn penultimate_response_incomplete_missing_blocks_do_not_cover_it() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response fully processed. + let query_response = create_query_response(3, 3); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the second response fully processed. + let query_response = create_query_response(6, 6); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the third response only process block at index 6(block count is 7). + // Make the missed_block_range includes blocks at index 7 and 8. Since this + // ingest is not responsible for them, it should not factor into the + // highest processed block count calculation. 
+ let incomplete_block_count = 7; + let incomplete_timestamp = 7; + let missed_block_range = BlockRange::new(7, 8); + let incomplete_query_response = + create_query_response(incomplete_block_count, incomplete_timestamp); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response: incomplete_query_response, + block_range, + }); + + // Make the fourth response complete. + let highest_processed_block_count = 12; + let highest_processed_block_signature_timestamp = 12; + let query_response = create_query_response( + highest_processed_block_count, + highest_processed_block_signature_timestamp, + ); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[missed_block_range]); + + assert_eq!(result.highest_processed_block_count, incomplete_block_count); + assert_eq!( + result.highest_processed_block_signature_timestamp, + incomplete_timestamp + ); + } + + #[test] + fn final_response_incomplete_missing_blocks_cover_it() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response fully processed. + let query_response = create_query_response(3, 3); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the second response fully processed. + let query_response = create_query_response(6, 6); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the third response fully processed. 
+ let query_response = create_query_response(9, 9); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the final response only process block at index 9 (block count is 10). + // Make the missed_block_range include blocks at index 10 and 11. + let incomplete_block_count = 10; + let incomplete_timestamp = 10; + let missed_block_range = BlockRange::new(10, 12); + let query_response = create_query_response(incomplete_block_count, incomplete_timestamp); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[missed_block_range]); + + assert_eq!(result.highest_processed_block_count, incomplete_block_count); + assert_eq!( + result.highest_processed_block_signature_timestamp, + incomplete_timestamp + ); + } + + #[test] + fn final_response_incomplete_missing_blocks_do_not_cover_it() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response fully processed. + let query_response = create_query_response(3, 3); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the second response fully processed. + let query_response = create_query_response(6, 6); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the third response fully processed. 
+ let query_response = create_query_response(9, 9); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the final response only process block at index 9 (block count is 10). + // Make the missed_block_range include blocks at index 10 and 11. + let incomplete_block_count = 10; + let incomplete_timestamp = 10; + let missed_block_range = BlockRange::new(10, 11); + let query_response = create_query_response(incomplete_block_count, incomplete_timestamp); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[missed_block_range]); + + assert_eq!(result.highest_processed_block_count, incomplete_block_count); + assert_eq!( + result.highest_processed_block_signature_timestamp, + incomplete_timestamp + ); + } + + #[test] + fn second_response_incomplete_missing_blocks_cover_two_responses() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response fully processed. + let query_response = create_query_response(3, 3); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the missing blocks cover some of the second response and some of the + // third response. 
+ let missed_block_range = BlockRange::new(4, 8); + + // Make the second response incomplete + let incomplete_block_count = 4; + let incomplete_timestamp = 4; + let incomplete_query_response = + create_query_response(incomplete_block_count, incomplete_timestamp); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response: incomplete_query_response, + block_range, + }); + + // Make the third response process the final block its responsible for. + let query_response = create_query_response(9, 9); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the final response complete. + let highest_processed_block_count = 12; + let highest_processed_timestamp = 12; + let query_response = + create_query_response(highest_processed_block_count, highest_processed_timestamp); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data(decrypted_query_responses.as_mut(), &[missed_block_range]); + + assert_eq!( + result.highest_processed_block_count, + highest_processed_block_count + ); + assert_eq!( + result.highest_processed_block_signature_timestamp, + highest_processed_timestamp + ); + } + + #[test] + fn multiple_missing_blocks() { + const STORE_COUNT: usize = 4; + let mut decrypted_query_responses = Vec::with_capacity(STORE_COUNT); + + // Make the first response fully processed. + let query_response = create_query_response(3, 3); + let block_range = BlockRange::new(0, 3); + let decrypted_query_response = DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }; + decrypted_query_responses.push(decrypted_query_response); + + // Make the first missed blocks cover some of the second response and some of + // the third response. 
+ let missed_block_range_1 = BlockRange::new(4, 8); + + // Make the second response incomplete + let incomplete_block_count_1 = 4; + let incomplete_timestamp_1 = 4; + let incomplete_query_response = + create_query_response(incomplete_block_count_1, incomplete_timestamp_1); + let block_range = BlockRange::new(3, 6); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response: incomplete_query_response, + block_range, + }); + + // Make the third response process the final block its responsible for. + let query_response = create_query_response(9, 9); + let block_range = BlockRange::new(6, 9); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + // Make the second missed blocks cover the beginning of the fourth response. + let missed_block_range_2 = BlockRange::new(9, 10); + + // Make the final response incomplete. + let incomplete_block_count_2 = 11; + let incomplete_timestamp_2 = 11; + let query_response = + create_query_response(incomplete_block_count_2, incomplete_timestamp_2); + let block_range = BlockRange::new(9, 12); + decrypted_query_responses.push(DecryptedMultiViewStoreQueryResponse { + query_response, + block_range, + }); + + let result = get_block_data( + decrypted_query_responses.as_mut(), + &[missed_block_range_1, missed_block_range_2], + ); + + assert_eq!( + result.highest_processed_block_count, + incomplete_block_count_2 + ); + assert_eq!( + result.highest_processed_block_signature_timestamp, + incomplete_timestamp_2 + ); + } +} diff --git a/fog/view/enclave/src/lib.rs b/fog/view/enclave/src/lib.rs index 3a28448a63..9c4815cc81 100644 --- a/fog/view/enclave/src/lib.rs +++ b/fog/view/enclave/src/lib.rs @@ -11,12 +11,15 @@ use std::{path, result::Result as StdResult, sync::Arc}; use mc_attest_core::{ IasNonce, Quote, QuoteNonce, Report, SgxError, TargetInfo, VerificationReport, }; -use mc_attest_enclave_api::{ClientAuthRequest, ClientAuthResponse, ClientSession, 
EnclaveMessage}; +use mc_attest_enclave_api::{ + ClientAuthRequest, ClientAuthResponse, ClientSession, EnclaveMessage, NonceAuthRequest, + NonceAuthResponse, NonceSession, SealedClientMessage, +}; use mc_attest_verifier::DEBUG_ENCLAVE; use mc_common::{logger::Logger, ResponderId}; use mc_crypto_keys::X25519Public; use mc_enclave_boundary::untrusted::make_variable_length_ecall; -use mc_fog_types::ETxOutRecord; +use mc_fog_types::{view::MultiViewStoreQueryResponse, ETxOutRecord}; use mc_fog_view_enclave_api::UntrustedQueryResponse; use mc_sgx_report_cache_api::{ReportableEnclave, Result as ReportableEnclaveResult}; use mc_sgx_types::{sgx_attributes_t, sgx_enclave_id_t, sgx_launch_token_t, sgx_misc_attribute_t}; @@ -161,6 +164,31 @@ impl ViewEnclaveApi for SgxViewEnclave { mc_util_serial::deserialize(&outbuf[..])? } + fn view_store_init(&self, view_store_id: ResponderId) -> Result { + let inbuf = mc_util_serial::serialize(&ViewEnclaveRequest::ViewStoreInit(view_store_id))?; + let outbuf = self.enclave_call(&inbuf)?; + mc_util_serial::deserialize(&outbuf[..])? + } + + fn view_store_connect( + &self, + view_store_id: ResponderId, + view_store_auth_response: NonceAuthResponse, + ) -> Result<()> { + let inbuf = mc_util_serial::serialize(&ViewEnclaveRequest::ViewStoreConnect( + view_store_id, + view_store_auth_response, + ))?; + let outbuf = self.enclave_call(&inbuf)?; + mc_util_serial::deserialize(&outbuf[..])? + } + + fn frontend_accept(&self, req: NonceAuthRequest) -> Result<(NonceAuthResponse, NonceSession)> { + let inbuf = mc_util_serial::serialize(&ViewEnclaveRequest::FrontendAccept(req))?; + let outbuf = self.enclave_call(&inbuf)?; + mc_util_serial::deserialize(&outbuf[..])? + } + fn query( &self, payload: EnclaveMessage, @@ -174,9 +202,56 @@ impl ViewEnclaveApi for SgxViewEnclave { mc_util_serial::deserialize(&outbuf[..])? 
} + fn query_store( + &self, + payload: EnclaveMessage, + untrusted_query_response: UntrustedQueryResponse, + ) -> Result> { + let inbuf = mc_util_serial::serialize(&ViewEnclaveRequest::QueryStore( + payload, + untrusted_query_response, + ))?; + let outbuf = self.enclave_call(&inbuf)?; + mc_util_serial::deserialize(&outbuf[..])? + } + fn add_records(&self, records: Vec) -> Result<()> { let inbuf = mc_util_serial::serialize(&ViewEnclaveRequest::AddRecords(records))?; let outbuf = self.enclave_call(&inbuf)?; mc_util_serial::deserialize(&outbuf[..])? } + + fn decrypt_and_seal_query( + &self, + client_query: EnclaveMessage, + ) -> Result { + let inbuf = + mc_util_serial::serialize(&ViewEnclaveRequest::DecryptAndSealQuery(client_query))?; + let outbuf = self.enclave_call(&inbuf)?; + mc_util_serial::deserialize(&outbuf[..])? + } + + fn create_multi_view_store_query_data( + &self, + sealed_query: SealedClientMessage, + ) -> Result>> { + let inbuf = mc_util_serial::serialize(&ViewEnclaveRequest::CreateMultiViewStoreQuery( + sealed_query, + ))?; + let outbuf = self.enclave_call(&inbuf)?; + mc_util_serial::deserialize(&outbuf[..])? + } + + fn collate_shard_query_responses( + &self, + sealed_query: SealedClientMessage, + shard_query_responses: Vec, + ) -> Result> { + let inbuf = mc_util_serial::serialize(&ViewEnclaveRequest::CollateQueryResponses( + sealed_query, + shard_query_responses, + ))?; + let outbuf = self.enclave_call(&inbuf)?; + mc_util_serial::deserialize(&outbuf[..])? 
+ } } diff --git a/fog/view/enclave/trusted/Cargo.lock b/fog/view/enclave/trusted/Cargo.lock index 1939efb4af..17cd2b741c 100644 --- a/fog/view/enclave/trusted/Cargo.lock +++ b/fog/view/enclave/trusted/Cargo.lock @@ -739,6 +739,7 @@ dependencies = [ "mc-attest-verifier", "mc-crypto-noise", "mc-sgx-compat", + "mc-util-serial", "serde", ] @@ -892,6 +893,7 @@ dependencies = [ "mc-sgx-build", "mc-sgx-compat", "mc-util-from-random", + "mc-util-serial", "sha2", ] @@ -1167,6 +1169,8 @@ version = "4.1.0-pre0" dependencies = [ "crc", "displaydoc", + "mc-attest-enclave-api", + "mc-common", "mc-crypto-keys", "mc-fog-kex-rng", "mc-transaction-core", @@ -1183,7 +1187,9 @@ dependencies = [ "mc-attest-core", "mc-attest-enclave-api", "mc-common", + "mc-crypto-ake-enclave", "mc-crypto-keys", + "mc-crypto-noise", "mc-fog-recovery-db-iface", "mc-fog-types", "mc-sgx-compat", @@ -1205,7 +1211,9 @@ dependencies = [ name = "mc-fog-view-enclave-impl" version = "4.1.0-pre0" dependencies = [ + "aes-gcm", "aligned-cmov", + "mc-attest-ake", "mc-attest-core", "mc-attest-enclave-api", "mc-common", @@ -1221,6 +1229,7 @@ dependencies = [ "mc-sgx-compat", "mc-sgx-report-cache-api", "mc-util-serial", + "static_assertions", ] [[package]] diff --git a/fog/view/enclave/trusted/Cargo.toml b/fog/view/enclave/trusted/Cargo.toml index e9be9d72eb..72acd0afce 100644 --- a/fog/view/enclave/trusted/Cargo.toml +++ b/fog/view/enclave/trusted/Cargo.toml @@ -22,14 +22,11 @@ ias-dev = [ ] [dependencies] -# fog -mc-fog-ocall-oram-storage-edl = { path = "../../../ocall_oram_storage/edl" } -mc-fog-ocall-oram-storage-trusted = { path = "../../../ocall_oram_storage/trusted" } -mc-fog-recovery-db-iface = { path = "../../../recovery_db_iface" } -mc-fog-types = { path = "../../../types" } -mc-fog-view-enclave-api = { path = "../api" } -mc-fog-view-enclave-edl = { path = "../edl" } -mc-fog-view-enclave-impl = { path = "../impl" } + +# third-party +lazy_static = { version = "1.4", features = ["spin_no_std"] } +mbedtls = { 
version = "0.8.1", default-features = false, features = ["no_std_deps", "aesni", "force_aesni_support", "rdrand"] } +mbedtls-sys-auto = { version = "2.26.1", default-features = false, features = ["custom_threading"] } # mobilecoin mc-attest-core = { path = "../../../../attest/core", default-features = false } @@ -38,6 +35,14 @@ mc-attest-verifier = { path = "../../../../attest/verifier", default-features = mc-crypto-keys = { path = "../../../../crypto/keys" } mc-crypto-rand = { path = "../../../../crypto/rand" } mc-enclave-boundary = { path = "../../../../enclave-boundary" } +# fog +mc-fog-ocall-oram-storage-edl = { path = "../../../ocall_oram_storage/edl" } +mc-fog-ocall-oram-storage-trusted = { path = "../../../ocall_oram_storage/trusted" } +mc-fog-recovery-db-iface = { path = "../../../recovery_db_iface" } +mc-fog-types = { path = "../../../types" } +mc-fog-view-enclave-api = { path = "../api" } +mc-fog-view-enclave-edl = { path = "../edl" } +mc-fog-view-enclave-impl = { path = "../impl" } mc-sgx-compat = { path = "../../../../sgx/compat", features = ["sgx"] } mc-sgx-compat-edl = { path = "../../../../sgx/compat-edl" } mc-sgx-debug-edl = { path = "../../../../sgx/debug-edl" } @@ -48,18 +53,13 @@ mc-sgx-slog = { path = "../../../../sgx/slog", features = ["sgx"] } mc-sgx-slog-edl = { path = "../../../../sgx/slog-edl" } mc-sgx-types = { path = "../../../../sgx/types" } mc-util-serial = { path = "../../../../util/serial", default-features = false } - -# third-party -lazy_static = { version = "1.4", features = ["spin_no_std"] } -mbedtls = { version = "0.8.1", default-features = false, features = ["no_std_deps", "aesni", "force_aesni_support", "rdrand"] } -mbedtls-sys-auto = { version = "2.26.1", default-features = false, features = ["custom_threading"] } sha2 = { version = "0.10", default-features = false } [build-dependencies] -mc-util-build-script = { path = "../../../../util/build/script" } -mc-util-build-sgx = { path = "../../../../util/build/sgx" } cargo-emit = 
"0.2" +mc-util-build-script = { path = "../../../../util/build/script" } +mc-util-build-sgx = { path = "../../../../util/build/sgx" } pkg-config = "0.3" [profile.dev] diff --git a/fog/view/enclave/trusted/src/lib.rs b/fog/view/enclave/trusted/src/lib.rs index 3f3cbd7d38..ff047d9b9a 100644 --- a/fog/view/enclave/trusted/src/lib.rs +++ b/fog/view/enclave/trusted/src/lib.rs @@ -117,11 +117,30 @@ pub fn ecall_dispatcher(inbuf: &[u8]) -> Result, sgx_status_t> { } ViewEnclaveRequest::GetIasReport => serialize(&ENCLAVE.get_ias_report()), ViewEnclaveRequest::ClientAccept(msg) => serialize(&ENCLAVE.client_accept(msg)), + ViewEnclaveRequest::ViewStoreInit(view_store_id) => { + serialize(&ENCLAVE.view_store_init(view_store_id)) + } + ViewEnclaveRequest::ViewStoreConnect(view_store_id, msg) => { + serialize(&ENCLAVE.view_store_connect(view_store_id, msg)) + } + ViewEnclaveRequest::FrontendAccept(msg) => serialize(&ENCLAVE.frontend_accept(msg)), ViewEnclaveRequest::ClientClose(session) => serialize(&ENCLAVE.client_close(session)), ViewEnclaveRequest::Query(req, untrusted_query_response) => { serialize(&ENCLAVE.query(req, untrusted_query_response)) } + ViewEnclaveRequest::QueryStore(req, untrusted_query_response) => { + serialize(&ENCLAVE.query_store(req, untrusted_query_response)) + } ViewEnclaveRequest::AddRecords(records) => serialize(&ENCLAVE.add_records(records)), + ViewEnclaveRequest::DecryptAndSealQuery(client_query) => { + serialize(&ENCLAVE.decrypt_and_seal_query(client_query)) + } + ViewEnclaveRequest::CreateMultiViewStoreQuery(sealed_query) => { + serialize(&ENCLAVE.create_multi_view_store_query_data(sealed_query)) + } + ViewEnclaveRequest::CollateQueryResponses(sealed_query, shard_query_responses) => { + serialize(&ENCLAVE.collate_shard_query_responses(sealed_query, shard_query_responses)) + } } .or(Err(sgx_status_t::SGX_ERROR_UNEXPECTED)) } diff --git a/fog/view/server/Cargo.toml b/fog/view/server/Cargo.toml index f3bdb3e6f9..dd75f0a911 100644 --- 
a/fog/view/server/Cargo.toml +++ b/fog/view/server/Cargo.toml @@ -15,32 +15,29 @@ path = "src/lib.rs" name = "fog_view_server" path = "src/bin/main.rs" +[[bin]] +name = "fog_view_router" +path = "src/bin/router.rs" + [dependencies] # third party clap = { version = "4.1", features = ["derive", "env"] } displaydoc = { version = "0.2", default-features = false } futures = "0.3" grpcio = "0.12.1" +hex = "0.4" +itertools = "0.10" lazy_static = "1.4" -serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } -serde_json = "1.0" # mobilecoin +mc-api = { path = "../../../api" } mc-attest-api = { path = "../../../attest/api" } mc-attest-core = { path = "../../../attest/core" } +mc-attest-enclave-api = { path = "../../../attest/enclave-api" } mc-attest-net = { path = "../../../attest/net" } +mc-blockchain-types = { path = "../../../blockchain/types" } mc-common = { path = "../../../common", features = ["log"] } mc-crypto-keys = { path = "../../../crypto/keys" } -mc-sgx-report-cache-untrusted = { path = "../../../sgx/report-cache/untrusted" } -mc-util-cli = { path = "../../../util/cli" } -mc-util-from-random = { path = "../../../util/from-random" } -mc-util-grpc = { path = "../../../util/grpc" } -mc-util-metered-channel = { path = "../../../util/metered-channel" } -mc-util-metrics = { path = "../../../util/metrics" } -mc-util-parse = { path = "../../../util/parse" } -mc-util-serial = { path = "../../../util/serial" } -mc-util-telemetry = { path = "../../../util/telemetry", features = ["jaeger"] } -mc-util-uri = { path = "../../../util/uri" } # fog mc-fog-api = { path = "../../api" } @@ -51,26 +48,41 @@ mc-fog-types = { path = "../../types" } mc-fog-uri = { path = "../../uri" } mc-fog-view-enclave = { path = "../enclave" } mc-fog-view-enclave-api = { path = "../enclave/api" } +mc-sgx-report-cache-untrusted = { path = "../../../sgx/report-cache/untrusted" } +mc-util-cli = { path = "../../../util/cli" } +mc-util-from-random = { path = 
"../../../util/from-random" } +mc-util-grpc = { path = "../../../util/grpc" } +mc-util-metered-channel = { path = "../../../util/metered-channel" } +mc-util-metrics = { path = "../../../util/metrics" } +mc-util-parse = { path = "../../../util/parse" } +mc-util-serial = { path = "../../../util/serial" } +mc-util-telemetry = { path = "../../../util/telemetry", features = ["jaeger"] } +mc-util-uri = { path = "../../../util/uri" } +serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } +serde_json = "1.0" [dev-dependencies] -pem = "2.0" -portpicker = "0.1.1" -rand = "0.8" -rand_core = "0.6" - mc-attest-verifier = { path = "../../../attest/verifier" } mc-blockchain-types = { path = "../../../blockchain/types" } mc-common = { path = "../../../common", features = ["loggers"] } mc-crypto-keys = { path = "../../../crypto/keys" } mc-crypto-x509-test-vectors = { path = "../../../crypto/x509/test-vectors" } -mc-transaction-core = { path = "../../../transaction/core" } -mc-util-encodings = { path = "../../../util/encodings" } -mc-util-serial = { path = "../../../util/serial" } -mc-util-test-helper = { path = "../../../util/test-helper" } -mc-util-uri = { path = "../../../util/uri" } mc-fog-test-infra = { path = "../../test_infra" } mc-fog-types = { path = "../../types" } mc-fog-view-connection = { path = "../connection" } mc-fog-view-enclave-measurement = { path = "../enclave/measurement" } mc-fog-view-protocol = { path = "../protocol" } +mc-fog-view-server-test-utils = { path = "./test-utils" } +mc-transaction-core = { path = "../../../transaction/core" } +mc-util-encodings = { path = "../../../util/encodings" } +mc-util-serial = { path = "../../../util/serial" } +mc-util-test-helper = { path = "../../../util/test-helper" } +mc-util-uri = { path = "../../../util/uri" } + +pem = "1.0" +portpicker = "0.1.1" +rand = "0.8" +rand_core = "0.6" +tempdir = "0.3" +yare = "1.0.2" diff --git a/fog/view/server/src/bin/main.rs 
b/fog/view/server/src/bin/main.rs index 2bcb02daad..bb33a6a6b5 100644 --- a/fog/view/server/src/bin/main.rs +++ b/fog/view/server/src/bin/main.rs @@ -6,7 +6,7 @@ use mc_attest_net::{Client, RaClient}; use mc_common::{logger::log, time::SystemTimeProvider}; use mc_fog_sql_recovery_db::SqlRecoveryDb; use mc_fog_view_enclave::{SgxViewEnclave, ENCLAVE_FILE}; -use mc_fog_view_server::{config::MobileAcctViewConfig, server::ViewServer}; +use mc_fog_view_server::{config, config::MobileAcctViewConfig, server::ViewServer}; use mc_util_cli::ParserWithBuildInfo; use mc_util_grpc::AdminServer; use std::{env, sync::Arc}; @@ -53,12 +53,15 @@ fn main() { let ias_client = Client::new(&config.ias_api_key).expect("Could not create IAS client"); + let config::ShardingStrategy::Epoch(sharding_strategy) = config.sharding_strategy.clone(); + let mut server = ViewServer::new( config.clone(), sgx_enclave, recovery_db, ias_client, SystemTimeProvider::default(), + sharding_strategy, logger.clone(), ); server.start(); diff --git a/fog/view/server/src/bin/router.rs b/fog/view/server/src/bin/router.rs new file mode 100644 index 0000000000..f67927b3be --- /dev/null +++ b/fog/view/server/src/bin/router.rs @@ -0,0 +1,79 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation +#![deny(missing_docs)] + +//! 
MobileCoin Fog View Router target +use grpcio::ChannelBuilder; +use mc_attest_net::{Client, RaClient}; +use mc_common::{logger::log, time::SystemTimeProvider}; +use mc_fog_api::view_grpc::FogViewStoreApiClient; +use mc_fog_view_enclave::{SgxViewEnclave, ENCLAVE_FILE}; +use mc_fog_view_server::{ + config::FogViewRouterConfig, + fog_view_router_server::{FogViewRouterServer, Shard}, + sharding_strategy::{EpochShardingStrategy, ShardingStrategy}, +}; +use mc_util_cli::ParserWithBuildInfo; +use mc_util_grpc::ConnectionUriGrpcioChannel; +use std::{ + env, + sync::{Arc, RwLock}, +}; + +fn main() { + mc_common::setup_panic_handler(); + let (logger, _global_logger_guard) = + mc_common::logger::create_app_logger(mc_common::logger::o!()); + let config = FogViewRouterConfig::parse(); + + let enclave_path = env::current_exe() + .expect("Could not get the path of our executable") + .with_file_name(ENCLAVE_FILE); + log::info!( + logger, + "enclave path {}, responder ID {}", + enclave_path.to_str().unwrap(), + &config.client_responder_id + ); + let sgx_enclave = SgxViewEnclave::new( + enclave_path, + config.client_responder_id.clone(), + config.omap_capacity, + logger.clone(), + ); + + let mut shards = Vec::new(); + let grpc_env = Arc::new( + grpcio::EnvBuilder::new() + .name_prefix("Main-RPC".to_string()) + .build(), + ); + for shard_uri in config.shard_uris.clone() { + let fog_view_store_grpc_client = FogViewStoreApiClient::new( + ChannelBuilder::default_channel_builder(grpc_env.clone()) + .connect_to_uri(&shard_uri, &logger), + ); + + // TODO: update this logic once we introduce other types of sharding strategies. 
+ let epoch_sharding_strategy = EpochShardingStrategy::try_from(shard_uri.clone()) + .unwrap_or_else(|_| panic!("Could not get sharding strategy for uri: {shard_uri:?}")); + let block_range = epoch_sharding_strategy.get_block_range(); + let shard = Shard::new(shard_uri, Arc::new(fog_view_store_grpc_client), block_range); + shards.push(shard); + } + let shards = Arc::new(RwLock::new(shards)); + + let ias_client = Client::new(&config.ias_api_key).expect("Could not create IAS client"); + let mut router_server = FogViewRouterServer::new( + config, + sgx_enclave, + ias_client, + shards, + SystemTimeProvider::default(), + logger, + ); + router_server.start(); + + loop { + std::thread::sleep(std::time::Duration::from_millis(1000)); + } +} diff --git a/fog/view/server/src/block_tracker.rs b/fog/view/server/src/block_tracker.rs index ca6036211d..ed52126ac5 100644 --- a/fog/view/server/src/block_tracker.rs +++ b/fog/view/server/src/block_tracker.rs @@ -1,5 +1,6 @@ // Copyright (c) 2018-2022 The MobileCoin Foundation +use crate::sharding_strategy::ShardingStrategy; use mc_common::logger::{log, Logger}; use mc_crypto_keys::CompressedRistrettoPublic; use mc_fog_recovery_db_iface::IngressPublicKeyRecord; @@ -12,17 +13,25 @@ use std::collections::HashMap; /// - Finding out what is the highest block index we have encountered so far. 
/// - Finding out for which block index have we processed data for all ingress /// keys, while taking into account ingress keys reported lost -pub struct BlockTracker { +pub struct BlockTracker +where + SS: ShardingStrategy, +{ processed_block_per_ingress_key: HashMap, last_highest_processed_block_count: u64, + sharding_strategy: SS, logger: Logger, } -impl BlockTracker { - pub fn new(logger: Logger) -> Self { +impl BlockTracker +where + SS: ShardingStrategy, +{ + pub fn new(logger: Logger, sharding_strategy: SS) -> Self { Self { processed_block_per_ingress_key: HashMap::default(), last_highest_processed_block_count: 0, + sharding_strategy, logger, } } @@ -58,9 +67,19 @@ impl BlockTracker { next_blocks } - /// Notify the tracker that a block has been processed (loaded into enclave - /// and is now available) - pub fn block_processed(&mut self, ingress_key: CompressedRistrettoPublic, block_index: u64) { + /// Notify the tracker that a block has been processed. + /// + /// The meaning of the term "processed" is context dependent. In some cases, + /// it might mean that the block has been added to the enclave, in + /// others it might mean that a block was fetched from the DB. + /// + /// returns `true` if this BlockTracker instance is responsible for the + /// block based on the sharding strategy. + pub fn block_processed( + &mut self, + ingress_key: CompressedRistrettoPublic, + block_index: u64, + ) -> bool { if let Some(previous_block_index) = self .processed_block_per_ingress_key .insert(ingress_key, block_index) @@ -68,6 +87,7 @@ impl BlockTracker { // Sanity check that we are only moving forward and not skipping any blocks. 
assert!(block_index == previous_block_index + 1); } + self.sharding_strategy.should_process_block(block_index) } /// Given a list of ingress keys, missing blocks and current state, @@ -128,20 +148,21 @@ impl BlockTracker { // Go over all known ingress keys and check if // any of them need to provide this block and have not provided it for rec in ingress_keys { - // If this ingress key isn't responsible to provide this block index, we can - // move on - if !rec.covers_block_index(next_block_index) { + let epoch = self.sharding_strategy.get_block_range(); + let is_key_responsible = rec.get_block_range().overlaps(&epoch) + && rec.covers_block_index(next_block_index); + if !is_key_responsible { continue; } // Check if the last block we actually loaded with this key is less than // next_block_index, if so then this is what we are stuck on - if let Some(last_processed_block) = + if let Some(last_processed_block_index) = self.processed_block_per_ingress_key.get(&rec.key) { - if next_block_index > *last_processed_block { + if next_block_index > *last_processed_block_index { // This ingress key needs to provide this block, but we haven't got it yet - log::trace!(self.logger, "cannot advance highest_processed_block_count to {}, because ingress_key {:?} only processed block {}", next_block_count, rec.key, last_processed_block); + log::trace!(self.logger, "cannot advance highest_processed_block_count to {}, because ingress_key {:?} only processed block {}", next_block_count, rec.key, last_processed_block_index); reason_we_stopped = Some(rec.clone()); break 'outer; } @@ -187,6 +208,7 @@ impl BlockTracker { #[cfg(test)] mod tests { use super::*; + use crate::sharding_strategy::EpochShardingStrategy; use mc_common::logger::test_with_logger; use mc_fog_recovery_db_iface::IngressPublicKeyStatus; use mc_util_from_random::FromRandom; @@ -195,7 +217,7 @@ mod tests { #[test_with_logger] fn next_blocks_empty(logger: Logger) { - let block_tracker = BlockTracker::new(logger); + let 
block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); assert_eq!(block_tracker.next_blocks(&[]).len(), 0); } @@ -203,7 +225,7 @@ mod tests { #[test_with_logger] fn next_blocks_single_key_hasnt_scanned(logger: Logger) { let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); - let mut block_tracker = BlockTracker::new(logger); + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let rec = IngressPublicKeyRecord { key: CompressedRistrettoPublic::from_random(&mut rng), status: IngressPublicKeyStatus { @@ -244,7 +266,7 @@ mod tests { #[test_with_logger] fn next_blocks_single_range_commissioned_scanned_some(logger: Logger) { let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); - let mut block_tracker = BlockTracker::new(logger); + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let rec = IngressPublicKeyRecord { key: CompressedRistrettoPublic::from_random(&mut rng), status: IngressPublicKeyStatus { @@ -280,7 +302,7 @@ mod tests { #[test_with_logger] fn next_blocks_single_key_retired_hasnt_scanned(logger: Logger) { let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); - let mut block_tracker = BlockTracker::new(logger); + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let key = CompressedRistrettoPublic::from_random(&mut rng); let rec = IngressPublicKeyRecord { @@ -331,7 +353,7 @@ mod tests { #[test_with_logger] fn next_blocks_single_range_retired_scanned_some(logger: Logger) { let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); - let mut block_tracker = BlockTracker::new(logger); + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let last_ingested_block = 126; let rec = IngressPublicKeyRecord { key: CompressedRistrettoPublic::from_random(&mut rng), @@ -374,7 +396,7 @@ mod tests { #[test_with_logger] fn next_blocks_single_key_lost_hasnt_scanned(logger: Logger) { let mut rng: StdRng = 
SeedableRng::from_seed([123u8; 32]); - let block_tracker = BlockTracker::new(logger); + let block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let key = CompressedRistrettoPublic::from_random(&mut rng); let rec = IngressPublicKeyRecord { @@ -400,7 +422,7 @@ mod tests { #[test_with_logger] fn next_blocks_single_key_lost_scanned_some(logger: Logger) { let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); - let mut block_tracker = BlockTracker::new(logger); + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let key = CompressedRistrettoPublic::from_random(&mut rng); let rec = IngressPublicKeyRecord { @@ -448,7 +470,7 @@ mod tests { #[test_with_logger] fn next_blocks_multiple_keys(logger: Logger) { let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); - let mut block_tracker = BlockTracker::new(logger); + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let rec1 = IngressPublicKeyRecord { key: CompressedRistrettoPublic::from_random(&mut rng), status: IngressPublicKeyStatus { @@ -505,7 +527,7 @@ mod tests { // highest_fully_processed_block_count behaves as expected #[test_with_logger] fn highest_fully_processed_block_count_all_empty(logger: Logger) { - let mut block_tracker = BlockTracker::new(logger); + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); assert_eq!( block_tracker.highest_fully_processed_block_count(&[]), @@ -516,7 +538,7 @@ mod tests { // Check with a key that hasn't yet processed anything. 
#[test_with_logger] fn highest_fully_processed_block_missing_blocks_nothing_processed1(logger: Logger) { - let mut block_tracker = BlockTracker::new(logger); + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); let rec = IngressPublicKeyRecord { key: CompressedRistrettoPublic::from_random(&mut rng), @@ -539,7 +561,7 @@ mod tests { // are processed when the start block is 0. #[test_with_logger] fn highest_fully_processed_block_tracks_block_processed1(logger: Logger) { - let mut block_tracker = BlockTracker::new(logger); + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); let rec = IngressPublicKeyRecord { @@ -574,8 +596,7 @@ mod tests { // when the start block is greater than zero #[test_with_logger] fn highest_fully_processed_block_tracks_block_processed2(logger: Logger) { - let mut block_tracker = BlockTracker::new(logger); - + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); let rec = IngressPublicKeyRecord { key: CompressedRistrettoPublic::from_random(&mut rng), @@ -610,8 +631,7 @@ mod tests { // then the key is reported lost #[test_with_logger] fn highest_fully_processed_block_tracks_block_processed3(logger: Logger) { - let mut block_tracker = BlockTracker::new(logger); - + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); let mut rec = IngressPublicKeyRecord { key: CompressedRistrettoPublic::from_random(&mut rng), @@ -657,8 +677,7 @@ mod tests { // When the slow one is marked lost, that unblocks progress. 
#[test_with_logger] fn highest_fully_processed_block_tracks_multiple_recs(logger: Logger) { - let mut block_tracker = BlockTracker::new(logger); - + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); let mut rec1 = IngressPublicKeyRecord { key: CompressedRistrettoPublic::from_random(&mut rng), @@ -754,8 +773,7 @@ mod tests { // key is loaded #[test_with_logger] fn highest_fully_processed_block_tracks_multiple_recs_some_lost2(logger: Logger) { - let mut block_tracker = BlockTracker::new(logger); - + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); let mut rec1 = IngressPublicKeyRecord { key: CompressedRistrettoPublic::from_random(&mut rng), @@ -868,8 +886,7 @@ mod tests { /// key, makes progress #[test_with_logger] fn highest_fully_processed_block_tracks_retired_key_followed_by_gap(logger: Logger) { - let mut block_tracker = BlockTracker::new(logger); - + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); let mut rec1 = IngressPublicKeyRecord { key: CompressedRistrettoPublic::from_random(&mut rng), @@ -963,8 +980,7 @@ mod tests { /// when everything works. 
#[test_with_logger] fn highest_fully_processed_block_tracks_retired_key_concurrent_with_active(logger: Logger) { - let mut block_tracker = BlockTracker::new(logger); - + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); let mut rec1 = IngressPublicKeyRecord { key: CompressedRistrettoPublic::from_random(&mut rng), @@ -1103,8 +1119,7 @@ mod tests { fn highest_fully_processed_block_tracks_retired_key_concurrent_with_active_both_lost( logger: Logger, ) { - let mut block_tracker = BlockTracker::new(logger); - + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); let mut rec1 = IngressPublicKeyRecord { key: CompressedRistrettoPublic::from_random(&mut rng), @@ -1270,7 +1285,7 @@ mod tests { // Highest known block count is 0 when there are no inputs. #[test_with_logger] fn highest_known_block_count_when_empty(logger: Logger) { - let block_tracker = BlockTracker::new(logger); + let block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); assert_eq!(block_tracker.highest_known_block_count(), 0); } @@ -1280,7 +1295,7 @@ mod tests { #[test_with_logger] fn highest_known_block_count_tracks_processed(logger: Logger) { let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); - let mut block_tracker = BlockTracker::new(logger); + let mut block_tracker = BlockTracker::new(logger, EpochShardingStrategy::default()); block_tracker.block_processed(CompressedRistrettoPublic::from_random(&mut rng), 100); assert_eq!(block_tracker.highest_known_block_count(), 101); diff --git a/fog/view/server/src/config.rs b/fog/view/server/src/config.rs index 5c4e42f86f..c2173cbb6a 100644 --- a/fog/view/server/src/config.rs +++ b/fog/view/server/src/config.rs @@ -2,16 +2,16 @@ //! 
Configuration parameters for the MobileCoin Fog View Node #![deny(missing_docs)] - +use crate::sharding_strategy::EpochShardingStrategy; use clap::Parser; use mc_attest_core::ProviderId; use mc_common::ResponderId; use mc_fog_sql_recovery_db::SqlRecoveryDbConnectionConfig; -use mc_fog_uri::FogViewUri; +use mc_fog_uri::{FogViewRouterUri, FogViewStoreUri, FogViewUri}; use mc_util_parse::parse_duration_in_seconds; use mc_util_uri::AdminUri; use serde::Serialize; -use std::time::Duration; +use std::{str::FromStr, time::Duration}; /// Configuration parameters for the MobileCoin Fog View Node #[derive(Clone, Parser, Serialize)] @@ -38,7 +38,7 @@ pub struct MobileAcctViewConfig { /// gRPC listening URI for client requests. #[clap(long, env = "MC_CLIENT_LISTEN_URI")] - pub client_listen_uri: FogViewUri, + pub client_listen_uri: FogViewStoreUri, /// Optional admin listening URI. #[clap(long, env = "MC_ADMIN_LISTEN_URI")] @@ -77,4 +77,116 @@ pub struct MobileAcctViewConfig { /// and should not much harm performance otherwise when loading the DB. #[clap(long, default_value = "1000", env = "MC_BLOCK_QUERY_BATCH_SIZE")] pub block_query_batch_size: usize, + + /// Determines which group of TxOuts the Fog View Store instance will + /// process. + #[clap(long, default_value = "default")] + pub sharding_strategy: ShardingStrategy, +} + +/// Determines which group of TxOuts the Fog View Store instance will process. +#[derive(Clone, Serialize)] +pub enum ShardingStrategy { + /// URI used by the FogViewServer when fulfilling direct client requests. 
+ Epoch(EpochShardingStrategy), +} + +impl FromStr for ShardingStrategy { + type Err = String; + + fn from_str(s: &str) -> Result { + if s.eq("default") { + return Ok(ShardingStrategy::Epoch(EpochShardingStrategy::default())); + } + if let Ok(epoch_sharding_strategy) = EpochShardingStrategy::from_str(s) { + return Ok(ShardingStrategy::Epoch(epoch_sharding_strategy)); + } + + Err("Invalid sharding strategy config.".to_string()) + } +} + +/// Configuration parameters for the Fog View Router. +#[derive(Clone, Parser, Serialize)] +#[clap(version)] +pub struct FogViewRouterConfig { + /// The ID with which to respond to client attestation requests. + /// + /// This ID needs to match the host:port clients use in their URI when + /// referencing this node. + #[clap(long, env = "MC_CLIENT_RESPONDER_ID")] + pub client_responder_id: ResponderId, + + /// gRPC listening URI for client requests. + #[clap(long, env = "MC_CLIENT_LISTEN_URI")] + pub client_listen_uri: RouterClientListenUri, + + /// gRPC listening URI for Fog View Stores. Should be indexed the same as + /// the `sharding_strategies` field. + #[clap(long, use_value_delimiter = true, env = "MC_VIEW_SHARD_URIS")] + pub shard_uris: Vec, + + /// PEM-formatted keypair to send with an Attestation Request. + #[clap(long, env = "MC_IAS_API_KEY")] + pub ias_api_key: String, + + /// The IAS SPID to use when getting a quote + #[clap(long, env = "MC_IAS_SPID")] + pub ias_spid: ProviderId, + + /// The capacity to build the OMAP (ORAM hash table) with. + /// About 75% of this capacity can be used. + /// The hash table will overflow when there are more TxOut's than this, + /// and the server will have to be restarted with a larger number. + /// + /// Note: At time of writing, the hash table will be allocated to use all + /// available SGX EPC memory, and then beyond that it will be allocated on + /// the heap in the untrusted side. 
Once the needed capacity exceeds RAM, + /// you will either get killed by OOM killer, or it will start being swapped + /// to disk by linux kernel. + #[clap(long, default_value = "1048576", env = "MC_OMAP_CAPACITY")] + pub omap_capacity: u64, + + /// Router admin listening URI. + #[clap(long, env = "MC_ADMIN_LISTEN_URI")] + pub admin_listen_uri: AdminUri, + + /// The chain id of the network we are a part of + #[clap(long, env = "MC_CHAIN_ID")] + pub chain_id: String, + + /// Enables authenticating client requests using Authorization tokens using + /// the provided hex-encoded 32 bytes shared secret. + #[clap(long, value_parser = mc_util_parse::parse_hex::<[u8; 32]>, env = "MC_CLIENT_AUTH_TOKEN_SECRET")] + pub client_auth_token_secret: Option<[u8; 32]>, + + /// Maximal client authentication token lifetime, in seconds (only relevant + /// when --client-auth-token-secret is used. Defaults to 86400 - 24 + /// hours). + #[clap(long, default_value = "86400", value_parser = parse_duration_in_seconds, env = "MC_CLIENT_AUTH_TOKEN_MAX_LIFETIME")] + pub client_auth_token_max_lifetime: Duration, +} + +/// A FogViewRouterServer can either fulfill streaming or unary requests, and +/// these different modes require different URIs. +#[derive(Clone, Serialize)] +pub enum RouterClientListenUri { + /// URI used by the FogViewRouterAPI service. + Streaming(FogViewRouterUri), + /// URI used by the FogViewAPI service. 
+ Unary(FogViewUri), +} + +impl FromStr for RouterClientListenUri { + type Err = String; + fn from_str(input: &str) -> Result { + if let Ok(fog_view_uri) = FogViewUri::from_str(input) { + return Ok(RouterClientListenUri::Unary(fog_view_uri)); + } + if let Ok(fog_view_router_uri) = FogViewRouterUri::from_str(input) { + return Ok(RouterClientListenUri::Streaming(fog_view_router_uri)); + } + + Err(format!("Incorrect ClientListenUri string: {input}.")) + } } diff --git a/fog/view/server/src/db_fetcher.rs b/fog/view/server/src/db_fetcher.rs index d258ac32c6..f6b316e077 100644 --- a/fog/view/server/src/db_fetcher.rs +++ b/fog/view/server/src/db_fetcher.rs @@ -2,11 +2,11 @@ //! An object for managing background data fetches from the recovery database. -use crate::{block_tracker::BlockTracker, counters}; +use crate::{block_tracker::BlockTracker, counters, sharding_strategy::ShardingStrategy}; use mc_common::logger::{log, Logger}; use mc_crypto_keys::CompressedRistrettoPublic; use mc_fog_recovery_db_iface::{IngressPublicKeyRecord, IngressPublicKeyRecordFilters, RecoveryDb}; -use mc_fog_types::ETxOutRecord; +use mc_fog_types::{common::BlockRange, ETxOutRecord}; use mc_util_grpc::ReadinessIndicator; use std::{ sync::{ @@ -75,12 +75,17 @@ pub struct DbFetcher { } impl DbFetcher { - pub fn new( + pub fn new( db: DB, readiness_indicator: ReadinessIndicator, + sharding_strategy: SS, block_query_batch_size: usize, logger: Logger, - ) -> Self { + ) -> Self + where + DB: RecoveryDb + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Clone + Send + Sync + 'static, + { let stop_requested = Arc::new(AtomicBool::new(false)); let shared_state = Arc::new(Mutex::new(DbFetcherSharedState::default())); @@ -103,6 +108,7 @@ impl DbFetcher { thread_shared_state, thread_num_queued_records_limiter, readiness_indicator, + sharding_strategy, block_query_batch_size, logger, ) @@ -167,11 +173,15 @@ impl Drop for DbFetcher { } } -struct DbFetcherThread { +struct DbFetcherThread +where + DB: 
RecoveryDb + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Clone + Send + Sync + 'static, +{ db: DB, stop_requested: Arc, shared_state: Arc>, - block_tracker: BlockTracker, + block_tracker: BlockTracker, num_queued_records_limiter: Arc<(Mutex, Condvar)>, readiness_indicator: ReadinessIndicator, block_query_batch_size: usize, @@ -180,13 +190,18 @@ struct DbFetcherThread { /// Background worker thread implementation that takes care of periodically /// polling data out of the database. -impl DbFetcherThread { +impl DbFetcherThread +where + DB: RecoveryDb + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Clone + Send + Sync + 'static, +{ pub fn start( db: DB, stop_requested: Arc, shared_state: Arc>, num_queued_records_limiter: Arc<(Mutex, Condvar)>, readiness_indicator: ReadinessIndicator, + sharding_strategy: SS, block_query_batch_size: usize, logger: Logger, ) { @@ -198,7 +213,7 @@ impl DbFetcherThread { db, stop_requested, shared_state, - block_tracker: BlockTracker::new(logger.clone()), + block_tracker: BlockTracker::new(logger.clone(), sharding_strategy), num_queued_records_limiter, readiness_indicator, block_query_batch_size, @@ -284,14 +299,13 @@ impl DbFetcherThread { ); for (ingress_key, block_index) in next_block_index_per_ingress_key.into_iter() { - // Attempt to load data for the next block. + let block_range = + BlockRange::new_from_length(block_index, self.block_query_batch_size as u64); + // Attempt to load data for the block range. 
let get_tx_outs_by_block_result = { let _metrics_timer = counters::GET_TX_OUTS_BY_BLOCK_TIME.start_timer(); - self.db.get_tx_outs_by_block_range_and_key( - ingress_key, - block_index, - self.block_query_batch_size, - ) + self.db + .get_tx_outs_by_block_range_and_key(ingress_key, &block_range) }; match get_tx_outs_by_block_result { @@ -317,11 +331,17 @@ impl DbFetcherThread { for (idx, tx_outs) in block_results.into_iter().enumerate() { // shadow block_index using the offset from enumerate // block_index is now the index of these tx_outs - let block_index = block_index + idx as u64; + let block_index = block_index + (idx as u64); let num_tx_outs = tx_outs.len(); - // Mark that we are done fetching data for this block. - self.block_tracker.block_processed(ingress_key, block_index); + if !self.block_tracker.block_processed(ingress_key, block_index) { + log::trace!( + self.logger, + "Not adding block_index {} TxOuts because this shard is not responsible for it.", + block_index, + ); + continue; + } // Store the fetched records so that they could be consumed by the // enclave when its ready. @@ -378,6 +398,7 @@ impl DbFetcherThread { #[cfg(test)] mod tests { use super::*; + use crate::sharding_strategy::EpochShardingStrategy; use mc_attest_core::VerificationReport; use mc_common::logger::test_with_logger; use mc_fog_recovery_db_iface::{IngressPublicKeyStatus, ReportData, ReportDb}; @@ -393,7 +414,13 @@ mod tests { let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); let db_test_context = SqlRecoveryDbTestContext::new(logger.clone()); let db = db_test_context.get_db_instance(); - let db_fetcher = DbFetcher::new(db.clone(), Default::default(), 1, logger); + let db_fetcher = DbFetcher::new( + db.clone(), + Default::default(), + EpochShardingStrategy::default(), + 1, + logger, + ); // Initially, our database starts empty. 
let ingress_keys = db_fetcher.get_highest_processed_block_context(); @@ -623,7 +650,13 @@ mod tests { let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); let db_test_context = SqlRecoveryDbTestContext::new(logger.clone()); let db = db_test_context.get_db_instance(); - let db_fetcher = DbFetcher::new(db.clone(), Default::default(), 1, logger); + let db_fetcher = DbFetcher::new( + db.clone(), + Default::default(), + EpochShardingStrategy::default(), + 1, + logger, + ); // Register two ingress keys that have some overlap: // key_id1 starts at block 0, key2 starts at block 5. @@ -680,7 +713,13 @@ mod tests { let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); let db_test_context = SqlRecoveryDbTestContext::new(logger.clone()); let db = db_test_context.get_db_instance(); - let db_fetcher = DbFetcher::new(db.clone(), Default::default(), 1, logger); + let db_fetcher = DbFetcher::new( + db.clone(), + Default::default(), + EpochShardingStrategy::default(), + 1, + logger, + ); // Register two ingress keys that have some overlap: // invoc_id1 starts at block 0, invoc_id2 starts at block 50. 
diff --git a/fog/view/server/src/error.rs b/fog/view/server/src/error.rs index 473dcfaec2..cef1687a80 100644 --- a/fog/view/server/src/error.rs +++ b/fog/view/server/src/error.rs @@ -1,8 +1,68 @@ // Copyright (c) 2018-2022 The MobileCoin Foundation use displaydoc::Display; +use grpcio::RpcStatus; +use mc_common::logger::Logger; use mc_fog_view_enclave::Error as ViewEnclaveError; use mc_sgx_report_cache_untrusted::Error as ReportCacheError; +use mc_util_grpc::{rpc_internal_error, rpc_permissions_error}; + +#[derive(Debug, Display)] +pub enum RouterServerError { + /// Error related to contacting Fog View Store: {0} + ViewStoreError(String), + /// View Enclave error: {0} + Enclave(ViewEnclaveError), +} + +impl From for RouterServerError { + fn from(src: grpcio::Error) -> Self { + RouterServerError::ViewStoreError(format!("{src}")) + } +} + +impl From for RouterServerError { + fn from(src: mc_common::ResponderIdParseError) -> Self { + RouterServerError::ViewStoreError(src.to_string()) + } +} + +impl From for RouterServerError { + fn from(src: mc_util_uri::UriParseError) -> Self { + RouterServerError::ViewStoreError(src.to_string()) + } +} + +impl From for RouterServerError { + fn from(src: mc_api::ConversionError) -> Self { + RouterServerError::ViewStoreError(src.to_string()) + } +} + +impl From for RouterServerError { + fn from(src: mc_util_uri::UriConversionError) -> Self { + RouterServerError::ViewStoreError(src.to_string()) + } +} + +pub fn router_server_err_to_rpc_status( + context: &str, + src: RouterServerError, + logger: Logger, +) -> RpcStatus { + match src { + RouterServerError::ViewStoreError(_) => { + rpc_internal_error(context, src.to_string(), &logger) + } + RouterServerError::Enclave(_) => rpc_permissions_error(context, src.to_string(), &logger), + } +} + +impl From for RouterServerError { + fn from(src: ViewEnclaveError) -> Self { + RouterServerError::Enclave(src) + } +} #[derive(Debug, Display)] pub enum ViewServerError { diff --git 
a/fog/view/server/src/fog_view_router_server.rs b/fog/view/server/src/fog_view_router_server.rs new file mode 100644 index 0000000000..ae688e66b7 --- /dev/null +++ b/fog/view/server/src/fog_view_router_server.rs @@ -0,0 +1,229 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +//! Server object containing a view node +//! Constructible from config (for testability) and with a mechanism for +//! stopping it + +use crate::{ + config::{FogViewRouterConfig, RouterClientListenUri}, + counters, + fog_view_router_service::FogViewRouterService, + router_admin_service::FogViewRouterAdminService, +}; +use futures::executor::block_on; +use mc_attest_net::RaClient; +use mc_common::{ + logger::{log, Logger}, + time::TimeProvider, +}; +use mc_fog_api::view_grpc; +use mc_fog_types::common::BlockRange; +use mc_fog_uri::{ConnectionUri, FogViewStoreUri}; +use mc_fog_view_enclave::ViewEnclaveProxy; +use mc_sgx_report_cache_untrusted::ReportCacheThread; +use mc_util_grpc::{ + AnonymousAuthenticator, Authenticator, ConnectionUriGrpcioServer, TokenAuthenticator, +}; +use std::sync::{Arc, RwLock}; + +pub struct FogViewRouterServer +where + E: ViewEnclaveProxy, + RC: RaClient + Send + Sync + 'static, +{ + router_server: grpcio::Server, + admin_server: grpcio::Server, + enclave: E, + config: FogViewRouterConfig, + logger: Logger, + ra_client: RC, + report_cache_thread: Option, +} + +/// A shard that fulfills a portion of the router's query requests. +#[derive(Clone)] +pub struct Shard { + /// The uri that this shard listens on. + pub uri: FogViewStoreUri, + + /// The gRPC client that is used to communicate with the shard. + pub grpc_client: Arc, + + /// The `BlockRange` that this shard is responsible for providing. 
+ pub block_range: BlockRange, +} + +impl Shard { + pub fn new( + uri: FogViewStoreUri, + grpc_client: Arc, + block_range: BlockRange, + ) -> Self { + Self { + uri, + grpc_client, + block_range, + } + } +} + +impl FogViewRouterServer +where + E: ViewEnclaveProxy, + RC: RaClient + Send + Sync + 'static, +{ + /// Creates a new view router server instance + pub fn new( + config: FogViewRouterConfig, + enclave: E, + ra_client: RC, + shards: Arc>>, + time_provider: impl TimeProvider + 'static, + logger: Logger, + ) -> FogViewRouterServer + where + E: ViewEnclaveProxy, + { + let env = Arc::new( + grpcio::EnvBuilder::new() + .name_prefix("Fog-view-router-server".to_string()) + .build(), + ); + + let client_authenticator: Arc = + if let Some(shared_secret) = config.client_auth_token_secret.as_ref() { + Arc::new(TokenAuthenticator::new( + *shared_secret, + config.client_auth_token_max_lifetime, + time_provider, + )) + } else { + Arc::new(AnonymousAuthenticator::default()) + }; + + let fog_view_router_admin_service = view_grpc::create_fog_view_router_admin_api( + FogViewRouterAdminService::new(shards.clone(), logger.clone()), + ); + log::debug!(logger, "Constructed Fog View Router Admin GRPC Service"); + + // Health check service + let health_service = mc_util_grpc::HealthService::new(None, logger.clone()).into_service(); + + let router_server = match config.client_listen_uri { + RouterClientListenUri::Streaming(ref streaming_uri) => { + let fog_view_router_service = + view_grpc::create_fog_view_router_api(FogViewRouterService::new( + enclave.clone(), + shards, + config.chain_id.clone(), + client_authenticator, + logger.clone(), + )); + log::debug!(logger, "Constructed Fog View Router streaming GRPC Service"); + log::info!( + logger, + "Starting Fog View Router streaming server on {}", + streaming_uri.addr(), + ); + + grpcio::ServerBuilder::new(env.clone()) + .register_service(fog_view_router_service) + .register_service(health_service) + .build_using_uri(streaming_uri, 
logger.clone()) + .expect("Unable to build streaming Fog View Router server") + } + RouterClientListenUri::Unary(ref unary_uri) => { + let fog_view_router_service = + view_grpc::create_fog_view_api(FogViewRouterService::new( + enclave.clone(), + shards, + config.chain_id.clone(), + client_authenticator, + logger.clone(), + )); + log::debug!(logger, "Constructed Fog View Router unary GRPC Service"); + log::info!( + logger, + "Starting Fog View Router unary server on {}", + unary_uri.addr(), + ); + grpcio::ServerBuilder::new(env.clone()) + .register_service(fog_view_router_service) + .register_service(health_service) + .build_using_uri(unary_uri, logger.clone()) + .expect("Unable to build unary Fog View Router server") + } + }; + + let admin_server = grpcio::ServerBuilder::new(env) + .register_service(fog_view_router_admin_service) + .build_using_uri(&config.admin_listen_uri, logger.clone()) + .expect("Unable to build Fog View Router admin server"); + + Self { + router_server, + admin_server, + enclave, + config, + logger, + ra_client, + report_cache_thread: None, + } + } + + /// Starts the server + pub fn start(&mut self) { + self.report_cache_thread = Some( + ReportCacheThread::start( + self.enclave.clone(), + self.ra_client.clone(), + self.config.ias_spid, + &counters::ENCLAVE_REPORT_TIMESTAMP, + self.logger.clone(), + ) + .expect("failed starting report cache thread"), + ); + self.router_server.start(); + match &self.config.client_listen_uri { + RouterClientListenUri::Streaming(uri) => { + log::info!( + self.logger, + "Router streaming GRPC API listening on {}", + uri.addr(), + ); + } + RouterClientListenUri::Unary(uri) => { + log::info!( + self.logger, + "Router unary GRPC API listening on {}", + uri.addr(), + ); + } + } + self.admin_server.start(); + log::info!( + self.logger, + "Router Admin API listening on {}", + self.config.admin_listen_uri.addr(), + ); + } + + /// Stops the server + pub fn stop(&mut self) { + if let Some(ref mut thread) = 
self.report_cache_thread.take() { + thread.stop().expect("Could not stop report cache thread"); + } + block_on(self.router_server.shutdown()).expect("Could not stop router grpc server"); + block_on(self.admin_server.shutdown()).expect("Could not stop admin router server"); + } +} + +impl Drop for FogViewRouterServer +where + E: ViewEnclaveProxy, + RC: RaClient + Send + Sync + 'static, +{ + fn drop(&mut self) { + self.stop(); + } +} diff --git a/fog/view/server/src/fog_view_router_service.rs b/fog/view/server/src/fog_view_router_service.rs new file mode 100644 index 0000000000..c5646594ec --- /dev/null +++ b/fog/view/server/src/fog_view_router_service.rs @@ -0,0 +1,147 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +use crate::{fog_view_router_server::Shard, router_request_handler, SVC_COUNTERS}; +use futures::{executor::block_on, FutureExt, TryFutureExt}; +use grpcio::{DuplexSink, RequestStream, RpcContext, UnarySink}; +use mc_attest_api::attest; +use mc_common::logger::{log, Logger}; +use mc_fog_api::{ + view::{FogViewRouterRequest, FogViewRouterResponse}, + view_grpc::{FogViewApi, FogViewRouterApi}, +}; +use mc_fog_view_enclave_api::ViewEnclaveProxy; +use mc_util_grpc::{check_request_chain_id, rpc_logger, send_result, Authenticator}; +use mc_util_metrics::ServiceMetrics; +use mc_util_telemetry::tracer; +use std::sync::{Arc, RwLock}; + +#[derive(Clone)] +pub struct FogViewRouterService +where + E: ViewEnclaveProxy, +{ + enclave: E, + shards: Arc>>, + chain_id: String, + /// GRPC request authenticator. + authenticator: Arc, + logger: Logger, +} + +impl FogViewRouterService { + /// Creates a new FogViewRouterService that can be used by a gRPC server to + /// fulfill gRPC requests. + /// + /// TODO: Add a `view_store_clients` parameter of type FogApiClient, and + /// perform view store authentication on each one. 
+ pub fn new( + enclave: E, + shards: Arc>>, + chain_id: String, + authenticator: Arc, + logger: Logger, + ) -> Self { + Self { + enclave, + shards, + chain_id, + authenticator, + logger, + } + } +} + +impl FogViewRouterApi for FogViewRouterService +where + E: ViewEnclaveProxy, +{ + fn request( + &mut self, + ctx: RpcContext, + requests: RequestStream, + responses: DuplexSink, + ) { + mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| { + let logger = logger.clone(); + // TODO: Confirm that we don't need to perform the authenticator logic. I think + // we don't because of streaming... + let shards = self.shards.read().expect("RwLock poisoned"); + let method_name = ServiceMetrics::get_method_name(&ctx); + let future = router_request_handler::handle_requests( + method_name, + shards.clone(), + self.enclave.clone(), + requests, + responses, + logger.clone(), + ) + .map_err(move |err: grpcio::Error| log::error!(&logger, "failed to reply: {}", err)) + // TODO: Do stuff with the error + .map(|_| ()); + + ctx.spawn(future) + }); + } +} + +impl FogViewApi for FogViewRouterService +where + E: ViewEnclaveProxy, +{ + fn auth( + &mut self, + ctx: RpcContext, + request: attest::AuthMessage, + sink: UnarySink, + ) { + let _timer = SVC_COUNTERS.req(&ctx); + mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| { + if let Err(err) = check_request_chain_id(&self.chain_id, &ctx) { + return send_result(ctx, sink, Err(err), logger); + } + + if let Err(err) = self.authenticator.authenticate_rpc(&ctx) { + return send_result(ctx, sink, err.into(), logger); + } + let result = router_request_handler::handle_auth_request( + self.enclave.clone(), + request, + self.logger.clone(), + ) + .map(|mut response| response.take_auth()); + + send_result(ctx, sink, result, logger); + }) + } + + fn query( + &mut self, + ctx: RpcContext, + request: attest::Message, + sink: UnarySink, + ) { + 
mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| { + if let Err(err) = check_request_chain_id(&self.chain_id, &ctx) { + return send_result(ctx, sink, Err(err), logger); + } + + if let Err(err) = self.authenticator.authenticate_rpc(&ctx) { + return send_result(ctx, sink, err.into(), logger); + } + + // This will block the async API. We should use some sort of differentiator... + let shards = self.shards.read().expect("RwLock poisoned"); + let tracer = tracer!(); + let result = block_on(router_request_handler::handle_query_request( + request, + self.enclave.clone(), + shards.clone(), + self.logger.clone(), + &tracer, + )) + .map(|mut response| response.take_query()); + + send_result(ctx, sink, result, logger) + }) + } +} diff --git a/fog/view/server/src/fog_view_service.rs b/fog/view/server/src/fog_view_service.rs index e4905c3956..daef519880 100644 --- a/fog/view/server/src/fog_view_service.rs +++ b/fog/view/server/src/fog_view_service.rs @@ -1,26 +1,35 @@ // Copyright (c) 2018-2022 The MobileCoin Foundation -use crate::{config::MobileAcctViewConfig, server::DbPollSharedState, SVC_COUNTERS}; +use crate::{server::DbPollSharedState, sharding_strategy::ShardingStrategy, SVC_COUNTERS}; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use mc_attest_api::attest; use mc_common::logger::{log, Logger}; -use mc_fog_api::view_grpc::FogViewApi; +use mc_fog_api::{ + fog_common::BlockRange, + view::{ + MultiViewStoreQueryRequest, MultiViewStoreQueryResponse, MultiViewStoreQueryResponseStatus, + }, + view_grpc::FogViewStoreApi, +}; use mc_fog_recovery_db_iface::RecoveryDb; use mc_fog_types::view::QueryRequestAAD; +use mc_fog_uri::{ConnectionUri, FogViewStoreUri}; use mc_fog_view_enclave::{Error as ViewEnclaveError, ViewEnclaveProxy}; use mc_fog_view_enclave_api::UntrustedQueryResponse; use mc_util_grpc::{ - check_request_chain_id, rpc_internal_error, rpc_invalid_arg_error, rpc_logger, - rpc_permissions_error, send_result, 
Authenticator, + rpc_internal_error, rpc_invalid_arg_error, rpc_logger, rpc_permissions_error, send_result, + Authenticator, }; -use mc_util_telemetry::{tracer, Tracer}; +use mc_util_telemetry::{tracer, BoxedTracer, Tracer}; use std::sync::{Arc, Mutex}; #[derive(Clone)] -pub struct FogViewService { - /// Server Config - config: MobileAcctViewConfig, - +pub struct FogViewService +where + E: ViewEnclaveProxy, + DB: RecoveryDb + Send + Sync, + SS: ShardingStrategy, +{ /// Enclave providing access to the Recovery DB enclave: E, @@ -30,92 +39,196 @@ pub struct FogViewService { /// Shared state from db polling thread. db_poll_shared_state: Arc>, + /// The URI that this service listens on. + uri: FogViewStoreUri, + /// GRPC request authenticator. authenticator: Arc, /// Slog logger object logger: Logger, + + /// Dictates what blocks to process. + sharding_strategy: SS, } -impl FogViewService { +impl FogViewService +where + E: ViewEnclaveProxy, + DB: RecoveryDb + Send + Sync, + SS: ShardingStrategy, +{ /// Creates a new fog-view-service node (but does not create sockets and /// start it etc.) pub fn new( - config: MobileAcctViewConfig, enclave: E, db: Arc, db_poll_shared_state: Arc>, + uri: FogViewStoreUri, authenticator: Arc, + sharding_strategy: SS, logger: Logger, ) -> Self { Self { - config, enclave, db, db_poll_shared_state, + uri, authenticator, + sharding_strategy, logger, } } + fn auth_impl( + &mut self, + mut request: attest::AuthMessage, + logger: &Logger, + ) -> Result { + // TODO: Use the prost message directly, once available + match self.enclave.frontend_accept(request.take_data().into()) { + Ok((response, _)) => { + let mut result = attest::AuthMessage::new(); + result.set_data(response.into()); + Ok(result) + } + Err(frontend_error) => { + // This is debug because there's no requirement on the remote party to trigger + // it. 
+ log::debug!( + logger, + "ViewEnclaveApi::frontend_accept failed: {}", + frontend_error + ); + let rpc_permissions_error = rpc_permissions_error( + "fontend_accept", + format!("Permission denied: {frontend_error}"), + logger, + ); + Err(rpc_permissions_error) + } + } + } + + pub fn create_untrusted_query_response( + &mut self, + aad: &[u8], + tracer: &BoxedTracer, + ) -> Result { + // Attempt and deserialize the untrusted portion of this request. + let query_request_aad: QueryRequestAAD = mc_util_serial::decode(aad).map_err(|err| { + RpcStatus::with_message( + RpcStatusCode::INVALID_ARGUMENT, + format!("AAD deserialization error: {err}"), + ) + })?; + + let (user_events, next_start_from_user_event_id) = + tracer.in_span("search_user_events", |_cx| { + self.db + .search_user_events(query_request_aad.start_from_user_event_id) + .map_err(|e| rpc_internal_error("search_user_events", e, &self.logger)) + })?; + + let ( + highest_processed_block_count, + highest_processed_block_signature_timestamp, + last_known_block_count, + last_known_block_cumulative_txo_count, + ) = tracer.in_span("get_shared_state", |_cx_| { + let shared_state = self.db_poll_shared_state.lock().expect("mutex poisoned"); + ( + shared_state.highest_processed_block_count, + shared_state.highest_processed_block_signature_timestamp, + shared_state.last_known_block_count, + shared_state.last_known_block_cumulative_txo_count, + ) + }); + + let untrusted_query_response = UntrustedQueryResponse { + user_events, + next_start_from_user_event_id, + highest_processed_block_count, + highest_processed_block_signature_timestamp, + last_known_block_count, + last_known_block_cumulative_txo_count, + }; + + Ok(untrusted_query_response) + } + /// Unwrap and forward to enclave pub fn query_impl(&mut self, request: attest::Message) -> Result { - log::trace!(self.logger, "Getting encrypted request"); let tracer = tracer!(); tracer.in_span("query_impl", |_cx| { - // Attempt and deserialize the untrusted portion of this 
request. - let query_request_aad: QueryRequestAAD = mc_util_serial::decode(request.get_aad()) - .map_err(|err| { - RpcStatus::with_message( - RpcStatusCode::INVALID_ARGUMENT, - format!("AAD deserialization error: {err}"), - ) - })?; - - let (user_events, next_start_from_user_event_id) = - tracer.in_span("search_user_events", |_cx| { - self.db - .search_user_events(query_request_aad.start_from_user_event_id) - .map_err(|e| rpc_internal_error("search_user_events", e, &self.logger)) - })?; - - let ( - highest_processed_block_count, - highest_processed_block_signature_timestamp, - last_known_block_count, - last_known_block_cumulative_txo_count, - ) = tracer.in_span("get_shared_state", |_cx_| { - let shared_state = self.db_poll_shared_state.lock().expect("mutex poisoned"); - ( - shared_state.highest_processed_block_count, - shared_state.highest_processed_block_signature_timestamp, - shared_state.last_known_block_count, - shared_state.last_known_block_cumulative_txo_count, - ) - }); - - let untrusted_query_response = UntrustedQueryResponse { - user_events, - next_start_from_user_event_id, - highest_processed_block_count, - highest_processed_block_signature_timestamp, - last_known_block_count, - last_known_block_cumulative_txo_count, - }; - - let result_blob = tracer.in_span("enclave_query", |_cx| { + let untrusted_query_response = + self.create_untrusted_query_response(request.get_aad(), &tracer)?; + let data = tracer.in_span("enclave_query", |_cx| { self.enclave .query(request.into(), untrusted_query_response) .map_err(|e| self.enclave_err_to_rpc_status("enclave request", e)) })?; let mut resp = attest::Message::new(); - resp.set_data(result_blob); + resp.set_data(data); Ok(resp) }) } + /// Unwrap and forward to enclave + pub fn query_nonce_impl( + &mut self, + request: attest::NonceMessage, + ) -> Result { + let tracer = tracer!(); + tracer.in_span("query_nonce_impl", |_cx| { + let untrusted_query_response = + self.create_untrusted_query_response(request.get_aad(), 
&tracer)?; + let enclave_nonce_message = tracer.in_span("enclave_query_store", |_cx| { + self.enclave + .query_store(request.into(), untrusted_query_response) + .map_err(|e| self.enclave_err_to_rpc_status("enclave request", e)) + })?; + + Ok(enclave_nonce_message.into()) + }) + } + + fn process_queries( + &mut self, + fog_view_store_uri: FogViewStoreUri, + queries: Vec, + ) -> MultiViewStoreQueryResponse { + let mut response = MultiViewStoreQueryResponse::new(); + let fog_view_store_uri_string = fog_view_store_uri.url().to_string(); + response.set_store_uri(fog_view_store_uri_string); + let block_range = BlockRange::from(&self.sharding_strategy.get_block_range()); + response.set_block_range(block_range); + for query in queries.into_iter() { + let result = self.query_nonce_impl(query); + // Only one of the query messages in an MVSQR is intended for this store + if let Ok(attested_message) = result { + { + let shared_state = self.db_poll_shared_state.lock().expect("mutex poisoned"); + if !self + .sharding_strategy + .is_ready_to_serve_tx_outs(shared_state.processed_block_count.into()) + { + response.set_status(MultiViewStoreQueryResponseStatus::NOT_READY); + } else { + response.set_query_response(attested_message); + response.set_status(MultiViewStoreQueryResponseStatus::SUCCESS); + } + } + return response; + } + } + + response.set_status(MultiViewStoreQueryResponseStatus::AUTHENTICATION_ERROR); + response + } + // Helper function that is common fn enclave_err_to_rpc_status(&self, context: &str, src: ViewEnclaveError) -> RpcStatus { // Treat prost-decode error as an invalid arg, @@ -133,71 +246,44 @@ impl FogViewService { } } -// Implement grpc trait -impl FogViewApi for FogViewService { +/// Implement the FogViewStoreService gRPC trait. 
+impl FogViewStoreApi for FogViewService +where + E: ViewEnclaveProxy, + DB: RecoveryDb + Send + Sync, + SS: ShardingStrategy, +{ fn auth( &mut self, ctx: RpcContext, - mut request: attest::AuthMessage, + request: attest::AuthMessage, sink: UnarySink, ) { let _timer = SVC_COUNTERS.req(&ctx); mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| { - if let Err(err) = check_request_chain_id(&self.config.chain_id, &ctx) { - return send_result(ctx, sink, Err(err), logger); - } - if let Err(err) = self.authenticator.authenticate_rpc(&ctx) { return send_result(ctx, sink, err.into(), logger); } - // TODO: Use the prost message directly, once available - match self.enclave.client_accept(request.take_data().into()) { - Ok((response, _)) => { - let mut result = attest::AuthMessage::new(); - result.set_data(response.into()); - send_result(ctx, sink, Ok(result), logger); - } - Err(client_error) => { - // This is debug because there's no requirement on the remote party to trigger - // it. - log::debug!( - logger, - "ViewEnclaveApi::client_accept failed: {}", - client_error - ); - send_result( - ctx, - sink, - Err(rpc_permissions_error( - "client_auth", - format!("Permission denied: {client_error}"), - logger, - )), - logger, - ); - } - } - }); + send_result(ctx, sink, self.auth_impl(request, logger), logger); + }) } - fn query( + /// Fulfills the query if the MultiViewStoreQueryRequest contains an + /// encrypted Query for the store. If it doesn't, then it responds with + /// an grpc error that contains the store's hostname. 
+ fn multi_view_store_query( &mut self, ctx: RpcContext, - request: attest::Message, - sink: UnarySink, + request: MultiViewStoreQueryRequest, + sink: UnarySink, ) { - let _timer = SVC_COUNTERS.req(&ctx); mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| { - if let Err(err) = check_request_chain_id(&self.config.chain_id, &ctx) { - return send_result(ctx, sink, Err(err), logger); - } - if let Err(err) = self.authenticator.authenticate_rpc(&ctx) { return send_result(ctx, sink, err.into(), logger); } - - send_result(ctx, sink, self.query_impl(request), logger) - }) + let response = self.process_queries(self.uri.clone(), request.queries.into_vec()); + send_result(ctx, sink, Ok(response), logger) + }); } } diff --git a/fog/view/server/src/lib.rs b/fog/view/server/src/lib.rs index 7e1a61af39..84422f1b20 100644 --- a/fog/view/server/src/lib.rs +++ b/fog/view/server/src/lib.rs @@ -6,12 +6,18 @@ use mc_util_metrics::ServiceMetrics; pub mod config; pub mod error; +pub mod fog_view_router_server; +pub mod fog_view_router_service; pub mod fog_view_service; pub mod server; +pub mod sharding_strategy; mod block_tracker; mod counters; mod db_fetcher; +mod router_admin_service; +mod router_request_handler; +mod shard_responses_processor; lazy_static::lazy_static! 
{ pub static ref SVC_COUNTERS: ServiceMetrics = ServiceMetrics::new_and_registered("fog_view"); diff --git a/fog/view/server/src/router_admin_service.rs b/fog/view/server/src/router_admin_service.rs new file mode 100644 index 0000000000..b94da57f96 --- /dev/null +++ b/fog/view/server/src/router_admin_service.rs @@ -0,0 +1,85 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +use crate::{ + fog_view_router_server::Shard, + sharding_strategy::{EpochShardingStrategy, ShardingStrategy}, + SVC_COUNTERS, +}; +use grpcio::{ChannelBuilder, RpcContext, RpcStatus, UnarySink}; +use mc_common::logger::{log, Logger}; +use mc_fog_api::{ + view::AddShardRequest, + view_grpc::{FogViewRouterAdminApi, FogViewStoreApiClient}, +}; +use mc_fog_uri::FogViewStoreUri; +use mc_util_grpc::{ + rpc_invalid_arg_error, rpc_logger, rpc_precondition_error, send_result, + ConnectionUriGrpcioChannel, Empty, +}; +use std::{ + str::FromStr, + sync::{Arc, RwLock}, +}; + +#[derive(Clone)] +pub struct FogViewRouterAdminService { + shards: Arc>>, + logger: Logger, +} + +impl FogViewRouterAdminService { + pub fn new(shards: Arc>>, logger: Logger) -> Self { + Self { shards, logger } + } + + fn add_shard_impl(&mut self, shard_uri: &str, logger: &Logger) -> Result { + let view_store_uri = FogViewStoreUri::from_str(shard_uri).map_err(|_| { + rpc_invalid_arg_error( + "add_shard", + format!("Shard uri string {shard_uri} is invalid"), + logger, + ) + })?; + let mut shards = self.shards.write().expect("RwLock Poisoned"); + if shards + .iter() + .any(|shard| shard.uri.clone() == view_store_uri) + { + let error = rpc_precondition_error( + "add_shard", + format!("Shard uri {shard_uri} already exists in the shard list"), + logger, + ); + return Err(error); + } + let grpc_env = Arc::new( + grpcio::EnvBuilder::new() + .name_prefix("add-shard".to_string()) + .build(), + ); + let view_store_client = FogViewStoreApiClient::new( + ChannelBuilder::default_channel_builder(grpc_env) + .connect_to_uri(&view_store_uri, 
logger), + ); + let block_range = EpochShardingStrategy::default().get_block_range(); + let shard = Shard::new(view_store_uri, Arc::new(view_store_client), block_range); + shards.push(shard); + + Ok(Empty::new()) + } +} + +impl FogViewRouterAdminApi for FogViewRouterAdminService { + fn add_shard(&mut self, ctx: RpcContext, request: AddShardRequest, sink: UnarySink) { + log::info!(self.logger, "Request received in add_shard fn"); + let _timer = SVC_COUNTERS.req(&ctx); + mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| { + send_result( + ctx, + sink, + self.add_shard_impl(request.get_shard_uri(), logger), + logger, + ); + }); + } +} diff --git a/fog/view/server/src/router_request_handler.rs b/fog/view/server/src/router_request_handler.rs new file mode 100644 index 0000000000..b54cc81cc0 --- /dev/null +++ b/fog/view/server/src/router_request_handler.rs @@ -0,0 +1,300 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +use crate::{ + error::{router_server_err_to_rpc_status, RouterServerError}, + fog_view_router_server::Shard, + shard_responses_processor, SVC_COUNTERS, +}; +use futures::{future::try_join_all, SinkExt, TryStreamExt}; +use grpcio::{ChannelBuilder, DuplexSink, RequestStream, RpcStatus, WriteFlags}; +use mc_attest_api::attest; +use mc_attest_enclave_api::SealedClientMessage; +use mc_common::logger::Logger; +use mc_fog_api::{ + view::{FogViewRouterRequest, FogViewRouterResponse, MultiViewStoreQueryRequest}, + view_grpc::FogViewStoreApiClient, +}; +use mc_fog_types::view::MultiViewStoreQueryResponse; +use mc_fog_uri::FogViewStoreUri; +use mc_fog_view_enclave_api::ViewEnclaveProxy; +use mc_util_grpc::{rpc_invalid_arg_error, ConnectionUriGrpcioChannel, ResponseStatus}; +use mc_util_metrics::GrpcMethodName; +use mc_util_telemetry::{create_context, tracer, BoxedTracer, FutureExt, Tracer}; +use mc_util_uri::ConnectionUri; +use std::sync::Arc; + +const RETRY_COUNT: usize = 3; + +/// Handles a series of requests sent by the 
Fog Router client. +pub async fn handle_requests( + method_name: GrpcMethodName, + shards: Vec, + enclave: E, + mut requests: RequestStream, + mut responses: DuplexSink, + logger: Logger, +) -> Result<(), grpcio::Error> +where + E: ViewEnclaveProxy, +{ + while let Some(request) = requests.try_next().await? { + let _timer = SVC_COUNTERS.req_impl(&method_name); + let result = handle_request(request, shards.clone(), enclave.clone(), logger.clone()).await; + + // Perform prometheus logic before the match statement to ensure that + // this logic is executed. + let response_status = ResponseStatus::from(&result); + SVC_COUNTERS.resp_impl(&method_name, response_status.is_success); + SVC_COUNTERS.status_code_impl(&method_name, response_status.code); + + match result { + Ok(response) => responses.send((response, WriteFlags::default())).await?, + Err(rpc_status) => return responses.fail(rpc_status).await, + } + } + responses.close().await?; + Ok(()) +} + +/// Handles a client's request by performing either an authentication or a +/// query. +pub async fn handle_request( + mut request: FogViewRouterRequest, + shards: Vec, + enclave: E, + logger: Logger, +) -> Result +where + E: ViewEnclaveProxy, +{ + let tracer = tracer!(); + if request.has_auth() { + tracer.in_span("router_auth", |_cx| { + handle_auth_request(enclave, request.take_auth(), logger) + }) + } else if request.has_query() { + handle_query_request(request.take_query(), enclave, shards, logger, &tracer) + .with_context(create_context(&tracer, "router_query")) + .await + } else { + let rpc_status = rpc_invalid_arg_error( + "Inavlid FogViewRouterRequest request", + "Neither the query nor auth fields were set".to_string(), + &logger, + ); + Err(rpc_status) + } +} + +/// Handles a client's authentication request. 
+pub fn handle_auth_request( + enclave: E, + auth_message: attest::AuthMessage, + logger: Logger, +) -> Result +where + E: ViewEnclaveProxy, +{ + let (client_auth_response, _) = enclave.client_accept(auth_message.into()).map_err(|err| { + router_server_err_to_rpc_status("Auth: e client accept", err.into(), logger) + })?; + + let mut response = FogViewRouterResponse::new(); + response.mut_auth().set_data(client_auth_response.into()); + Ok(response) +} + +/// Handles a client's query request. +pub async fn handle_query_request( + query: attest::Message, + enclave: E, + shards: Vec, + logger: Logger, + tracer: &BoxedTracer, +) -> Result +where + E: ViewEnclaveProxy, +{ + let sealed_query = enclave + .decrypt_and_seal_query(query.into()) + .map_err(|err| { + router_server_err_to_rpc_status( + "Query: internal decrypt and seal error", + err.into(), + logger.clone(), + ) + })?; + + let query_responses = get_query_responses( + sealed_query.clone(), + enclave.clone(), + shards.clone(), + logger.clone(), + ) + .with_context(create_context(tracer, "router_get_query_responses")) + .await?; + + let query_response = tracer.in_span("router_collate_query_responses", |_cx| { + enclave + .collate_shard_query_responses(sealed_query, query_responses) + .map_err(|err| { + router_server_err_to_rpc_status( + "Query: shard response collation", + RouterServerError::Enclave(err), + logger.clone(), + ) + }) + })?; + + let mut response = FogViewRouterResponse::new(); + response.set_query(query_response.into()); + Ok(response) +} + +async fn get_query_responses( + sealed_query: SealedClientMessage, + enclave: E, + mut shards: Vec, + logger: Logger, +) -> Result, RpcStatus> +where + E: ViewEnclaveProxy, +{ + let mut query_responses: Vec = Vec::with_capacity(shards.len()); + let mut remaining_tries = RETRY_COUNT; + while remaining_tries > 0 { + let multi_view_store_query_request = enclave + .create_multi_view_store_query_data(sealed_query.clone()) + .map_err(|err| { + 
router_server_err_to_rpc_status( + "Query: internal encryption error for MultiViewStoreQueryData", + err.into(), + logger.clone(), + ) + })? + .into(); + let clients_and_responses = route_query(&multi_view_store_query_request, shards.clone()) + .await + .map_err(|err| { + router_server_err_to_rpc_status( + "Query: internal query routing error", + err, + logger.clone(), + ) + })?; + + let processed_shard_response_data = shard_responses_processor::process_shard_responses( + clients_and_responses, + logger.clone(), + ) + .map_err(|err| { + router_server_err_to_rpc_status( + "Query: internal query response processing", + err, + logger.clone(), + ) + })?; + + for multi_view_store_query_response in processed_shard_response_data + .multi_view_store_query_responses + .into_iter() + { + query_responses.push(multi_view_store_query_response); + } + + shards = processed_shard_response_data.shards_for_retry; + if shards.is_empty() { + break; + } + + let view_store_uris_for_authentication = + processed_shard_response_data.view_store_uris_for_authentication; + if !view_store_uris_for_authentication.is_empty() { + authenticate_view_stores( + enclave.clone(), + view_store_uris_for_authentication, + logger.clone(), + ) + .await?; + } else { + remaining_tries -= 1; + } + } + + if remaining_tries == 0 { + return Err(router_server_err_to_rpc_status( + "Query: timed out connecting to view stores", + RouterServerError::ViewStoreError(format!( + "Received {RETRY_COUNT} responses which failed to advance the MultiViewStoreRequest" + )), + logger.clone(), + )); + } + + Ok(query_responses) +} + +/// Sends a client's query request to all of the Fog View shards. +async fn route_query( + request: &MultiViewStoreQueryRequest, + shards: Vec, +) -> Result, RouterServerError> { + let responses = shards + .into_iter() + .map(|shard_client| query_shard(request, shard_client)); + try_join_all(responses).await +} + +/// Sends a client's query request to one of the Fog View shards. 
+async fn query_shard( + request: &MultiViewStoreQueryRequest, + shard: Shard, +) -> Result<(Shard, MultiViewStoreQueryResponse), RouterServerError> { + let client_unary_receiver = shard.grpc_client.multi_view_store_query_async(request)?; + let response = client_unary_receiver.await?; + + Ok((shard, response.try_into()?)) +} + +/// Authenticates Fog View Stores that have previously not been authenticated. +async fn authenticate_view_stores( + enclave: E, + view_store_uris: Vec, + logger: Logger, +) -> Result, RpcStatus> { + let pending_auth_requests = view_store_uris + .into_iter() + .map(|store_uri| authenticate_view_store(enclave.clone(), store_uri, logger.clone())); + + try_join_all(pending_auth_requests).await.map_err(|err| { + router_server_err_to_rpc_status( + "Query: cannot authenticate with each Fog View Store:", + err, + logger.clone(), + ) + }) +} + +/// Authenticates a Fog View Store that has previously not been authenticated. +async fn authenticate_view_store( + enclave: E, + view_store_url: FogViewStoreUri, + logger: Logger, +) -> Result<(), RouterServerError> { + let view_store_id = view_store_url.host_and_port_responder_id()?; + let nonce_auth_request = enclave.view_store_init(view_store_id.clone())?; + let grpc_env = Arc::new( + grpcio::EnvBuilder::new() + .name_prefix("authenticate-view-store".to_string()) + .build(), + ); + let view_store_client = FogViewStoreApiClient::new( + ChannelBuilder::default_channel_builder(grpc_env).connect_to_uri(&view_store_url, &logger), + ); + + let auth_unary_receiver = view_store_client.auth_async(&nonce_auth_request.into())?; + let nonce_auth_response = auth_unary_receiver.await?; + + enclave.view_store_connect(view_store_id, nonce_auth_response.into())?; + Ok(()) +} diff --git a/fog/view/server/src/server.rs b/fog/view/server/src/server.rs index a8cbbe8b05..46062c84a5 100644 --- a/fog/view/server/src/server.rs +++ b/fog/view/server/src/server.rs @@ -6,7 +6,7 @@ use crate::{ block_tracker::BlockTracker, 
config::MobileAcctViewConfig, counters, db_fetcher::DbFetcher, - fog_view_service::FogViewService, + fog_view_service::FogViewService, sharding_strategy::ShardingStrategy, }; use futures::executor::block_on; use mc_attest_net::RaClient; @@ -19,7 +19,7 @@ use mc_crypto_keys::CompressedRistrettoPublic; use mc_fog_api::view_grpc; use mc_fog_recovery_db_iface::RecoveryDb; use mc_fog_types::ETxOutRecord; -use mc_fog_uri::ConnectionUri; +use mc_fog_uri::{ConnectionUri, FogViewStoreUri}; use mc_fog_view_enclave::ViewEnclaveProxy; use mc_sgx_report_cache_untrusted::ReportCacheThread; use mc_util_grpc::{ @@ -38,26 +38,28 @@ use std::{ time::{Duration, Instant}, }; -pub struct ViewServer +pub struct ViewServer where E: ViewEnclaveProxy, RC: RaClient + Send + Sync + 'static, DB: RecoveryDb + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Clone + Send + Sync + 'static, { config: MobileAcctViewConfig, server: grpcio::Server, enclave: E, ra_client: RC, report_cache_thread: Option, - db_poll_thread: DbPollThread, + db_poll_thread: DbPollThread, logger: Logger, } -impl ViewServer +impl ViewServer where E: ViewEnclaveProxy, RC: RaClient + Send + Sync + 'static, DB: RecoveryDb + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Clone + Send + Sync + 'static, { /// Make a new view server instance pub fn new( @@ -66,8 +68,9 @@ where recovery_db: DB, ra_client: RC, time_provider: impl TimeProvider + 'static, + sharding_strategy: SS, logger: Logger, - ) -> ViewServer { + ) -> ViewServer { let readiness_indicator = ReadinessIndicator::default(); let db_poll_thread = DbPollThread::new( @@ -75,12 +78,13 @@ where enclave.clone(), recovery_db.clone(), readiness_indicator.clone(), + sharding_strategy.clone(), logger.clone(), ); let env = Arc::new( grpcio::EnvBuilder::new() - .name_prefix("Main-RPC".to_string()) + .name_prefix("Fog-view-server".to_string()) .build(), ); @@ -95,26 +99,35 @@ where Arc::new(AnonymousAuthenticator::default()) }; - let fog_view_service = 
view_grpc::create_fog_view_api(FogViewService::new( - config.clone(), + // Health check service + let health_service = + mc_util_grpc::HealthService::new(Some(readiness_indicator.into()), logger.clone()) + .into_service(); + + log::debug!(logger, "Starting View Store GRPC Service"); + let use_tls = config.client_listen_uri.use_tls(); + let responder_id = config + .client_listen_uri + .responder_id() + .expect("Could not get store responder id"); + let uri = FogViewStoreUri::try_from_responder_id(responder_id, use_tls) + .expect("Could not create uri from responder id"); + + let fog_view_service = view_grpc::create_fog_view_store_api(FogViewService::new( enclave.clone(), Arc::new(recovery_db), db_poll_thread.get_shared_state(), + uri, client_authenticator, + sharding_strategy, logger.clone(), )); - log::debug!(logger, "Constructed View GRPC Service"); - - // Health check service - let health_service = - mc_util_grpc::HealthService::new(Some(readiness_indicator.into()), logger.clone()) - .into_service(); // Package service into grpc server log::info!( logger, - "Starting View server on {}", - config.client_listen_uri.addr(), + "Starting View Store server on {}", + config.client_listen_uri, ); let server_builder = grpcio::ServerBuilder::new(env) .register_service(fog_view_service) @@ -180,11 +193,12 @@ where } } -impl Drop for ViewServer +impl Drop for ViewServer where E: ViewEnclaveProxy, RC: RaClient + Send + Sync + 'static, DB: RecoveryDb + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Clone + Send + Sync + 'static, { fn drop(&mut self) { self.stop(); @@ -206,13 +220,17 @@ pub struct DbPollSharedState { /// The cumulative txo count of the last known block. pub last_known_block_cumulative_txo_count: u64, + + /// The number of blocks that have been processed. 
+ pub processed_block_count: u64, } /// A thread that periodically pushes new tx data from db to enclave -struct DbPollThread +struct DbPollThread where E: ViewEnclaveProxy, DB: RecoveryDb + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Clone + Send + Sync + 'static, { /// Config config: MobileAcctViewConfig, @@ -235,6 +253,9 @@ where /// Readiness indicator. readiness_indicator: ReadinessIndicator, + /// Sharding strategy, + sharding_strategy: SS, + /// Logger. logger: Logger, } @@ -242,10 +263,11 @@ where /// How long to wait between polling db const DB_POLL_INTERNAL: Duration = Duration::from_millis(100); -impl DbPollThread +impl DbPollThread where E: ViewEnclaveProxy, DB: RecoveryDb + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Clone + Send + Sync + 'static, { /// Get the shared state. pub fn get_shared_state(&self) -> Arc> { @@ -258,6 +280,7 @@ where enclave: E, db: DB, readiness_indicator: ReadinessIndicator, + sharding_strategy: SS, logger: Logger, ) -> Self { let stop_requested = Arc::new(AtomicBool::new(false)); @@ -271,6 +294,7 @@ where stop_requested, shared_state, readiness_indicator, + sharding_strategy, logger, } } @@ -290,6 +314,7 @@ where let thread_stop_requested = self.stop_requested.clone(); let thread_shared_state = self.shared_state.clone(); let thread_readiness_indicator = self.readiness_indicator.clone(); + let thread_sharding_strategy = self.sharding_strategy.clone(); let thread_logger = self.logger.clone(); self.join_handle = Some( @@ -303,6 +328,7 @@ where thread_stop_requested, thread_shared_state, thread_readiness_indicator, + thread_sharding_strategy, thread_logger, ) }) @@ -327,6 +353,7 @@ where stop_requested: Arc, shared_state: Arc>, readiness_indicator: ReadinessIndicator, + sharding_strategy: SS, logger: Logger, ) { log::debug!(logger, "Db poll thread started"); @@ -338,6 +365,7 @@ where db, shared_state, readiness_indicator, + sharding_strategy, logger.clone(), ); loop { @@ -357,20 +385,22 @@ where } } -impl 
Drop for DbPollThread +impl Drop for DbPollThread where E: ViewEnclaveProxy, DB: RecoveryDb + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Clone + Send + Sync + 'static, { fn drop(&mut self) { let _ = self.stop(); } } -struct DbPollThreadWorker +struct DbPollThreadWorker where E: ViewEnclaveProxy, DB: RecoveryDb + Clone + Send + Sync + 'static, + SS: ShardingStrategy, { /// Stop request trigger, used to signal the thread to stop. stop_requested: Arc, @@ -389,7 +419,7 @@ where db_fetcher: DbFetcher, /// Keeps track of which blocks we have fed into the enclave. - enclave_block_tracker: BlockTracker, + enclave_block_tracker: BlockTracker, /// Flag which the db fetcher sets to indicate when it has exhausted it's /// initial work. @@ -416,10 +446,11 @@ pub enum WorkerTickResult { /// Telemetry: block indes currently being worked on. const TELEMETRY_BLOCK_INDEX_KEY: Key = telemetry_static_key!("block-index"); -impl DbPollThreadWorker +impl DbPollThreadWorker where E: ViewEnclaveProxy, DB: RecoveryDb + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Clone + Send + Sync + 'static, { pub fn new( config: MobileAcctViewConfig, @@ -428,6 +459,7 @@ where db: DB, shared_state: Arc>, server_readiness_indicator: ReadinessIndicator, + sharding_strategy: SS, logger: Logger, ) -> Self { let db_fetcher_readiness_indicator = ReadinessIndicator::default(); @@ -435,6 +467,7 @@ where let db_fetcher = DbFetcher::new( db.clone(), db_fetcher_readiness_indicator.clone(), + sharding_strategy.clone(), config.block_query_batch_size, logger.clone(), ); @@ -445,7 +478,7 @@ where db, shared_state, db_fetcher, - enclave_block_tracker: BlockTracker::new(logger.clone()), + enclave_block_tracker: BlockTracker::new(logger.clone(), sharding_strategy), db_fetcher_readiness_indicator, server_readiness_indicator, last_unblocked_at: Instant::now(), @@ -631,12 +664,17 @@ where ); // Track that this block was processed. 
- self.enclave_block_tracker - .block_processed(ingress_key, block_index); - - // Update metrics - counters::BLOCKS_ADDED_COUNT.inc(); - counters::TXOS_ADDED_COUNT.inc_by(num_records as u64); + if self + .enclave_block_tracker + .block_processed(ingress_key, block_index) + { + let mut shared_state = self.shared_state.lock().expect("mutex poisoned"); + shared_state.processed_block_count += 1; + + // Update metrics + counters::BLOCKS_ADDED_COUNT.inc(); + counters::TXOS_ADDED_COUNT.inc_by(num_records as u64); + } } } } diff --git a/fog/view/server/src/shard_responses_processor.rs b/fog/view/server/src/shard_responses_processor.rs new file mode 100644 index 0000000000..1c6f5aaf98 --- /dev/null +++ b/fog/view/server/src/shard_responses_processor.rs @@ -0,0 +1,402 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +use crate::{error::RouterServerError, fog_view_router_server::Shard}; +use mc_common::logger::{log, Logger}; +use mc_fog_types::view::MultiViewStoreQueryResponse; +use mc_fog_uri::FogViewStoreUri; +use std::str::FromStr; + +/// The result of processing the MultiViewStoreQueryResponse from each Fog View +/// Shard. +pub struct ProcessedShardResponseData { + /// gRPC clients for Shards that need to be retried for a successful + /// response. + pub shards_for_retry: Vec, + + /// Uris for *individual* Fog View Stores that need to be authenticated with + /// by the Fog Router. It should only have entries if + /// `shard_clients_for_retry` has entries. + pub view_store_uris_for_authentication: Vec, + + /// New, successfully processed query responses. 
+ pub multi_view_store_query_responses: Vec, +} + +impl ProcessedShardResponseData { + pub fn new( + shards_for_retry: Vec, + view_store_uris_for_authentication: Vec, + new_query_responses: Vec, + ) -> Self { + ProcessedShardResponseData { + shards_for_retry, + view_store_uris_for_authentication, + multi_view_store_query_responses: new_query_responses, + } + } +} + +/// Processes the MultiViewStoreQueryResponses returned by each Fog View Shard. +pub fn process_shard_responses( + shards_and_responses: Vec<(Shard, MultiViewStoreQueryResponse)>, + logger: Logger, +) -> Result { + let mut shards_for_retry = Vec::new(); + let mut view_store_uris_for_authentication = Vec::new(); + let mut new_query_responses = Vec::new(); + + for (shard, response) in shards_and_responses { + if response.block_range != shard.block_range { + return Err(RouterServerError::ViewStoreError(format!("The shard response's block range {} does not match the shard's configured block range {}.", response.block_range, shard.block_range))); + } + match response.status { + mc_fog_types::view::MultiViewStoreQueryResponseStatus::Unknown => { + log::error!( + logger, + "Received a response with status 'unknown' from store{}", + FogViewStoreUri::from_str(&response.store_uri)? + ); + shards_for_retry.push(shard); + } + mc_fog_types::view::MultiViewStoreQueryResponseStatus::Success => { + new_query_responses.push(response.clone()); + } + // The shard was unable to produce a query response because the Fog View Store + // it contacted isn't authenticated with the Fog View Router. Therefore + // we need to (a) retry the query (b) authenticate with the Fog View + // Store. + mc_fog_types::view::MultiViewStoreQueryResponseStatus::AuthenticationError => { + shards_for_retry.push(shard); + view_store_uris_for_authentication + .push(FogViewStoreUri::from_str(&response.store_uri)?); + } + // Don't do anything if the Fog View Store isn't ready. 
It's already authenticated, + // hasn't returned a new query response, and shouldn't be retried yet. + mc_fog_types::view::MultiViewStoreQueryResponseStatus::NotReady => (), + } + } + + Ok(ProcessedShardResponseData::new( + shards_for_retry, + view_store_uris_for_authentication, + new_query_responses, + )) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::sharding_strategy::{EpochShardingStrategy, ShardingStrategy}; + use grpcio::ChannelBuilder; + use mc_common::logger::{test_with_logger, Logger}; + use mc_fog_api::view_grpc::FogViewStoreApiClient; + use mc_fog_types::common::BlockRange; + use mc_fog_uri::FogViewStoreScheme; + use mc_util_grpc::ConnectionUriGrpcioChannel; + use mc_util_uri::UriScheme; + use std::sync::Arc; + + fn create_successful_mvq_response( + shard_index: usize, + block_range: BlockRange, + ) -> MultiViewStoreQueryResponse { + let mut successful_response = mc_fog_api::view::MultiViewStoreQueryResponse::new(); + let client_auth_request = Vec::new(); + successful_response + .mut_query_response() + .set_data(client_auth_request); + let view_uri_string = format!( + "{}://node{}.test.mobilecoin.com:{}", + FogViewStoreScheme::SCHEME_INSECURE, + shard_index, + FogViewStoreScheme::DEFAULT_INSECURE_PORT, + ); + successful_response.set_store_uri(view_uri_string); + successful_response.set_block_range(mc_fog_api::fog_common::BlockRange::from(&block_range)); + successful_response + .set_status(mc_fog_api::view::MultiViewStoreQueryResponseStatus::SUCCESS); + + successful_response + .try_into() + .expect("Couldn't convert MultiViewStoreQueryResponse proto to internal struct") + } + + fn create_failed_mvq_response( + shard_index: usize, + block_range: BlockRange, + status: mc_fog_api::view::MultiViewStoreQueryResponseStatus, + ) -> MultiViewStoreQueryResponse { + let mut failed_response = mc_fog_api::view::MultiViewStoreQueryResponse::new(); + let view_uri_string = format!( + "{}://node{}.test.mobilecoin.com:{}", + 
FogViewStoreScheme::SCHEME_INSECURE, + shard_index, + FogViewStoreScheme::DEFAULT_INSECURE_PORT, + ); + failed_response.set_store_uri(view_uri_string); + failed_response.set_block_range(mc_fog_api::fog_common::BlockRange::from(&block_range)); + failed_response.set_status(status); + + failed_response + .try_into() + .expect("Couldn't convert MultiViewStoreQueryResponse proto to internal struct") + } + + fn create_shard(i: usize, block_range: BlockRange, logger: Logger) -> Shard { + let view_uri_string = format!( + "{}://node{}.test.mobilecoin.com:{}", + FogViewStoreScheme::SCHEME_INSECURE, + i, + FogViewStoreScheme::DEFAULT_INSECURE_PORT, + ); + let uri = FogViewStoreUri::from_str(&view_uri_string).unwrap(); + let grpc_env = Arc::new( + grpcio::EnvBuilder::new() + .name_prefix("processor-test".to_string()) + .build(), + ); + + let grpc_client = FogViewStoreApiClient::new( + ChannelBuilder::default_channel_builder(grpc_env).connect_to_uri(&uri, &logger), + ); + + Shard::new(uri, Arc::new(grpc_client), block_range) + } + + #[test_with_logger] + fn one_successful_response_no_shards(logger: Logger) { + let shard_index = 0; + let sharding_strategy = EpochShardingStrategy::default(); + let block_range = sharding_strategy.get_block_range(); + let shard = create_shard(shard_index, block_range.clone(), logger.clone()); + let successful_mvq_response = create_successful_mvq_response(shard_index, block_range); + let shards_and_responses = vec![(shard, successful_mvq_response)]; + + let result = process_shard_responses(shards_and_responses, logger); + + assert!(result.is_ok()); + + let shards_for_retry = result.unwrap().shards_for_retry; + assert!(shards_for_retry.is_empty()); + } + + #[test_with_logger] + fn one_successful_response_no_pending_authentications(logger: Logger) { + let shard_index = 0; + let sharding_strategy = EpochShardingStrategy::default(); + let block_range = sharding_strategy.get_block_range(); + let shard = create_shard(shard_index, block_range.clone(), 
logger.clone()); + let successful_mvq_response = create_successful_mvq_response(shard_index, block_range); + let shards_and_responses = vec![(shard, successful_mvq_response)]; + + let result = process_shard_responses(shards_and_responses, logger); + + assert!(result.is_ok()); + + let view_store_uris_for_authentication = result.unwrap().view_store_uris_for_authentication; + assert!(view_store_uris_for_authentication.is_empty()); + } + + #[test_with_logger] + fn one_successful_response_one_new_query_response(logger: Logger) { + let shard_index = 0; + let sharding_strategy = EpochShardingStrategy::default(); + let block_range = sharding_strategy.get_block_range(); + let shard = create_shard(shard_index, block_range.clone(), logger.clone()); + let successful_mvq_response = create_successful_mvq_response(shard_index, block_range); + let shards_and_responses = vec![(shard, successful_mvq_response)]; + + let result = process_shard_responses(shards_and_responses, logger); + + assert!(result.is_ok()); + + let new_query_response = result.unwrap().multi_view_store_query_responses; + assert_eq!(new_query_response.len(), 1); + } + + #[test_with_logger] + fn one_auth_error_response_one_pending_shard(logger: Logger) { + let shard_index = 0; + let sharding_strategy = EpochShardingStrategy::default(); + let block_range = sharding_strategy.get_block_range(); + let shard = create_shard(shard_index, block_range.clone(), logger.clone()); + let failed_mvq_response = create_failed_mvq_response( + shard_index, + block_range, + mc_fog_api::view::MultiViewStoreQueryResponseStatus::AUTHENTICATION_ERROR, + ); + let shards_and_responses = vec![(shard, failed_mvq_response)]; + + let result = process_shard_responses(shards_and_responses, logger); + + assert!(result.is_ok()); + + let shards_for_retry = result.unwrap().shards_for_retry; + assert_eq!(shards_for_retry.len(), 1); + } + + #[test_with_logger] + fn one_auth_error_response_one_pending_authentications(logger: Logger) { + let shard_index: 
usize = 0; + let sharding_strategy = EpochShardingStrategy::default(); + let block_range = sharding_strategy.get_block_range(); + let shard = create_shard(shard_index, block_range.clone(), logger.clone()); + let failed_mvq_response = create_failed_mvq_response( + shard_index, + block_range, + mc_fog_api::view::MultiViewStoreQueryResponseStatus::AUTHENTICATION_ERROR, + ); + let shards_and_responses = vec![(shard, failed_mvq_response)]; + + let result = process_shard_responses(shards_and_responses, logger); + + assert!(result.is_ok()); + + let view_store_uris_for_authentication = result.unwrap().view_store_uris_for_authentication; + assert_eq!(view_store_uris_for_authentication.len(), 1); + } + + #[test_with_logger] + fn one_auth_error_response_zero_new_query_responses(logger: Logger) { + let shard_index: usize = 0; + let sharding_strategy = EpochShardingStrategy::default(); + let block_range = sharding_strategy.get_block_range(); + let shard = create_shard(shard_index, block_range.clone(), logger.clone()); + let failed_mvq_response = create_failed_mvq_response( + shard_index, + block_range, + mc_fog_api::view::MultiViewStoreQueryResponseStatus::AUTHENTICATION_ERROR, + ); + let shards_and_responses = vec![(shard, failed_mvq_response)]; + + let result = process_shard_responses(shards_and_responses, logger); + + assert!(result.is_ok()); + + let new_query_responses = result.unwrap().multi_view_store_query_responses; + assert!(new_query_responses.is_empty()); + } + + #[test_with_logger] + fn one_not_ready_response_zero_new_query_responses(logger: Logger) { + let shard_index: usize = 0; + let sharding_strategy = EpochShardingStrategy::default(); + let block_range = sharding_strategy.get_block_range(); + let shard = create_shard(shard_index, block_range.clone(), logger.clone()); + let failed_mvq_response = create_failed_mvq_response( + shard_index, + block_range, + mc_fog_api::view::MultiViewStoreQueryResponseStatus::NOT_READY, + ); + let shards_and_responses = 
vec![(shard, failed_mvq_response)]; + + let result = process_shard_responses(shards_and_responses, logger); + + assert!(result.is_ok()); + + let new_query_responses = result.unwrap().multi_view_store_query_responses; + assert!(new_query_responses.is_empty()); + } + + #[test_with_logger] + fn one_not_ready_response_zero_pending_authentications(logger: Logger) { + let shard_index: usize = 0; + let sharding_strategy = EpochShardingStrategy::default(); + let block_range = sharding_strategy.get_block_range(); + let shard = create_shard(shard_index, block_range.clone(), logger.clone()); + let failed_mvq_response = create_failed_mvq_response( + shard_index, + block_range, + mc_fog_api::view::MultiViewStoreQueryResponseStatus::NOT_READY, + ); + let shards_and_responses = vec![(shard, failed_mvq_response)]; + + let result = process_shard_responses(shards_and_responses, logger); + + assert!(result.is_ok()); + + let view_store_uris_for_authentication = result.unwrap().view_store_uris_for_authentication; + assert_eq!(view_store_uris_for_authentication.len(), 0); + } + + #[test_with_logger] + fn one_not_ready_response_zero_pending_shard_clients(logger: Logger) { + let shard_index: usize = 0; + let sharding_strategy = EpochShardingStrategy::default(); + let block_range = sharding_strategy.get_block_range(); + let shard = create_shard(shard_index, block_range.clone(), logger.clone()); + let failed_mvq_response = create_failed_mvq_response( + shard_index, + block_range, + mc_fog_api::view::MultiViewStoreQueryResponseStatus::NOT_READY, + ); + let shards_and_responses = vec![(shard, failed_mvq_response)]; + let result = process_shard_responses(shards_and_responses, logger); + + assert!(result.is_ok()); + + let shard_clients_for_retry = result.unwrap().shards_for_retry; + assert_eq!(shard_clients_for_retry.len(), 0); + } + + #[test_with_logger] + fn mixed_auth_error_and_successful_responses_processes_correctly(logger: Logger) { + const NUMBER_OF_FAILURES: usize = 11; + const 
NUMBER_OF_SUCCESSES: usize = 8; + + let mut shards_and_clients = Vec::new(); + for i in 0..NUMBER_OF_FAILURES { + let block_range = BlockRange::new_from_length(i as u64, 1); + let shard = create_shard(i, block_range.clone(), logger.clone()); + let failed_mvq_response = create_failed_mvq_response( + i, + block_range, + mc_fog_api::view::MultiViewStoreQueryResponseStatus::AUTHENTICATION_ERROR, + ); + shards_and_clients.push((shard, failed_mvq_response)); + } + for i in 0..NUMBER_OF_SUCCESSES { + let shard_index = i + NUMBER_OF_FAILURES; + let block_range = BlockRange::new_from_length(shard_index as u64, 1); + let shard = create_shard(shard_index, block_range.clone(), logger.clone()); + let successful_mvq_response = create_successful_mvq_response(shard_index, block_range); + shards_and_clients.push((shard, successful_mvq_response)); + } + + let result = process_shard_responses(shards_and_clients, logger); + assert!(result.is_ok()); + let processed_shard_response_data = result.unwrap(); + + assert_eq!( + processed_shard_response_data.shards_for_retry.len(), + NUMBER_OF_FAILURES + ); + assert_eq!( + processed_shard_response_data + .view_store_uris_for_authentication + .len(), + NUMBER_OF_FAILURES + ); + assert_eq!( + processed_shard_response_data + .multi_view_store_query_responses + .len(), + NUMBER_OF_SUCCESSES + ); + } + + #[test_with_logger] + fn shard_block_range_does_not_match_configured_block_range(logger: Logger) { + let shard_index: usize = 0; + let configured_block_range = BlockRange::new(0, 10); + let shard = create_shard(shard_index, configured_block_range, logger.clone()); + + let response_block_range = BlockRange::new(100, 110); + let response = create_successful_mvq_response(shard_index, response_block_range); + let shards_and_responses = vec![(shard, response)]; + + let result = process_shard_responses(shards_and_responses, logger); + + assert!(result.is_err()); + } +} diff --git a/fog/view/server/src/sharding_strategy.rs 
b/fog/view/server/src/sharding_strategy.rs new file mode 100644 index 0000000000..3956062308 --- /dev/null +++ b/fog/view/server/src/sharding_strategy.rs @@ -0,0 +1,274 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +//! Enables a Fog View Store to know for which blocks to process TxOuts. +//! +//! By determining which TxOuts to process, we are able to "shard" the set of +//! TxOuts across Fog View Store instances. + +use mc_blockchain_types::BlockIndex; +use mc_fog_types::{ + common::{BlockRange, BLOCK_RANGE_DELIMITER}, + BlockCount, +}; +use mc_fog_uri::FogViewStoreUri; +use mc_util_uri::ConnectionUri; +use serde::Serialize; +use std::str::FromStr; + +/// Tells a Fog View Store for which blocks it should process TxOuts. +pub trait ShardingStrategy { + /// Returns true if the Fog View Store should process this block. + fn should_process_block(&self, block_index: BlockIndex) -> bool; + + /// Returns true if the Fog View Store is ready to serve TxOuts to the + /// client. + /// + /// Different sharding strategies might be ready to serve TxOuts when + /// different conditions have been met. + fn is_ready_to_serve_tx_outs(&self, processed_block_count: BlockCount) -> bool; + + /// Returns the block range that this sharding strategy is responsible for. + fn get_block_range(&self) -> BlockRange; +} + +/// Determines whether or not to process a block's TxOuts based on the "epoch" +/// sharding strategy, in which a block is processed IFF it falls within the +/// contiguous range of blocks. +/// +/// In practice, the set of Fog View Shards will contain overlapping +/// [epoch_block_ranges] in order to obfuscate which shard processed the TxOuts. +#[derive(Clone, Serialize)] +pub struct EpochShardingStrategy { + /// If a block falls within this range, then the Fog View Store should + /// process its TxOuts. 
+ epoch_block_range: BlockRange, +} + +impl TryFrom for EpochShardingStrategy { + type Error = String; + + fn try_from(src: FogViewStoreUri) -> Result { + let sharding_strategy_string = src + .get_param("sharding_strategy") + .unwrap_or_else(|| "default".to_string()); + EpochShardingStrategy::from_str(&sharding_strategy_string) + } +} + +impl ShardingStrategy for EpochShardingStrategy { + fn should_process_block(&self, block_index: BlockIndex) -> bool { + self.epoch_block_range.contains(block_index) + } + + fn is_ready_to_serve_tx_outs(&self, processed_block_count: BlockCount) -> bool { + self.have_enough_blocks_been_processed(processed_block_count) + } + + fn get_block_range(&self) -> BlockRange { + self.epoch_block_range.clone() + } +} + +impl Default for EpochShardingStrategy { + fn default() -> Self { + Self { + epoch_block_range: BlockRange::new(0, u64::MAX), + } + } +} + +impl ToString for EpochShardingStrategy { + fn to_string(&self) -> String { + let start_block = self.epoch_block_range.start_block; + let end_block = self.epoch_block_range.end_block; + format!("{start_block}{BLOCK_RANGE_DELIMITER}{end_block}") + } +} + +impl EpochShardingStrategy { + #[allow(dead_code)] + pub fn new(epoch_block_range: BlockRange) -> Self { + Self { epoch_block_range } + } + + fn have_enough_blocks_been_processed(&self, processed_block_count: BlockCount) -> bool { + if self.is_first_epoch() { + return true; + } + + let epoch_block_range_length = + self.epoch_block_range.end_block - self.epoch_block_range.start_block; + let minimum_processed_block_count = epoch_block_range_length / 2; + + u64::from(processed_block_count) >= minimum_processed_block_count + } + + fn is_first_epoch(&self) -> bool { + self.epoch_block_range.start_block == 0 + } +} + +impl FromStr for EpochShardingStrategy { + type Err = String; + + fn from_str(s: &str) -> Result { + if s.eq("default") { + return Ok(EpochShardingStrategy::default()); + } + match BlockRange::from_str(s) { + Ok(block_range) => 
Ok(Self::new(block_range)), + Err(e) => Err(format!("Invalid epoch sharding strategy: {e}")), + } + } +} + +#[cfg(test)] +mod epoch_sharding_strategy_tests { + use super::*; + + #[test] + fn should_process_block_block_index_is_before_epoch_start_returns_false() { + const START_BLOCK: BlockIndex = 50; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let should_process_block = epoch_sharding_strategy.should_process_block(START_BLOCK - 1); + + assert!(!should_process_block) + } + + #[test] + fn should_process_block_block_index_is_epoch_start_returns_true() { + const START_BLOCK: BlockIndex = 50; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let should_process_block = epoch_sharding_strategy.should_process_block(START_BLOCK); + + assert!(should_process_block) + } + + #[test] + fn should_process_block_block_index_is_in_epoch_block_range_returns_true() { + const START_BLOCK: BlockIndex = 50; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let included_block_index = ((END_BLOCK_EXCLUSIVE - START_BLOCK) / 2) + START_BLOCK; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let should_process_block = + epoch_sharding_strategy.should_process_block(included_block_index); + + assert!(should_process_block) + } + + #[test] + fn should_process_block_block_index_is_one_before_epoch_end_block_range_returns_true() { + const START_BLOCK: BlockIndex = 50; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + let 
should_process_block = + epoch_sharding_strategy.should_process_block(END_BLOCK_EXCLUSIVE - 1); + assert!(should_process_block) + } + + #[test] + fn should_process_block_block_index_is_epoch_end_block_range_returns_false() { + const START_BLOCK: BlockIndex = 50; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let should_process_block = + epoch_sharding_strategy.should_process_block(END_BLOCK_EXCLUSIVE); + + assert!(!should_process_block) + } + + #[test] + fn should_process_block_block_index_is_after_epoch_end_block_range_returns_false() { + const START_BLOCK: BlockIndex = 50; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let should_process_block = + epoch_sharding_strategy.should_process_block(END_BLOCK_EXCLUSIVE + 1); + + assert!(!should_process_block) + } + + #[test] + fn is_ready_to_serve_tx_outs_allows_0_in_0_to_100_shard() { + // The first epoch has a start block == 0. + const START_BLOCK: BlockIndex = 0; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let is_ready_to_serve_txouts = epoch_sharding_strategy.is_ready_to_serve_tx_outs(0.into()); + + assert!(is_ready_to_serve_txouts) + } + + #[test] + fn is_ready_to_serve_tx_outs_allows_70_in_0_to_100_shard() { + // The first epoch has a start block == 0. 
+ const START_BLOCK: BlockIndex = 0; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let is_ready_to_serve_txouts = epoch_sharding_strategy.is_ready_to_serve_tx_outs(70.into()); + + assert!(is_ready_to_serve_txouts) + } + + #[test] + fn is_ready_to_serve_tx_outs_not_first_shard_prevents_less_than_minimum() { + const START_BLOCK: BlockIndex = 100; + const END_BLOCK_EXCLUSIVE: BlockIndex = 111; + let epoch_block_range_length = END_BLOCK_EXCLUSIVE - START_BLOCK; + let minimum_processed_block_count = epoch_block_range_length / 2; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let is_ready_to_serve_txouts = epoch_sharding_strategy + .is_ready_to_serve_tx_outs((minimum_processed_block_count - 1).into()); + + assert!(!is_ready_to_serve_txouts) + } + + #[test] + fn is_ready_to_serve_tx_outs_not_first_shard_allows_minimum() { + const START_BLOCK: BlockIndex = 100; + const END_BLOCK_EXCLUSIVE: BlockIndex = 111; + let epoch_block_range_length = END_BLOCK_EXCLUSIVE - START_BLOCK; + let minimum_processed_block_count = epoch_block_range_length / 2; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let is_ready_to_serve_txouts = + epoch_sharding_strategy.is_ready_to_serve_tx_outs(minimum_processed_block_count.into()); + + assert!(is_ready_to_serve_txouts) + } + + #[test] + fn is_ready_to_serve_tx_outs_not_first_shard_allows_over_minimum() { + const START_BLOCK: BlockIndex = 100; + const END_BLOCK_EXCLUSIVE: BlockIndex = 110; + let epoch_block_range_length = END_BLOCK_EXCLUSIVE - START_BLOCK; + let minimum_processed_block_count = epoch_block_range_length / 2; + let epoch_block_range = 
BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let is_ready_to_serve_txouts = epoch_sharding_strategy + .is_ready_to_serve_tx_outs((minimum_processed_block_count + 1).into()); + + assert!(is_ready_to_serve_txouts) + } +} diff --git a/fog/view/server/test-utils/Cargo.toml b/fog/view/server/test-utils/Cargo.toml new file mode 100644 index 0000000000..03a4ed5722 --- /dev/null +++ b/fog/view/server/test-utils/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "mc-fog-view-server-test-utils" +version = "2.0.0" +authors = ["MobileCoin"] +edition = "2021" +license = "GPL-3.0" + +[dependencies] +# MC +mc-attest-net = { path = "../../../../attest/net" } +mc-attest-verifier = { path = "../../../../attest/verifier" } +mc-blockchain-types = { path = "../../../../blockchain/types" } +mc-common = { path = "../../../../common", features = ["log"] } +mc-transaction-core = { path = "../../../../transaction/core" } +mc-util-from-random = { path = "../../../../util/from-random" } +mc-util-grpc = { path = "../../../../util/grpc" } +mc-util-uri = { path = "../../../../util/uri" } + +# Fog +mc-fog-api = { path = "../../../api" } +mc-fog-recovery-db-iface = { path = "../../../recovery_db_iface" } +mc-fog-sql-recovery-db = { path = "../../../sql_recovery_db" } +mc-fog-test-infra = { path = "../../../test_infra" } +mc-fog-types = { path = "../../../types" } +mc-fog-uri = { path = "../../../uri" } +mc-fog-view-connection = { path = "../../connection" } +mc-fog-view-enclave = { path = "../../enclave" } +mc-fog-view-enclave-measurement = { path = "../../enclave/measurement" } +mc-fog-view-protocol = { path = "../../protocol" } +mc-fog-view-server = { path = "../." 
} + +# Third-party +grpcio = "0.12.0" +portpicker = "0.1.1" diff --git a/fog/view/server/test-utils/src/lib.rs b/fog/view/server/test-utils/src/lib.rs new file mode 100644 index 0000000000..54c019b2db --- /dev/null +++ b/fog/view/server/test-utils/src/lib.rs @@ -0,0 +1,496 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +//! Contains helper methods and structs used by the router integration test. + +use grpcio::ChannelBuilder; +use mc_attest_net::{Client as AttestClient, RaClient}; +use mc_attest_verifier::{MrSignerVerifier, Verifier, DEBUG_ENCLAVE}; +use mc_blockchain_types::{Block, BlockID, BlockIndex}; +use mc_common::{ + logger::{log, Logger}, + time::SystemTimeProvider, + ResponderId, +}; +use mc_fog_api::view_grpc::FogViewStoreApiClient; +use mc_fog_recovery_db_iface::{AddBlockDataStatus, IngestInvocationId, RecoveryDb}; +use mc_fog_sql_recovery_db::{test_utils::SqlRecoveryDbTestContext, SqlRecoveryDb}; +use mc_fog_test_infra::get_enclave_path; +use mc_fog_types::{ + common::BlockRange, + view::{FixedTxOutSearchResult, TxOutSearchResult, TxOutSearchResultCode}, + ETxOutRecord, +}; +use mc_fog_uri::{FogViewRouterUri, FogViewStoreUri, FogViewUri}; +use mc_fog_view_connection::{fog_view_router_client::FogViewRouterGrpcClient, FogViewGrpcClient}; +use mc_fog_view_enclave::SgxViewEnclave; +use mc_fog_view_protocol::FogViewConnection; +use mc_fog_view_server::{ + config::{ + FogViewRouterConfig, MobileAcctViewConfig as ViewConfig, RouterClientListenUri, + ShardingStrategy::Epoch, + }, + fog_view_router_server::{FogViewRouterServer, Shard}, + server::ViewServer, + sharding_strategy::EpochShardingStrategy, +}; +use mc_transaction_core::BlockVersion; +use mc_util_grpc::{ConnectionUriGrpcioChannel, GrpcRetryConfig}; +use mc_util_uri::{AdminUri, ConnectionUri}; +use std::{ + str::FromStr, + sync::{Arc, RwLock}, + thread::sleep, + time::Duration, +}; + +const GRPC_RETRY_CONFIG: GrpcRetryConfig = GrpcRetryConfig { + grpc_retry_count: 3, + grpc_retry_millis: 20, 
+}; + +/// Contains the core structs used by router integration tests and manages their +/// drop order. +/// +/// Note: We need to define a precise field drop order in order for this test to +/// not hang indefinitely upon completion, and wrapping each field in an +/// `Option` allows us to define drop order. If we don't do this, then the drop +/// order is defined by the field definition order, which is prone to error. I.e +/// simply reordering the fields would cause the test to fail without a clear +/// explanation as to why. + +type TestViewServer = + ViewServer; + +pub struct RouterTestEnvironment { + pub router_server: Option>, + pub router_streaming_client: Option, + pub router_unary_client: Option, + pub store_servers: Option>, + pub db_test_context: Option, +} + +impl RouterTestEnvironment { + /// Creates a `RouterTestEnvironment` for the router integration tests. + pub fn new(omap_capacity: u64, store_block_ranges: Vec, logger: Logger) -> Self { + let (db_test_context, store_servers, store_clients, shard_uris) = + Self::create_view_stores(omap_capacity, store_block_ranges, logger.clone()); + let port = portpicker::pick_unused_port().expect("pick_unused_port"); + let router_uri = + FogViewRouterUri::from_str(&format!("insecure-fog-view-router://127.0.0.1:{port}")) + .unwrap(); + let port = portpicker::pick_unused_port().expect("pick_unused_port"); + let admin_listen_uri = + AdminUri::from_str(&format!("insecure-mca://127.0.0.1:{port}")).unwrap(); + let config = FogViewRouterConfig { + chain_id: "local".to_string(), + client_responder_id: router_uri + .responder_id() + .expect("Could not get responder id for Fog View Router."), + ias_api_key: Default::default(), + shard_uris, + ias_spid: Default::default(), + client_listen_uri: RouterClientListenUri::Streaming(router_uri.clone()), + client_auth_token_max_lifetime: Default::default(), + client_auth_token_secret: None, + omap_capacity, + admin_listen_uri, + }; + let router_server = 
Self::create_router_server(config, store_clients, &logger); + let router_client = Self::create_router_streaming_client(router_uri, logger); + Self { + db_test_context: Some(db_test_context), + router_server: Some(router_server), + router_streaming_client: Some(router_client), + router_unary_client: None, + store_servers: Some(store_servers), + } + } + + /// Creates a `RouterTestEnvironment` for the router integration tests. + pub fn new_unary( + omap_capacity: u64, + store_block_ranges: Vec, + logger: Logger, + ) -> Self { + let (db_test_context, store_servers, store_clients, shard_uris) = + Self::create_view_stores(omap_capacity, store_block_ranges, logger.clone()); + let port = portpicker::pick_unused_port().expect("pick_unused_port"); + let router_uri = + FogViewUri::from_str(&format!("insecure-fog-view://127.0.0.1:{port}")).unwrap(); + let port = portpicker::pick_unused_port().expect("pick_unused_port"); + let admin_listen_uri = + AdminUri::from_str(&format!("insecure-mca://127.0.0.1:{port}")).unwrap(); + let chain_id = "local".to_string(); + let config = FogViewRouterConfig { + chain_id: chain_id.clone(), + client_responder_id: router_uri + .responder_id() + .expect("Could not get responder id for Fog View Router."), + ias_api_key: Default::default(), + ias_spid: Default::default(), + shard_uris, + client_listen_uri: RouterClientListenUri::Unary(router_uri.clone()), + client_auth_token_max_lifetime: Default::default(), + client_auth_token_secret: None, + omap_capacity, + admin_listen_uri, + }; + let router_server = Self::create_router_server(config, store_clients, &logger); + let router_client = Self::create_router_unary_client(chain_id, router_uri, logger); + + Self { + db_test_context: Some(db_test_context), + router_server: Some(router_server), + router_unary_client: Some(router_client), + router_streaming_client: None, + store_servers: Some(store_servers), + } + } + + fn create_router_server( + config: FogViewRouterConfig, + shards: Arc>>, + logger: 
&Logger, + ) -> FogViewRouterServer { + let enclave = SgxViewEnclave::new( + get_enclave_path(mc_fog_view_enclave::ENCLAVE_FILE), + config.client_responder_id.clone(), + config.omap_capacity, + logger.clone(), + ); + let ra_client = + AttestClient::new(&config.ias_api_key).expect("Could not create IAS client"); + let mut router_server = FogViewRouterServer::new( + config, + enclave, + ra_client, + shards, + SystemTimeProvider::default(), + logger.clone(), + ); + router_server.start(); + router_server + } + + fn create_router_streaming_client( + router_uri: FogViewRouterUri, + logger: Logger, + ) -> FogViewRouterGrpcClient { + let grpcio_env = Arc::new(grpcio::EnvBuilder::new().build()); + let mut mr_signer_verifier = + MrSignerVerifier::from(mc_fog_view_enclave_measurement::sigstruct()); + mr_signer_verifier.allow_hardening_advisory("INTEL-SA-00334"); + let mut verifier = Verifier::default(); + verifier.mr_signer(mr_signer_verifier).debug(DEBUG_ENCLAVE); + + FogViewRouterGrpcClient::new(router_uri, verifier, grpcio_env, logger) + } + + fn create_router_unary_client( + chain_id: String, + router_uri: FogViewUri, + logger: Logger, + ) -> FogViewGrpcClient { + let grpcio_env = Arc::new(grpcio::EnvBuilder::new().build()); + let mr_signer_verifier = + MrSignerVerifier::from(mc_fog_view_enclave_measurement::sigstruct()); + let mut verifier = Verifier::default(); + verifier.mr_signer(mr_signer_verifier).debug(DEBUG_ENCLAVE); + + FogViewGrpcClient::new( + chain_id, + router_uri, + GRPC_RETRY_CONFIG, + verifier, + grpcio_env, + logger, + ) + } + + /// Creates fog view stores with sane defaults. 
+ fn create_view_stores( + omap_capacity: u64, + store_block_ranges: Vec, + logger: Logger, + ) -> ( + SqlRecoveryDbTestContext, + Vec, + Arc>>, + Vec, + ) { + let db_test_context = SqlRecoveryDbTestContext::new(logger.clone()); + let db = db_test_context.get_db_instance(); + let mut store_servers = Vec::new(); + let mut shards = Vec::new(); + let mut shard_uris: Vec = Vec::new(); + + for (i, store_block_range) in store_block_ranges.into_iter().enumerate() { + let (store, store_uri) = { + let port = portpicker::pick_unused_port().expect("pick_unused_port"); + let epoch_sharding_strategy = EpochShardingStrategy::new(store_block_range.clone()); + let responder_id = ResponderId::from_str(&format!("127.0.0.1:{port}")) + .expect("Could not create responder id"); + let uri = FogViewStoreUri::from_str(&format!( + "insecure-fog-view-store://127.0.0.1:{port}?responder-id={}&sharding_strategy={}", + responder_id, + epoch_sharding_strategy.to_string() + )) + .unwrap(); + + let sharding_strategy = Epoch(epoch_sharding_strategy); + shard_uris.push(uri.clone()); + + let config = ViewConfig { + chain_id: "local".to_string(), + client_responder_id: uri.responder_id().unwrap(), + client_listen_uri: uri.clone(), + client_auth_token_secret: None, + omap_capacity, + ias_spid: Default::default(), + ias_api_key: Default::default(), + admin_listen_uri: Default::default(), + client_auth_token_max_lifetime: Default::default(), + sharding_strategy, + postgres_config: Default::default(), + block_query_batch_size: 2, + }; + + let enclave = SgxViewEnclave::new( + get_enclave_path(mc_fog_view_enclave::ENCLAVE_FILE), + config.client_responder_id.clone(), + config.omap_capacity, + logger.clone(), + ); + + let ra_client = + AttestClient::new(&config.ias_api_key).expect("Could not create IAS client"); + + let Epoch(ref sharding_strategy) = config.sharding_strategy; + let mut store = ViewServer::new( + config.clone(), + enclave, + db.clone(), + ra_client, + SystemTimeProvider::default(), + 
sharding_strategy.clone(), + logger.clone(), + ); + store.start(); + (store, uri) + }; + store_servers.push(store); + + let grpc_env = Arc::new( + grpcio::EnvBuilder::new() + .name_prefix(format!("view-store-{i}")) + .build(), + ); + let store_client = FogViewStoreApiClient::new( + ChannelBuilder::default_channel_builder(grpc_env) + .connect_to_uri(&store_uri, &logger), + ); + let shard = Shard::new(store_uri, Arc::new(store_client), store_block_range); + shards.push(shard); + } + + let store_clients = Arc::new(RwLock::new(shards)); + + (db_test_context, store_servers, store_clients, shard_uris) + } +} + +/// Defines the drop order for each field. Do not change the order or the test +/// will hang indefinitely. +impl Drop for RouterTestEnvironment { + fn drop(&mut self) { + // This needs to be dropped first because failure to do so keeps the gRPC + // connection alive and the router server will never close down. + self.router_streaming_client = None; + self.router_server = None; + self.store_servers = None; + // This needs to be dropped after the servers because they have threads that are + // constantly checking the db. + self.db_test_context = None; + } +} + +/// Ensure that all provided ETxOutRecords are in the enclave, and that +/// non-existing ones aren't. +pub fn assert_e_tx_out_records(client: &mut FogViewGrpcClient, records: &[ETxOutRecord]) { + // Construct an array of expected results that includes both records we expect + // to find and records we expect not to find. 
+ let mut expected_fixed_results = Vec::new(); + for record in records { + let fixed_result = FixedTxOutSearchResult::new( + record.search_key.clone(), + &record.payload, + TxOutSearchResultCode::Found, + ); + expected_fixed_results.push(fixed_result); + } + for i in 0..3 { + let search_key = vec![i + 1; 16]; + let not_found_fixed_result = FixedTxOutSearchResult::new_not_found(search_key); + expected_fixed_results.push(not_found_fixed_result); + } + expected_fixed_results.sort_by_key(|result| result.search_key.clone()); + let expected_results = expected_fixed_results + .iter() + .cloned() + .map(|fixed_result| fixed_result.into()) + .collect::>(); + + let search_keys: Vec<_> = expected_fixed_results + .iter() + .map(|result| result.search_key.clone()) + .collect(); + + let mut allowed_tries = 60usize; + loop { + let result = client.request(0, 0, search_keys.clone()).unwrap(); + + let mut actual_fixed_results = result.fixed_tx_out_search_results.clone(); + actual_fixed_results.sort_by(|x, y| x.search_key.cmp(&y.search_key)); + let mut actual_results = result.tx_out_search_results.clone(); + actual_results.sort_by(|x, y| x.search_key.cmp(&y.search_key)); + + let actual_fixed_matches = actual_fixed_results == expected_fixed_results; + let actual_matches = actual_results == expected_results; + if actual_fixed_matches && actual_matches { + break; + } + if allowed_tries == 0 { + panic!("Server did not catch up to database!"); + } + allowed_tries -= 1; + sleep(Duration::from_millis(1000)); + } +} + +/// Adds block data with sane defaults +pub fn add_block_data( + db: &SqlRecoveryDb, + invocation_id: &IngestInvocationId, + block_index: BlockIndex, + cumulative_tx_out_count: u64, + txs: &[ETxOutRecord], +) -> AddBlockDataStatus { + db.add_block_data( + invocation_id, + &Block::new( + BlockVersion::ZERO, + &BlockID::default(), + block_index, + cumulative_tx_out_count, + &Default::default(), + &Default::default(), + ), + 0, + txs, + ) + .unwrap() +} + +/// Wait until first 
server has added stuff to ORAM. Since all view servers +/// should load ORAM at the same time, we could choose to wait for any view +/// server. +pub fn wait_for_highest_block_to_load( + db: &SqlRecoveryDb, + store_servers: &[TestViewServer], + logger: &Logger, +) { + let mut allowed_tries = 1000usize; + loop { + let db_num_blocks = db + .get_highest_known_block_index() + .unwrap() + .map(|v| v + 1) // convert index to count + .unwrap_or(0); + let server_num_blocks = get_highest_processed_block_count(store_servers); + if server_num_blocks > db_num_blocks { + panic!( + "Server num blocks should never be larger than db num blocks: {server_num_blocks} > {db_num_blocks}" + ); + } + if server_num_blocks == db_num_blocks { + log::info!(logger, "Stopping, block {}", server_num_blocks); + break; + } + log::info!( + logger, + "Waiting for server to catch up to db... {} < {}", + server_num_blocks, + db_num_blocks + ); + if allowed_tries == 0 { + panic!("Server did not catch up to database!"); + } + allowed_tries -= 1; + sleep(Duration::from_secs(1)); + } +} + +/// Wait until a server has added a specific number of blocks to load. +pub fn wait_for_block_to_load(block_count: u64, store_servers: &[TestViewServer], logger: &Logger) { + let mut allowed_tries = 60usize; + loop { + let server_num_blocks = get_highest_processed_block_count(store_servers); + if server_num_blocks >= block_count { + break; + } + log::info!( + logger, + "Waiting for server to catch up to db... {} < {}", + server_num_blocks, + block_count, + ); + if allowed_tries == 0 { + panic!("Server did not catch up to database!"); + } + allowed_tries -= 1; + sleep(Duration::from_millis(1000)); + } +} + +/// Wait until a server has added a specific number of blocks to load. 
+pub fn wait_for_highest_processed_and_last_known( + view_client: &mut FogViewGrpcClient, + highest_processed_block_count: u64, + last_known_block_count: u64, +) { + let mut allowed_tries = 60usize; + loop { + let nonsense_search_keys = vec![vec![50u8]]; + let result = view_client.request(0, 0, nonsense_search_keys).unwrap(); + if result.highest_processed_block_count == highest_processed_block_count + && result.last_known_block_count == last_known_block_count + { + break; + } + + if allowed_tries == 0 { + panic!("Server did not catch up to database! highest_processed_block_count = {}, last_known_block_count = {}", result.highest_processed_block_count, result.last_known_block_count); + } + allowed_tries -= 1; + sleep(Duration::from_millis(1000)); + } +} + +/// Find the highest processed number of blocks in a collection of store +/// servers. +pub fn get_highest_processed_block_count(store_servers: &[TestViewServer]) -> u64 { + store_servers + .iter() + .map(|server| server.highest_processed_block_count()) + .max() + .unwrap_or_default() +} + +/// Creates a list of BlockRanges for store servers. +pub fn create_block_ranges(store_count: usize, blocks_per_store: u64) -> Vec { + let total_block_count = store_count * (blocks_per_store as usize); + (0..total_block_count) + .step_by(blocks_per_store as usize) + .map(|i| BlockRange::new_from_length(i as u64, blocks_per_store)) + .collect::>() +} diff --git a/fog/view/server/tests/smoke_tests.rs b/fog/view/server/tests/smoke_tests.rs deleted file mode 100644 index 4a543a5f57..0000000000 --- a/fog/view/server/tests/smoke_tests.rs +++ /dev/null @@ -1,850 +0,0 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation - -// This integration-level test mocks out consensus and tries to show -// that the users are able to recover their transactions. -// -// This is a rewrite of what was historically called test_ingest_view and was an -// end-to-end integration tests of ingest+view+fog-client. 
-// It exercises both the ingest enclave, and the fog-related crypto that makes -// its way into the client. - -use mc_attest_net::{Client as AttestClient, RaClient}; -use mc_attest_verifier::{MrSignerVerifier, Verifier, DEBUG_ENCLAVE}; -use mc_blockchain_types::{Block, BlockID, BlockVersion}; -use mc_common::{ - logger::{log, test_with_logger, Logger}, - time::SystemTimeProvider, - ResponderId, -}; -use mc_crypto_keys::{CompressedRistrettoPublic, RistrettoPublic}; -use mc_fog_kex_rng::KexRngPubkey; -use mc_fog_recovery_db_iface::{RecoveryDb, ReportData, ReportDb}; -use mc_fog_sql_recovery_db::{test_utils::SqlRecoveryDbTestContext, SqlRecoveryDb}; -use mc_fog_test_infra::{ - db_tests::{random_block, random_kex_rng_pubkey}, - get_enclave_path, -}; -use mc_fog_types::{ - common::BlockRange, - view::{TxOutSearchResult, TxOutSearchResultCode}, - ETxOutRecord, -}; -use mc_fog_uri::{ConnectionUri, FogViewUri}; -use mc_fog_view_connection::FogViewGrpcClient; -use mc_fog_view_enclave::SgxViewEnclave; -use mc_fog_view_protocol::FogViewConnection; -use mc_fog_view_server::{config::MobileAcctViewConfig as ViewConfig, server::ViewServer}; -use mc_util_from_random::FromRandom; -use mc_util_grpc::GrpcRetryConfig; -use rand::{rngs::StdRng, SeedableRng}; -use std::{str::FromStr, sync::Arc, thread::sleep, time::Duration}; - -const GRPC_RETRY_CONFIG: GrpcRetryConfig = GrpcRetryConfig { - grpc_retry_count: 3, - grpc_retry_millis: 20, -}; - -fn get_test_environment( - view_omap_capacity: u64, - logger: Logger, -) -> ( - SqlRecoveryDbTestContext, - ViewServer, - FogViewGrpcClient, -) { - let db_test_context = SqlRecoveryDbTestContext::new(logger.clone()); - let db = db_test_context.get_db_instance(); - - let port = portpicker::pick_unused_port().expect("pick_unused_port"); - let uri = FogViewUri::from_str(&format!("insecure-fog-view://127.0.0.1:{port}")).unwrap(); - - let server = { - let config = ViewConfig { - chain_id: "local".to_string(), - client_responder_id: 
ResponderId::from_str(&uri.addr()).unwrap(), - client_listen_uri: uri.clone(), - client_auth_token_secret: None, - omap_capacity: view_omap_capacity, - ias_spid: Default::default(), - ias_api_key: Default::default(), - admin_listen_uri: Default::default(), - client_auth_token_max_lifetime: Default::default(), - postgres_config: Default::default(), - block_query_batch_size: 2, - }; - - let enclave = SgxViewEnclave::new( - get_enclave_path(mc_fog_view_enclave::ENCLAVE_FILE), - config.client_responder_id.clone(), - config.omap_capacity, - logger.clone(), - ); - - let ra_client = - AttestClient::new(&config.ias_api_key).expect("Could not create IAS client"); - - let mut server = ViewServer::new( - config, - enclave, - db, - ra_client, - SystemTimeProvider::default(), - logger.clone(), - ); - server.start(); - server - }; - - let client = { - let grpcio_env = Arc::new(grpcio::EnvBuilder::new().build()); - let mut mr_signer_verifier = - MrSignerVerifier::from(mc_fog_view_enclave_measurement::sigstruct()); - mr_signer_verifier - .allow_hardening_advisories(mc_fog_view_enclave_measurement::HARDENING_ADVISORIES); - - let mut verifier = Verifier::default(); - verifier.mr_signer(mr_signer_verifier).debug(DEBUG_ENCLAVE); - - FogViewGrpcClient::new( - "local".to_string(), - uri, - GRPC_RETRY_CONFIG, - verifier, - grpcio_env, - logger, - ) - }; - - (db_test_context, server, client) -} - -// Smoke tests that if we add stuff to recovery database, client can see -// results when they hit a view server. 
-fn test_view_integration(view_omap_capacity: u64, logger: Logger) { - let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); - let (db_context, server, mut view_client) = - get_test_environment(view_omap_capacity, logger.clone()); - let db = db_context.get_db_instance(); - - let ingress_key = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); - let accepted_block_1 = db.new_ingress_key(&ingress_key, 0).unwrap(); - assert_eq!(accepted_block_1, 0); - - // First add some data to the database - let txs: Vec = (1u8..21u8) - .map(|x| ETxOutRecord { - search_key: vec![x; 16], - payload: vec![x; 232], - }) - .collect(); - - let pubkey1 = KexRngPubkey { - public_key: [1; 32].to_vec(), - version: 0, - }; - let invoc_id1 = db - .new_ingest_invocation(None, &ingress_key, &pubkey1, 0) - .unwrap(); - - db.add_block_data( - &invoc_id1, - &Block::new( - BlockVersion::ZERO, - &BlockID::default(), - 0, - 2, - &Default::default(), - &Default::default(), - ), - 0, - &txs[0..2], - ) - .unwrap(); - - db.add_block_data( - &invoc_id1, - &Block::new( - BlockVersion::ZERO, - &BlockID::default(), - 1, - 6, - &Default::default(), - &Default::default(), - ), - 0, - &txs[2..6], - ) - .unwrap(); - - let pubkey2 = KexRngPubkey { - public_key: [2; 32].to_vec(), - version: 0, - }; - let invoc_id2 = db - .new_ingest_invocation(None, &ingress_key, &pubkey2, 2) - .unwrap(); - - db.add_block_data( - &invoc_id2, - &Block::new( - BlockVersion::ZERO, - &BlockID::default(), - 2, - 12, - &Default::default(), - &Default::default(), - ), - 0, - &txs[6..12], - ) - .unwrap(); - - // Block 3 is missing (on a different key) - let ingress_key2 = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); - let accepted_block_2 = db.new_ingress_key(&ingress_key2, 3).unwrap(); - assert_eq!(accepted_block_2, 3); - - db.set_report( - &ingress_key2, - "", - &ReportData { - pubkey_expiry: 4, - ingest_invocation_id: None, - report: Default::default(), - }, - ) - .unwrap(); - 
db.report_lost_ingress_key(ingress_key2).unwrap(); - - // Block 3 has no data for the original key - // (view server must support this, ingest skips some TxOuts if the decrypted fog - // hint is junk) - db.add_block_data( - &invoc_id2, - &Block::new( - BlockVersion::ZERO, - &BlockID::default(), - 3, - 12, - &Default::default(), - &Default::default(), - ), - 0, - &[], - ) - .unwrap(); - - db.add_block_data( - &invoc_id2, - &Block::new( - BlockVersion::ZERO, - &BlockID::default(), - 4, - 16, - &Default::default(), - &Default::default(), - ), - 0, - &txs[12..16], - ) - .unwrap(); - - db.decommission_ingest_invocation(&invoc_id1).unwrap(); - - db.add_block_data( - &invoc_id2, - &Block::new( - BlockVersion::ZERO, - &BlockID::default(), - 5, - 20, - &Default::default(), - &Default::default(), - ), - 0, - &txs[16..20], - ) - .unwrap(); - - // Wait until server has added stuff to ORAM - let mut allowed_tries = 1000usize; - loop { - let db_num_blocks = db - .get_highest_known_block_index() - .unwrap() - .map(|v| v + 1) // convert index to count - .unwrap_or(0); - let server_num_blocks = server.highest_processed_block_count(); - if server_num_blocks > db_num_blocks { - panic!( - "Server num blocks should never be larger than db num blocks: {server_num_blocks} > {db_num_blocks}" - ); - } - if server_num_blocks == db_num_blocks { - log::info!(logger, "Stopping, block {}", server_num_blocks); - break; - } - log::info!( - logger, - "Waiting for server to catch up to db... 
{} < {}", - server_num_blocks, - db_num_blocks - ); - if allowed_tries == 0 { - panic!("Server did not catch up to database!"); - } - allowed_tries -= 1; - sleep(Duration::from_millis(1000)); - } - - // Now make some requests against view_client - let result = view_client.request(0, 0, Default::default()).unwrap(); - assert_eq!(result.highest_processed_block_count, 6); - // 4 events are expected (in the following order): - // - 1 new rng record (for invoc_id1) - // - 1 new rng record (for invoc_id2) - // - 1 missing block range - // - 1 ingest decommissioning (for invoc_id1) - assert_eq!(result.next_start_from_user_event_id, 4); - assert_eq!(result.rng_records.len(), 2); - assert_eq!(result.rng_records[0].pubkey, pubkey1); - assert_eq!(result.rng_records[1].pubkey, pubkey2); - assert_eq!(result.tx_out_search_results.len(), 0); - assert_eq!(result.missed_block_ranges.len(), 1); - assert_eq!(result.missed_block_ranges[0], BlockRange::new(3, 4)); - assert_eq!(result.last_known_block_count, 6); - - let result = view_client.request(0, 0, Default::default()).unwrap(); - assert_eq!(result.highest_processed_block_count, 6); - assert_eq!(result.next_start_from_user_event_id, 4); - assert_eq!(result.rng_records.len(), 2); - assert_eq!(result.rng_records[0].pubkey, pubkey1); - assert_eq!(result.rng_records[1].pubkey, pubkey2); - assert_eq!(result.tx_out_search_results.len(), 0); - assert_eq!(result.missed_block_ranges.len(), 1); - assert_eq!(result.missed_block_ranges[0], BlockRange::new(3, 4)); - assert_eq!(result.last_known_block_count, 6); - - let result = view_client - // starting at user event id 2 skips invoc_id1 - // (event id 1 is for invoc_id1) - .request(1, 0, Default::default()) - .unwrap(); - assert_eq!(result.highest_processed_block_count, 6); - assert_eq!(result.next_start_from_user_event_id, 4); - assert_eq!(result.rng_records.len(), 1); - assert_eq!(result.rng_records[0].pubkey, pubkey2); - assert_eq!(result.rng_records[0].start_block, 2); - 
assert_eq!(result.tx_out_search_results.len(), 0); - assert_eq!(result.missed_block_ranges.len(), 1); - assert_eq!(result.missed_block_ranges[0], BlockRange::new(3, 4)); - assert_eq!(result.last_known_block_count, 6); - - // No events after event id 4 - let result = view_client.request(4, 0, Default::default()).unwrap(); - assert_eq!(result.highest_processed_block_count, 6); - assert_eq!(result.next_start_from_user_event_id, 4); - assert_eq!(result.rng_records.len(), 0); - assert_eq!(result.tx_out_search_results.len(), 0); - assert_eq!(result.missed_block_ranges.len(), 0); - assert_eq!(result.last_known_block_count, 6); - - let result = view_client.request(80, 0, Default::default()).unwrap(); - assert_eq!(result.highest_processed_block_count, 6); - assert_eq!(result.next_start_from_user_event_id, 80); - assert_eq!(result.rng_records.len(), 0); - assert_eq!(result.tx_out_search_results.len(), 0); - assert_eq!(result.missed_block_ranges.len(), 0); - assert_eq!(result.last_known_block_count, 6); - - let result = view_client - .request(4, 0, vec![vec![1u8; 16], vec![2u8; 16], vec![3u8; 16]]) - .unwrap(); - assert_eq!(result.highest_processed_block_count, 6); - assert_eq!(result.next_start_from_user_event_id, 4); - assert_eq!(result.rng_records.len(), 0); - assert_eq!(result.tx_out_search_results.len(), 3); - { - let mut sort_txs = result.tx_out_search_results.clone(); - sort_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); - assert_eq!(sort_txs[0].search_key, vec![1u8; 16]); - assert_eq!(sort_txs[0].result_code, 1); - assert_eq!(sort_txs[0].ciphertext, vec![1u8; 232]); - - assert_eq!(sort_txs[1].search_key, vec![2u8; 16]); - assert_eq!(sort_txs[1].result_code, 1); - assert_eq!(sort_txs[1].ciphertext, vec![2u8; 232]); - - assert_eq!(sort_txs[2].search_key, vec![3u8; 16]); - assert_eq!(sort_txs[2].result_code, 1); - assert_eq!(sort_txs[2].ciphertext, vec![3u8; 232]); - } - assert_eq!(result.missed_block_ranges.len(), 0); // no range reported since we started at event 
id 4 - assert_eq!(result.last_known_block_count, 6); - - let result = view_client - .request(4, 0, vec![vec![5u8; 16], vec![8u8; 16], vec![200u8; 16]]) - .unwrap(); - assert_eq!(result.highest_processed_block_count, 6); - assert_eq!(result.next_start_from_user_event_id, 4); - assert_eq!(result.rng_records.len(), 0); - assert_eq!(result.tx_out_search_results.len(), 3); - { - let mut sort_txs = result.tx_out_search_results.clone(); - sort_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); - assert_eq!(sort_txs[0].search_key, vec![5u8; 16]); - assert_eq!(sort_txs[0].result_code, 1); - assert_eq!(sort_txs[0].ciphertext, vec![5u8; 232]); - - assert_eq!(sort_txs[1].search_key, vec![8u8; 16]); - assert_eq!(sort_txs[1].result_code, 1); - assert_eq!(sort_txs[1].ciphertext, vec![8u8; 232]); - - assert_eq!(sort_txs[2].search_key, vec![200u8; 16]); - assert_eq!(sort_txs[2].result_code, 2); - assert_eq!(sort_txs[2].ciphertext, vec![0u8; 232]); - } - - assert_eq!(result.missed_block_ranges.len(), 0); // no range reported since we started at event id 4 - assert_eq!(result.last_known_block_count, 6); - - let result = view_client.request(0, 0, vec![vec![200u8; 17]]).unwrap(); - assert_eq!(result.highest_processed_block_count, 6); - assert_eq!(result.next_start_from_user_event_id, 4); - assert_eq!(result.rng_records.len(), 2); - assert_eq!(result.rng_records[0].pubkey, pubkey1); - assert_eq!(result.rng_records[1].pubkey, pubkey2); - assert_eq!(result.tx_out_search_results.len(), 1); - { - let mut sort_txs = result.tx_out_search_results.clone(); - sort_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); - assert_eq!(sort_txs[0].search_key, vec![200u8; 17]); - assert_eq!(sort_txs[0].result_code, 3); - assert_eq!(sort_txs[0].ciphertext, vec![0u8; 232]); - } - assert_eq!(result.missed_block_ranges.len(), 1); - assert_eq!(result.missed_block_ranges[0], BlockRange::new(3, 4)); - assert_eq!(result.last_known_block_count, 6); -} - -#[test_with_logger] -fn test_view_sql_512(logger: 
Logger) { - test_view_integration(512, logger); - - // Sleep before exiting to give view server threads time to join - sleep(Duration::from_millis(1000)); -} - -#[test_with_logger] -fn test_view_sql_1mil(logger: Logger) { - test_view_integration(1024 * 1024, logger); - - // Sleep before exiting to give view server threads time to join - sleep(Duration::from_millis(1000)); -} - -/// Ensure that all provided ETxOutRecords are in the enclave, and that -/// non-existing ones aren't. -fn assert_e_tx_out_records_sanity( - client: &mut FogViewGrpcClient, - records: &[ETxOutRecord], - - logger: &Logger, -) { - // Construct an array of expected results that includes both records we expect - // to find and records we expect not to find. - let mut expected_results = Vec::new(); - for record in records { - expected_results.push(TxOutSearchResult { - search_key: record.search_key.clone(), - result_code: TxOutSearchResultCode::Found as u32, - ciphertext: record.payload.clone(), - }); - } - for i in 0..3 { - expected_results.push(TxOutSearchResult { - search_key: vec![i + 1; 16], // Search key if all zeros is invalid. - result_code: TxOutSearchResultCode::NotFound as u32, - ciphertext: vec![0; 64], - }); - } - - let search_keys: Vec<_> = expected_results - .iter() - .map(|result| result.search_key.clone()) - .collect(); - - let mut allowed_tries = 60usize; - loop { - let result = client.request(0, 0, search_keys.clone()).unwrap(); - if result.tx_out_search_results == expected_results { - break; - } - - log::info!(logger, "A {:?}", result.tx_out_search_results); - log::info!(logger, "B {:?}", expected_results); - - if allowed_tries == 0 { - panic!("Server did not catch up to database!"); - } - allowed_tries -= 1; - sleep(Duration::from_millis(1000)); - } -} - -/// Test that view server behaves correctly when there is some overlap between -/// two currently active ingest invocations. 
-#[test_with_logger] -fn test_overlapping_ingest_ranges(logger: Logger) { - let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); - let (db_context, server, mut view_client) = get_test_environment(512, logger.clone()); - let db = db_context.get_db_instance(); - - let ingress_key1 = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); - db.new_ingress_key(&ingress_key1, 0).unwrap(); - - let ingress_key2 = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); - db.new_ingress_key(&ingress_key2, 10).unwrap(); - - // invoc_id1 starts at block 0 - let invoc_id1 = db - .new_ingest_invocation(None, &ingress_key1, &random_kex_rng_pubkey(&mut rng), 0) - .unwrap(); - - // invoc_id2 starts at block 10 - let invoc_id2 = db - .new_ingest_invocation(None, &ingress_key2, &random_kex_rng_pubkey(&mut rng), 10) - .unwrap(); - - // Add 5 blocks to both invocations. This will add blocks 0-4 to invoc1 and - // blocks 10-14 to invoc2. Since we're missing blocks 5-9, we should only - // see blocks 0-4 for now. - let mut expected_records = Vec::new(); - for i in 0..5 { - let (block, records) = random_block(&mut rng, i, 5); // 5 outputs per block - db.add_block_data(&invoc_id1, &block, 0, &records).unwrap(); - expected_records.extend(records); - - let (block, records) = random_block(&mut rng, i + 10, 5); // start block is 10 - db.add_block_data(&invoc_id2, &block, 0, &records).unwrap(); - expected_records.extend(records); - } - - let mut allowed_tries = 60usize; - loop { - let server_num_blocks = server.highest_processed_block_count(); - if server_num_blocks >= 5 { - break; - } - log::info!( - logger, - "Waiting for server to catch up to db... 
{} < 5", - server_num_blocks, - ); - if allowed_tries == 0 { - panic!("Server did not catch up to database!"); - } - allowed_tries -= 1; - sleep(Duration::from_millis(1000)); - } - - assert_eq!(server.highest_processed_block_count(), 5); - - assert_e_tx_out_records_sanity(&mut view_client, &expected_records, &logger); - - // Give server time to process some more blocks, although it shouldn't. - sleep(Duration::from_millis(1000)); - assert_eq!(server.highest_processed_block_count(), 5); - - // See that we get a sane client response. - let result = view_client.request(0, 0, Default::default()).unwrap(); - assert_eq!(result.highest_processed_block_count, 5); - assert_eq!(result.last_known_block_count, 15); // The last known block is not tied to the serial processing of blocks. - - // Add blocks 5-19 to invoc_id1. This will allow us to query blocks 0-14, since - // invoc_id2 only has blocks 10-14. - for i in 5..20 { - let (block, records) = random_block(&mut rng, i, 5); // 5 outputs per block - db.add_block_data(&invoc_id1, &block, 0, &records).unwrap(); - - expected_records.extend(records); - } - - let mut allowed_tries = 60usize; - loop { - let server_num_blocks = server.highest_processed_block_count(); - if server_num_blocks >= 15 { - break; - } - log::info!( - logger, - "Waiting for server to catch up to db... {} < 15", - server_num_blocks, - ); - if allowed_tries == 0 { - panic!("Server did not catch up to database!"); - } - allowed_tries -= 1; - sleep(Duration::from_millis(1000)); - } - - assert_eq!(server.highest_processed_block_count(), 15); - - // Give server time to process some more blocks, although it shouldn't. 
- let mut allowed_tries = 60usize; - while allowed_tries > 0 { - sleep(Duration::from_millis(1000)); - allowed_tries -= 1; - - if server.highest_processed_block_count() != 15 { - continue; - } - - let result = view_client.request(0, 0, Default::default()).unwrap(); - if result.last_known_block_count != 20 { - continue; - } - - break; - } - - assert_eq!(server.highest_processed_block_count(), 15); - - // See that we get a sane client response. - let result = view_client.request(0, 0, Default::default()).unwrap(); - assert_eq!(result.highest_processed_block_count, 15); - assert_eq!(result.last_known_block_count, 20); // The last known block is not tied to the serial processing of blocks. - - // Add blocks 15-30 to invoc_id2, this should bring us to block 20. - for i in 15..30 { - let (block, records) = random_block(&mut rng, i, 5); // 5 outputs per block - db.add_block_data(&invoc_id2, &block, 0, &records).unwrap(); - expected_records.extend(records); - } - - let mut allowed_tries = 60usize; - loop { - let server_num_blocks = server.highest_processed_block_count(); - if server_num_blocks >= 20 { - break; - } - log::info!( - logger, - "Waiting for server to catch up to db... {} < 20", - server_num_blocks, - ); - if allowed_tries == 0 { - panic!("Server did not catch up to database!"); - } - allowed_tries -= 1; - sleep(Duration::from_millis(1000)); - } - - assert_eq!(server.highest_processed_block_count(), 20); - - // Give server time to process some more blocks, although it shouldn't. - sleep(Duration::from_millis(1000)); - assert_eq!(server.highest_processed_block_count(), 20); - - // See that we get a sane client response. - let mut allowed_tries = 60usize; - loop { - let result = view_client.request(0, 0, Default::default()).unwrap(); - if result.highest_processed_block_count == 20 && result.last_known_block_count == 30 { - break; - } - - if allowed_tries == 0 { - panic!("Server did not catch up to database! 
highest_processed_block_count = {}, last_known_block_count = {}", result.highest_processed_block_count, result.last_known_block_count); - } - allowed_tries -= 1; - sleep(Duration::from_millis(1000)); - } - - // Ensure all ETxOutRecords are searchable - assert_e_tx_out_records_sanity(&mut view_client, &expected_records, &logger); -} - -/// Test that view server behaves correctly when there is a missing range before -/// any ingest invocations. -#[test_with_logger] -fn test_start_with_missing_range(logger: Logger) { - let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); - let (db_context, server, mut view_client) = get_test_environment(512, logger.clone()); - let db = db_context.get_db_instance(); - - let ingress_key = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); - db.new_ingress_key(&ingress_key, 5).unwrap(); - - // invoc_id1 starts at block 0, but the initial blocks reported are 10-15 - let invoc_id1 = db - .new_ingest_invocation(None, &ingress_key, &random_kex_rng_pubkey(&mut rng), 5) - .unwrap(); - - // Add 5 blocks to invoc_id1. - let mut expected_records = Vec::new(); - for i in 10..15 { - let (block, records) = random_block(&mut rng, i, 5); // 5 outputs per block - db.add_block_data(&invoc_id1, &block, 0, &records).unwrap(); - expected_records.extend(records); - } - - // Give server time to process some more blocks, although it shouldn't. - sleep(Duration::from_millis(1000)); - assert_eq!(server.highest_processed_block_count(), 0); - - // See that we get a sane client response. - let mut allowed_tries = 60usize; - loop { - let result = view_client.request(0, 0, Default::default()).unwrap(); - if result.highest_processed_block_count == 0 && result.last_known_block_count == 0 { - break; - } - - if allowed_tries == 0 { - panic!("Server did not catch up to database! 
highest_processed_block_count = {}, last_known_block_count = {}", result.highest_processed_block_count, result.last_known_block_count); - } - allowed_tries -= 1; - sleep(Duration::from_millis(1000)); - } - - // Adding the first 5 blocks that were the gap - for i in 5..10 { - let (block, records) = random_block(&mut rng, i, 5); // 5 outputs per block - db.add_block_data(&invoc_id1, &block, 0, &records).unwrap(); - expected_records.extend(records); - } - - let mut allowed_tries = 60usize; - loop { - let result = view_client.request(0, 0, Default::default()).unwrap(); - if result.highest_processed_block_count == 15 && result.last_known_block_count == 15 { - break; - } - - if allowed_tries == 0 { - panic!("Server did not catch up to database! highest_processed_block_count = {}, last_known_block_count = {}", result.highest_processed_block_count, result.last_known_block_count); - } - allowed_tries -= 1; - sleep(Duration::from_millis(1000)); - } - assert_eq!(server.highest_processed_block_count(), 15); - - assert_e_tx_out_records_sanity(&mut view_client, &expected_records, &logger); -} - -/// Test that view server behaves correctly when there is a missing range -/// between two ingest invocations. 
-#[test_with_logger] -fn test_middle_missing_range_with_decommission(logger: Logger) { - let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); - let (db_context, server, mut view_client) = get_test_environment(512, logger.clone()); - let db = db_context.get_db_instance(); - - let ingress_key1 = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); - db.new_ingress_key(&ingress_key1, 0).unwrap(); - db.set_report( - &ingress_key1, - "", - &ReportData { - pubkey_expiry: 10, - ingest_invocation_id: None, - report: Default::default(), - }, - ) - .unwrap(); - - let ingress_key2 = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); - db.new_ingress_key(&ingress_key2, 10).unwrap(); - - // invoc_id1 starts at block 0 - let invoc_id1 = db - .new_ingest_invocation(None, &ingress_key1, &random_kex_rng_pubkey(&mut rng), 0) - .unwrap(); - - // Add 5 blocks to invoc_id1. - let mut expected_records = Vec::new(); - for i in 0..5 { - let (block, records) = random_block(&mut rng, i, 5); // 5 outputs per block - db.add_block_data(&invoc_id1, &block, 0, &records).unwrap(); - expected_records.extend(records); - } - - // At this point we should be at highest processed block 5, and highest known 5, - // because ingress key 2 doesn't start until 10, and doesn't have any blocks - // associated to it yet. - let mut allowed_tries = 60usize; - loop { - let result = view_client.request(0, 0, Default::default()).unwrap(); - if result.highest_processed_block_count == 5 && result.last_known_block_count == 5 { - break; - } - - if allowed_tries == 0 { - panic!("Server did not catch up to database! 
highest_processed_block_count = {}, last_known_block_count = {}", result.highest_processed_block_count, result.last_known_block_count); - } - allowed_tries -= 1; - sleep(Duration::from_millis(1000)); - } - assert_eq!(server.highest_processed_block_count(), 5); - - // Ingress key 1 is lost - db.report_lost_ingress_key(ingress_key1).unwrap(); - assert_eq!( - db.get_missed_block_ranges().unwrap(), - vec![BlockRange { - start_block: 5, - end_block: 10 - }] - ); - - // invoc_id2 starts at block 10 - let invoc_id2 = db - .new_ingest_invocation(None, &ingress_key2, &random_kex_rng_pubkey(&mut rng), 10) - .unwrap(); - - // Add 5 blocks to invoc_id2. - for i in 10..15 { - let (block, records) = random_block(&mut rng, i, 5); // 5 outputs per block - db.add_block_data(&invoc_id2, &block, 0, &records).unwrap(); - expected_records.extend(records); - } - - // At this point invoc_id1 is marked lost, so we should be at highest processed - // block 10 but the last known block should be 15. - let mut allowed_tries = 60usize; - loop { - let result = view_client.request(0, 0, Default::default()).unwrap(); - if result.highest_processed_block_count == 15 && result.last_known_block_count == 15 { - break; - } - - if allowed_tries == 0 { - panic!("Server did not catch up to database! highest_processed_block_count = {}, last_known_block_count = {}", result.highest_processed_block_count, result.last_known_block_count); - } - allowed_tries -= 1; - sleep(Duration::from_millis(1000)); - } - assert_eq!(server.highest_processed_block_count(), 15); - - // Decommissioning invoc_id1 should allow us to advance to the last block - // invoc_id2 has processed. 
- db.decommission_ingest_invocation(&invoc_id1).unwrap(); - - let mut allowed_tries = 60usize; - loop { - let result = view_client.request(0, 0, Default::default()).unwrap(); - if result.highest_processed_block_count == 15 && result.last_known_block_count == 15 { - break; - } - - if allowed_tries == 0 { - panic!("Server did not catch up to database! highest_processed_block_count = {}, last_known_block_count = {}", result.highest_processed_block_count, result.last_known_block_count); - } - allowed_tries -= 1; - sleep(Duration::from_millis(1000)); - } - assert_eq!(server.highest_processed_block_count(), 15); - - assert_e_tx_out_records_sanity(&mut view_client, &expected_records, &logger); -} diff --git a/fog/view/server/tests/streaming_smoke_tests.rs b/fog/view/server/tests/streaming_smoke_tests.rs new file mode 100644 index 0000000000..5c11a5ed5a --- /dev/null +++ b/fog/view/server/tests/streaming_smoke_tests.rs @@ -0,0 +1,436 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +use futures::executor::block_on; +use mc_common::logger::{create_app_logger, o}; +use mc_crypto_keys::{CompressedRistrettoPublic, RistrettoPublic}; +use mc_fog_kex_rng::KexRngPubkey; +use mc_fog_recovery_db_iface::{RecoveryDb, ReportData, ReportDb}; +use mc_fog_types::{common::BlockRange, view::TxOutSearchResultCode, ETxOutRecord}; +use mc_fog_view_server_test_utils::RouterTestEnvironment; +use mc_util_from_random::FromRandom; +use rand::{rngs::StdRng, SeedableRng}; +use yare::parameterized; + +#[parameterized( +small_omap_one_store = { 512, 1, 6 }, +small_omap_multiple_stores = { 512, 6, 1 }, +large_omap_one_store = { 1048576, 1, 6 }, +large_omap_multiple_stores = { 1048576, 6, 1 }, +)] +fn test_streaming_integration(omap_capacity: u64, store_count: usize, blocks_per_store: u64) { + let (logger, _global_logger_guard) = create_app_logger(o!()); + let store_block_ranges = + mc_fog_view_server_test_utils::create_block_ranges(store_count, blocks_per_store); + let mut test_environment = + 
RouterTestEnvironment::new(omap_capacity, store_block_ranges, logger.clone()); + + let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); + let db = test_environment + .db_test_context + .as_ref() + .unwrap() + .get_db_instance(); + let store_servers = test_environment.store_servers.as_ref().unwrap(); + + let ingress_key = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); + let accepted_block_1 = db.new_ingress_key(&ingress_key, 0).unwrap(); + assert_eq!(accepted_block_1, 0); + + // First add some data to the database + let txs: Vec = (1u8..21u8) + .map(|x| ETxOutRecord { + search_key: vec![x; 16], + payload: vec![x; 232], + }) + .collect(); + + let egress_public_key_1 = KexRngPubkey { + public_key: vec![1; 32], + version: 0, + }; + + db.set_report( + &ingress_key, + "", + &ReportData { + pubkey_expiry: 6, + ingest_invocation_id: None, + report: Default::default(), + }, + ) + .unwrap(); + + let invoc_id1 = db + .new_ingest_invocation(None, &ingress_key, &egress_public_key_1, 0) + .unwrap(); + + mc_fog_view_server_test_utils::add_block_data(&db, &invoc_id1, 0, 2, &txs[..2]); + mc_fog_view_server_test_utils::add_block_data(&db, &invoc_id1, 1, 6, &txs[2..6]); + + let egress_public_key_2 = KexRngPubkey { + public_key: [2; 32].to_vec(), + version: 0, + }; + let invoc_id2 = db + .new_ingest_invocation(None, &ingress_key, &egress_public_key_2, 2) + .unwrap(); + + mc_fog_view_server_test_utils::add_block_data(&db, &invoc_id2, 2, 12, &txs[6..12]); + + let ingress_key2 = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); + let accepted_block_2 = db.new_ingress_key(&ingress_key2, 3).unwrap(); + assert_eq!(accepted_block_2, 3); + + db.set_report( + &ingress_key2, + "", + &ReportData { + pubkey_expiry: 4, + ingest_invocation_id: None, + report: Default::default(), + }, + ) + .unwrap(); + db.report_lost_ingress_key(ingress_key2).unwrap(); + // Block 3 has no data for the original key. 
This tests mocks this behavior by + // adding an empty slice of tx outs for the block. + // + // Note: View server must support this behavior, ingest skips some TxOuts if the + // decrypted fog hint is junk. + mc_fog_view_server_test_utils::add_block_data(&db, &invoc_id2, 3, 12, &[]); + mc_fog_view_server_test_utils::add_block_data(&db, &invoc_id2, 4, 16, &txs[12..16]); + db.decommission_ingest_invocation(&invoc_id1).unwrap(); + mc_fog_view_server_test_utils::add_block_data(&db, &invoc_id2, 5, 20, &txs[16..20]); + + mc_fog_view_server_test_utils::wait_for_highest_block_to_load(&db, store_servers, &logger); + + let router_client = test_environment.router_streaming_client.as_mut().unwrap(); + let nonsense_search_keys = vec![vec![50u8]]; + + // Query 1 should yield 4 events: + // - 1 new rng record (for invoc_id1) + // - 1 new rng record (for invoc_id2) + // - 1 missing block range + // - 1 ingest decommissioning (for invoc_id1) + let result = block_on(router_client.query(0, 0, nonsense_search_keys.clone())); + assert!(result.is_ok()); + let mut result = result.unwrap(); + + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 4); + assert_eq!(result.rng_records.len(), 2); + result + .rng_records + .sort_by_key(|rng_record| rng_record.ingest_invocation_id); + assert_eq!(result.rng_records[0].pubkey, egress_public_key_1); + assert_eq!(result.rng_records[1].pubkey, egress_public_key_2); + assert_eq!(result.tx_out_search_results.len(), 1); + assert_eq!(result.fixed_tx_out_search_results.len(), 1); + assert_eq!( + TxOutSearchResultCode::try_from(result.tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!( + TxOutSearchResultCode::try_from(result.fixed_tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!(result.missed_block_ranges.len(), 1); + assert_eq!(result.missed_block_ranges[0], BlockRange::new(3, 4)); + 
assert_eq!(result.last_known_block_count, 6); + + // Query 2 is the same as Query 1 and tests that identical queries (when no + // blocks have been added etc.) should yield identical results. + let result = block_on(router_client.query(0, 0, nonsense_search_keys.clone())); + assert!(result.is_ok()); + let mut result = result.unwrap(); + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 4); + assert_eq!(result.rng_records.len(), 2); + result + .rng_records + .sort_by_key(|rng_record| rng_record.ingest_invocation_id); + assert_eq!(result.rng_records[0].pubkey, egress_public_key_1); + assert_eq!(result.rng_records[1].pubkey, egress_public_key_2); + assert_eq!(result.tx_out_search_results.len(), 1); + assert_eq!(result.fixed_tx_out_search_results.len(), 1); + assert_eq!( + TxOutSearchResultCode::try_from(result.tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!( + TxOutSearchResultCode::try_from(result.fixed_tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!(result.missed_block_ranges.len(), 1); + assert_eq!(result.missed_block_ranges[0], BlockRange::new(3, 4)); + assert_eq!(result.last_known_block_count, 6); + + // Query 3 starts at user event id 1, which skips the invoc_id1 new rng record + // event (which has a user event id of 0). 
+ let result = block_on(router_client.query(1, 0, nonsense_search_keys.clone())); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 4); + assert_eq!(result.rng_records.len(), 1); + assert_eq!(result.rng_records[0].pubkey, egress_public_key_2); + assert_eq!(result.rng_records[0].start_block, 2); + assert_eq!(result.tx_out_search_results.len(), 1); + assert_eq!(result.fixed_tx_out_search_results.len(), 1); + assert_eq!( + TxOutSearchResultCode::try_from(result.tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!( + TxOutSearchResultCode::try_from(result.fixed_tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!(result.missed_block_ranges.len(), 1); + assert_eq!(result.missed_block_ranges[0], BlockRange::new(3, 4)); + assert_eq!(result.last_known_block_count, 6); + + // Query 4 starts at user event id 4, which skips all events. + let result = block_on(router_client.query(4, 0, nonsense_search_keys.clone())); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 4); + assert_eq!(result.rng_records.len(), 0); + assert_eq!(result.tx_out_search_results.len(), 1); + assert_eq!(result.fixed_tx_out_search_results.len(), 1); + assert_eq!( + TxOutSearchResultCode::try_from(result.tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!( + TxOutSearchResultCode::try_from(result.fixed_tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!(result.missed_block_ranges.len(), 0); + assert_eq!(result.last_known_block_count, 6); + + // Query 5 starts at a user event id that is much larger than the last known + // event id. 
This should skip all events and return this large user event + // id. + let result = block_on(router_client.query(80, 0, nonsense_search_keys)); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 80); + assert_eq!(result.rng_records.len(), 0); + assert_eq!(result.tx_out_search_results.len(), 1); + assert_eq!(result.fixed_tx_out_search_results.len(), 1); + assert_eq!( + TxOutSearchResultCode::try_from(result.tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!( + TxOutSearchResultCode::try_from(result.fixed_tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!(result.missed_block_ranges.len(), 0); + assert_eq!(result.last_known_block_count, 6); + + // Query 6 starts at user event id 4, and supplies search keys that correspond + // to TxOuts. We expect to find these TxOuts. + let result = + block_on(router_client.query(4, 0, vec![vec![1u8; 16], vec![2u8; 16], vec![3u8; 16]])); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 4); + assert_eq!(result.rng_records.len(), 0); + assert_eq!(result.fixed_tx_out_search_results.len(), 3); + { + let mut sort_txs = result.tx_out_search_results.clone(); + sort_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); + assert_eq!(sort_txs[0].search_key, vec![1u8; 16]); + assert_eq!(sort_txs[0].result_code, 1); + assert_eq!(sort_txs[0].ciphertext, vec![1u8; 232]); + assert_eq!(sort_txs[0].padding, vec![0u8; 23]); + + assert_eq!(sort_txs[1].search_key, vec![2u8; 16]); + assert_eq!(sort_txs[1].result_code, 1); + assert_eq!(sort_txs[1].ciphertext, vec![2u8; 232]); + assert_eq!(sort_txs[1].padding, vec![0u8; 23]); + + assert_eq!(sort_txs[2].search_key, vec![3u8; 16]); + assert_eq!(sort_txs[2].result_code, 1); + 
assert_eq!(sort_txs[2].ciphertext, vec![3u8; 232]); + assert_eq!(sort_txs[2].padding, vec![0u8; 23]); + } + { + let mut sort_fixed_txs = result.fixed_tx_out_search_results.clone(); + sort_fixed_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); + let expected_payload_length = 232; + + assert_eq!(sort_fixed_txs[0].search_key, vec![1u8; 16]); + assert_eq!(sort_fixed_txs[0].result_code, 1); + assert_eq!( + sort_fixed_txs[0].ciphertext[..expected_payload_length], + vec![1u8; expected_payload_length] + ); + assert_eq!( + sort_fixed_txs[0].ciphertext[expected_payload_length..], + vec![0; 23] + ); + assert_eq!( + sort_fixed_txs[0].payload_length as usize, + expected_payload_length + ); + + assert_eq!(sort_fixed_txs[1].search_key, vec![2u8; 16]); + assert_eq!(sort_fixed_txs[1].result_code, 1); + assert_eq!( + sort_fixed_txs[1].ciphertext[..expected_payload_length], + vec![2u8; expected_payload_length] + ); + assert_eq!( + sort_fixed_txs[1].ciphertext[expected_payload_length..], + vec![0; 23] + ); + assert_eq!( + sort_fixed_txs[1].payload_length as usize, + expected_payload_length + ); + + assert_eq!(sort_fixed_txs[2].search_key, vec![3u8; 16]); + assert_eq!(sort_fixed_txs[2].result_code, 1); + assert_eq!( + sort_fixed_txs[2].ciphertext[..expected_payload_length], + vec![3u8; expected_payload_length] + ); + assert_eq!( + sort_fixed_txs[2].ciphertext[expected_payload_length..], + vec![0; 23] + ); + assert_eq!( + sort_fixed_txs[2].payload_length as usize, + expected_payload_length + ); + } + assert_eq!(result.missed_block_ranges.len(), 0); // no range reported since we started at event id 4 + assert_eq!(result.last_known_block_count, 6); + + // Query 7 starts at user event id 4, and supplies 2 search keys that correspond + // to TxOuts and 1 search key that doesn't correspond to any TxOuts. We to + // find the TxOuts for the first 2 search keys and to not find TxOuts for + // the last search key. 
+ let result = + block_on(router_client.query(4, 0, vec![vec![5u8; 16], vec![8u8; 16], vec![200u8; 16]])); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 4); + assert_eq!(result.rng_records.len(), 0); + assert_eq!(result.fixed_tx_out_search_results.len(), 3); + { + let mut sort_txs = result.tx_out_search_results.clone(); + sort_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); + + assert_eq!(sort_txs[0].search_key, vec![5u8; 16]); + assert_eq!(sort_txs[0].result_code, 1); + assert_eq!(sort_txs[0].ciphertext, vec![5u8; 232]); + assert_eq!(sort_txs[0].padding, vec![0u8; 23]); + + assert_eq!(sort_txs[1].search_key, vec![8u8; 16]); + assert_eq!(sort_txs[1].result_code, 1); + assert_eq!(sort_txs[1].ciphertext, vec![8u8; 232]); + assert_eq!(sort_txs[1].padding, vec![0u8; 23]); + + assert_eq!(sort_txs[2].search_key, vec![200u8; 16]); + assert_eq!(sort_txs[2].result_code, 2); + assert_eq!(sort_txs[2].ciphertext, Vec::::new()); + assert_eq!(sort_txs[2].padding, vec![0u8; 255]); + } + { + let mut sort_fixed_txs = result.fixed_tx_out_search_results.clone(); + sort_fixed_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); + let expected_payload_length = 232; + + assert_eq!(sort_fixed_txs[0].search_key, vec![5u8; 16]); + assert_eq!(sort_fixed_txs[0].result_code, 1); + assert_eq!( + sort_fixed_txs[0].ciphertext[..expected_payload_length], + vec![5u8; expected_payload_length] + ); + assert_eq!( + sort_fixed_txs[0].ciphertext[expected_payload_length..], + vec![0; 23] + ); + assert_eq!( + sort_fixed_txs[0].payload_length as usize, + expected_payload_length + ); + + assert_eq!(sort_fixed_txs[1].search_key, vec![8u8; 16]); + assert_eq!(sort_fixed_txs[1].result_code, 1); + assert_eq!( + sort_fixed_txs[1].ciphertext[..expected_payload_length], + vec![8u8; expected_payload_length] + ); + assert_eq!( + sort_fixed_txs[1].ciphertext[expected_payload_length..], + vec![0; 
23] + ); + assert_eq!( + sort_fixed_txs[1].payload_length as usize, + expected_payload_length + ); + + assert_eq!(sort_fixed_txs[2].search_key, vec![200u8; 16]); + assert_eq!(sort_fixed_txs[2].result_code, 2); + assert_eq!(sort_fixed_txs[2].ciphertext, vec![0u8; 255]); + assert_eq!(sort_fixed_txs[2].payload_length, 0); + } + + assert_eq!(result.missed_block_ranges.len(), 0); // no range reported since we started at event id 4 + assert_eq!(result.last_known_block_count, 6); + + // Query 8 supplies an ill-formed seach key, so we expect to find that the TxOut + // that's returned indicates this. + let result = block_on(router_client.query(0, 0, vec![vec![200u8; 17]])); + assert!(result.is_ok()); + let mut result = result.unwrap(); + result + .rng_records + .sort_by_key(|rng_record| rng_record.ingest_invocation_id); + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 4); + assert_eq!(result.rng_records.len(), 2); + assert_eq!(result.rng_records[0].pubkey, egress_public_key_1); + assert_eq!(result.rng_records[1].pubkey, egress_public_key_2); + assert_eq!(result.fixed_tx_out_search_results.len(), 1); + { + let mut sort_txs = result.tx_out_search_results.clone(); + sort_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); + + assert_eq!(sort_txs[0].search_key, vec![200u8; 17]); + assert_eq!(sort_txs[0].result_code, 3); + assert_eq!(sort_txs[0].ciphertext, vec![0u8; 0]); + assert_eq!(sort_txs[0].padding, vec![0u8; 255]); + } + { + let mut sort_fixed_txs = result.fixed_tx_out_search_results.clone(); + sort_fixed_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); + let expected_payload_length = 0; + + assert_eq!(sort_fixed_txs[0].search_key, vec![200u8; 17]); + assert_eq!(sort_fixed_txs[0].result_code, 3); + assert_eq!( + sort_fixed_txs[0].ciphertext[..expected_payload_length], + vec![0u8; expected_payload_length] + ); + assert_eq!( + sort_fixed_txs[0].ciphertext[expected_payload_length..], + vec![0u8; 255] + ); + 
assert_eq!( + sort_fixed_txs[0].payload_length as usize, + expected_payload_length + ); + } + assert_eq!(result.missed_block_ranges.len(), 1); + assert_eq!(result.missed_block_ranges[0], BlockRange::new(3, 4)); + assert_eq!(result.last_known_block_count, 6); +} diff --git a/fog/view/server/tests/unary_smoke_tests.rs b/fog/view/server/tests/unary_smoke_tests.rs new file mode 100644 index 0000000000..4db2df1f66 --- /dev/null +++ b/fog/view/server/tests/unary_smoke_tests.rs @@ -0,0 +1,793 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +// This integration-level test mocks out consensus and tries to show +// that the users are able to recover their transactions. +// +// This is a rewrite of what was historically called test_ingest_view and was an +// end-to-end integration tests of ingest+view+fog-client. +// It exercises both the ingest enclave, and the fog-related crypto that makes +// its way into the client. + +use mc_blockchain_types::{Block, BlockID, BlockVersion}; +use mc_common::logger::{create_app_logger, o}; +use mc_crypto_keys::{CompressedRistrettoPublic, RistrettoPublic}; +use mc_fog_kex_rng::KexRngPubkey; +use mc_fog_recovery_db_iface::{RecoveryDb, ReportData, ReportDb}; +use mc_fog_test_infra::db_tests::{random_block, random_kex_rng_pubkey}; +use mc_fog_types::{common::BlockRange, view::TxOutSearchResultCode, ETxOutRecord}; +use mc_fog_view_protocol::FogViewConnection; +use mc_fog_view_server_test_utils::RouterTestEnvironment; +use mc_util_from_random::FromRandom; +use rand::{rngs::StdRng, SeedableRng}; +use std::{thread::sleep, time::Duration}; +use yare::parameterized; + +/// Smoke tests that if we add stuff to recovery database, client can see +/// results when they hit a view server. 
+#[parameterized( +small_omap_one_store = { 512, 1, 6 }, +small_omap_multiple_stores = { 512, 6, 1 }, +large_omap_one_store = { 1048576, 1, 6 }, +large_omap_multiple_stores = { 1048576, 6, 1 }, +)] +fn test_view_integration(view_omap_capacity: u64, store_count: usize, blocks_per_store: u64) { + let (logger, _global_logger_guard) = create_app_logger(o!()); + let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); + let store_block_ranges = + mc_fog_view_server_test_utils::create_block_ranges(store_count, blocks_per_store); + let mut test_environment = + RouterTestEnvironment::new_unary(view_omap_capacity, store_block_ranges, logger.clone()); + let db = test_environment + .db_test_context + .as_ref() + .unwrap() + .get_db_instance(); + let view_client = test_environment.router_unary_client.as_mut().unwrap(); + let store_servers = test_environment.store_servers.as_ref().unwrap(); + + let ingress_key = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); + let accepted_block_1 = db.new_ingress_key(&ingress_key, 0).unwrap(); + assert_eq!(accepted_block_1, 0); + + // First add some data to the database + let txs: Vec = (1u8..21u8) + .map(|x| ETxOutRecord { + search_key: vec![x; 16], + payload: vec![x; 232], + }) + .collect(); + + let pubkey1 = KexRngPubkey { + public_key: [1; 32].to_vec(), + version: 0, + }; + let invoc_id1 = db + .new_ingest_invocation(None, &ingress_key, &pubkey1, 0) + .unwrap(); + + db.add_block_data( + &invoc_id1, + &Block::new( + BlockVersion::ZERO, + &BlockID::default(), + 0, + 2, + &Default::default(), + &Default::default(), + ), + 0, + &txs[..2], + ) + .unwrap(); + + db.add_block_data( + &invoc_id1, + &Block::new( + BlockVersion::ZERO, + &BlockID::default(), + 1, + 6, + &Default::default(), + &Default::default(), + ), + 0, + &txs[2..6], + ) + .unwrap(); + + let pubkey2 = KexRngPubkey { + public_key: [2; 32].to_vec(), + version: 0, + }; + let invoc_id2 = db + .new_ingest_invocation(None, &ingress_key, &pubkey2, 2) + 
.unwrap(); + + db.add_block_data( + &invoc_id2, + &Block::new( + BlockVersion::ZERO, + &BlockID::default(), + 2, + 12, + &Default::default(), + &Default::default(), + ), + 0, + &txs[6..12], + ) + .unwrap(); + + // Block 3 is missing (on a different key) + let ingress_key2 = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); + let accepted_block_2 = db.new_ingress_key(&ingress_key2, 3).unwrap(); + assert_eq!(accepted_block_2, 3); + + db.set_report( + &ingress_key2, + "", + &ReportData { + pubkey_expiry: 4, + ingest_invocation_id: None, + report: Default::default(), + }, + ) + .unwrap(); + db.report_lost_ingress_key(ingress_key2).unwrap(); + + // Block 3 has no data for the original key + // (view server must support this, ingest skips some TxOuts if the decrypted fog + // hint is junk) + db.add_block_data( + &invoc_id2, + &Block::new( + BlockVersion::ZERO, + &BlockID::default(), + 3, + 12, + &Default::default(), + &Default::default(), + ), + 0, + &[], + ) + .unwrap(); + + db.add_block_data( + &invoc_id2, + &Block::new( + BlockVersion::ZERO, + &BlockID::default(), + 4, + 16, + &Default::default(), + &Default::default(), + ), + 0, + &txs[12..16], + ) + .unwrap(); + + db.decommission_ingest_invocation(&invoc_id1).unwrap(); + + db.add_block_data( + &invoc_id2, + &Block::new( + BlockVersion::ZERO, + &BlockID::default(), + 5, + 20, + &Default::default(), + &Default::default(), + ), + 0, + &txs[16..20], + ) + .unwrap(); + + mc_fog_view_server_test_utils::wait_for_highest_block_to_load(&db, store_servers, &logger); + // Now make some requests against view_client + + let nonsense_search_keys = vec![vec![50u8]]; + + // Query 1 should yield 4 events: + // - 1 new rng record (for invoc_id1) + // - 1 new rng record (for invoc_id2) + // - 1 missing block range + // - 1 ingest decommissioning (for invoc_id1) + let result = view_client.request(0, 0, nonsense_search_keys.clone()); + assert!(result.is_ok()); + let mut result = result.unwrap(); + 
assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 4); + assert_eq!(result.rng_records.len(), 2); + result + .rng_records + .sort_by_key(|rng_record| rng_record.ingest_invocation_id); + assert_eq!(result.rng_records[0].pubkey, pubkey1); + assert_eq!(result.rng_records[1].pubkey, pubkey2); + assert_eq!(result.tx_out_search_results.len(), 1); + assert_eq!(result.fixed_tx_out_search_results.len(), 1); + assert_eq!( + TxOutSearchResultCode::try_from(result.tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!( + TxOutSearchResultCode::try_from(result.fixed_tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!(result.missed_block_ranges.len(), 1); + assert_eq!(result.missed_block_ranges[0], BlockRange::new(3, 4)); + assert_eq!(result.last_known_block_count, 6); + + // Query 2 is the same as Query 1 and tests that identical queries (when no + // blocks have been added etc.) should yield identical results. 
+ let result = view_client.request(0, 0, nonsense_search_keys.clone()); + assert!(result.is_ok()); + let mut result = result.unwrap(); + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 4); + assert_eq!(result.rng_records.len(), 2); + result + .rng_records + .sort_by_key(|rng_record| rng_record.ingest_invocation_id); + assert_eq!(result.rng_records[0].pubkey, pubkey1); + assert_eq!(result.rng_records[1].pubkey, pubkey2); + assert_eq!(result.tx_out_search_results.len(), 1); + assert_eq!(result.fixed_tx_out_search_results.len(), 1); + assert_eq!( + TxOutSearchResultCode::try_from(result.tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!( + TxOutSearchResultCode::try_from(result.fixed_tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!(result.missed_block_ranges.len(), 1); + assert_eq!(result.missed_block_ranges[0], BlockRange::new(3, 4)); + assert_eq!(result.last_known_block_count, 6); + + // Query 3 starts at user event id 1, which skips the invoc_id1 new rng record + // event (which has a user event id of 0). 
+ let result = view_client.request(1, 0, nonsense_search_keys.clone()); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 4); + assert_eq!(result.rng_records.len(), 1); + assert_eq!(result.rng_records[0].pubkey, pubkey2); + assert_eq!(result.rng_records[0].start_block, 2); + assert_eq!(result.tx_out_search_results.len(), 1); + assert_eq!(result.fixed_tx_out_search_results.len(), 1); + assert_eq!( + TxOutSearchResultCode::try_from(result.tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!( + TxOutSearchResultCode::try_from(result.fixed_tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!(result.missed_block_ranges.len(), 1); + assert_eq!(result.missed_block_ranges[0], BlockRange::new(3, 4)); + assert_eq!(result.last_known_block_count, 6); + + // Query 4 starts at user event id 4, which skips all events. + let result = view_client.request(4, 0, nonsense_search_keys.clone()); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 4); + assert_eq!(result.rng_records.len(), 0); + assert_eq!(result.tx_out_search_results.len(), 1); + assert_eq!(result.fixed_tx_out_search_results.len(), 1); + assert_eq!( + TxOutSearchResultCode::try_from(result.tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!( + TxOutSearchResultCode::try_from(result.fixed_tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!(result.missed_block_ranges.len(), 0); + assert_eq!(result.last_known_block_count, 6); + + // Query 5 starts at a user event id that is much larger than the last known + // event id. This should skip all events and return this large user event + // id. 
+ let result = view_client.request(80, 0, nonsense_search_keys).unwrap(); + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 80); + assert_eq!(result.rng_records.len(), 0); + assert_eq!(result.tx_out_search_results.len(), 1); + assert_eq!(result.fixed_tx_out_search_results.len(), 1); + assert_eq!( + TxOutSearchResultCode::try_from(result.tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!( + TxOutSearchResultCode::try_from(result.fixed_tx_out_search_results[0].result_code).unwrap(), + TxOutSearchResultCode::BadSearchKey + ); + assert_eq!(result.missed_block_ranges.len(), 0); + assert_eq!(result.last_known_block_count, 6); + + // Query 6 starts at user event id 4, and supplies search keys that correspond + // to TxOuts. We expect to find these TxOuts. + let result = view_client.request(4, 0, vec![vec![1u8; 16], vec![2u8; 16], vec![3u8; 16]]); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 4); + assert_eq!(result.rng_records.len(), 0); + assert_eq!(result.fixed_tx_out_search_results.len(), 3); + { + let mut sort_txs = result.tx_out_search_results.clone(); + sort_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); + assert_eq!(sort_txs[0].search_key, vec![1u8; 16]); + assert_eq!(sort_txs[0].result_code, 1); + assert_eq!(sort_txs[0].ciphertext, vec![1u8; 232]); + assert_eq!(sort_txs[0].padding, vec![0u8; 23]); + + assert_eq!(sort_txs[1].search_key, vec![2u8; 16]); + assert_eq!(sort_txs[1].result_code, 1); + assert_eq!(sort_txs[1].ciphertext, vec![2u8; 232]); + assert_eq!(sort_txs[1].padding, vec![0u8; 23]); + + assert_eq!(sort_txs[2].search_key, vec![3u8; 16]); + assert_eq!(sort_txs[2].result_code, 1); + assert_eq!(sort_txs[2].ciphertext, vec![3u8; 232]); + assert_eq!(sort_txs[2].padding, vec![0u8; 23]); + } + { + let mut sort_fixed_txs = 
result.fixed_tx_out_search_results.clone(); + sort_fixed_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); + let expected_payload_length = 232; + + assert_eq!(sort_fixed_txs[0].search_key, vec![1u8; 16]); + assert_eq!(sort_fixed_txs[0].result_code, 1); + assert_eq!( + sort_fixed_txs[0].ciphertext[..expected_payload_length], + vec![1u8; expected_payload_length] + ); + assert_eq!( + sort_fixed_txs[0].ciphertext[expected_payload_length..], + vec![0; 23] + ); + assert_eq!( + sort_fixed_txs[0].payload_length as usize, + expected_payload_length + ); + + assert_eq!(sort_fixed_txs[1].search_key, vec![2u8; 16]); + assert_eq!(sort_fixed_txs[1].result_code, 1); + assert_eq!( + sort_fixed_txs[1].ciphertext[..expected_payload_length], + vec![2u8; expected_payload_length] + ); + assert_eq!( + sort_fixed_txs[1].ciphertext[expected_payload_length..], + vec![0; 23] + ); + assert_eq!( + sort_fixed_txs[1].payload_length as usize, + expected_payload_length + ); + + assert_eq!(sort_fixed_txs[2].search_key, vec![3u8; 16]); + assert_eq!(sort_fixed_txs[2].result_code, 1); + assert_eq!( + sort_fixed_txs[2].ciphertext[..expected_payload_length], + vec![3u8; expected_payload_length] + ); + assert_eq!( + sort_fixed_txs[2].ciphertext[expected_payload_length..], + vec![0; 23] + ); + assert_eq!( + sort_fixed_txs[2].payload_length as usize, + expected_payload_length + ); + } + assert_eq!(result.missed_block_ranges.len(), 0); // no range reported since we started at event id 4 + assert_eq!(result.last_known_block_count, 6); + + // Query 7 starts at user event id 4, and supplies 2 search keys that correspond + // to TxOuts and 1 search key that doesn't correspond to any TxOuts. We to + // find the TxOuts for the first 2 search keys and to not find TxOuts for + // the last search key. 
+ let result = view_client.request(4, 0, vec![vec![5u8; 16], vec![8u8; 16], vec![200u8; 16]]); + assert!(result.is_ok()); + let result = result.unwrap(); + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 4); + assert_eq!(result.rng_records.len(), 0); + assert_eq!(result.fixed_tx_out_search_results.len(), 3); + { + let mut sort_txs = result.tx_out_search_results.clone(); + sort_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); + assert_eq!(sort_txs[0].search_key, vec![5u8; 16]); + assert_eq!(sort_txs[0].result_code, 1); + assert_eq!(sort_txs[0].ciphertext, vec![5u8; 232]); + assert_eq!(sort_txs[0].padding, vec![0u8; 23]); + + assert_eq!(sort_txs[1].search_key, vec![8u8; 16]); + assert_eq!(sort_txs[1].result_code, 1); + assert_eq!(sort_txs[1].ciphertext, vec![8u8; 232]); + assert_eq!(sort_txs[1].padding, vec![0u8; 23]); + + assert_eq!(sort_txs[2].search_key, vec![200u8; 16]); + assert_eq!(sort_txs[2].result_code, 2); + assert_eq!(sort_txs[2].ciphertext, Vec::::new()); + assert_eq!(sort_txs[2].padding, vec![0u8; 255]); + } + { + let mut sort_fixed_txs = result.fixed_tx_out_search_results.clone(); + sort_fixed_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); + let expected_payload_length = 232; + + assert_eq!(sort_fixed_txs[0].search_key, vec![5u8; 16]); + assert_eq!(sort_fixed_txs[0].result_code, 1); + assert_eq!( + sort_fixed_txs[0].ciphertext[..expected_payload_length], + vec![5u8; expected_payload_length] + ); + assert_eq!( + sort_fixed_txs[0].ciphertext[expected_payload_length..], + vec![0; 23] + ); + assert_eq!( + sort_fixed_txs[0].payload_length as usize, + expected_payload_length + ); + + assert_eq!(sort_fixed_txs[1].search_key, vec![8u8; 16]); + assert_eq!(sort_fixed_txs[1].result_code, 1); + assert_eq!( + sort_fixed_txs[1].ciphertext[..expected_payload_length], + vec![8u8; expected_payload_length] + ); + assert_eq!( + sort_fixed_txs[1].ciphertext[expected_payload_length..], + vec![0; 23] + ); + 
assert_eq!( + sort_fixed_txs[1].payload_length as usize, + expected_payload_length + ); + + assert_eq!(sort_fixed_txs[2].search_key, vec![200u8; 16]); + assert_eq!(sort_fixed_txs[2].result_code, 2); + assert_eq!(sort_fixed_txs[2].ciphertext, vec![0u8; 255]); + assert_eq!(sort_fixed_txs[2].payload_length, 0); + } + assert_eq!(result.missed_block_ranges.len(), 0); // no range reported since we started at event id 4 + assert_eq!(result.last_known_block_count, 6); + + // Query 8 supplies an ill-formed search key, so we expect to find that the + // TxOut that's returned indicates this. + let result = view_client.request(0, 0, vec![vec![200u8; 17]]); + assert!(result.is_ok()); + let mut result = result.unwrap(); + result + .rng_records + .sort_by_key(|rng_record| rng_record.ingest_invocation_id); + assert_eq!(result.highest_processed_block_count, 6); + assert_eq!(result.next_start_from_user_event_id, 4); + assert_eq!(result.rng_records.len(), 2); + assert_eq!(result.rng_records[0].pubkey, pubkey1); + assert_eq!(result.rng_records[1].pubkey, pubkey2); + assert_eq!(result.fixed_tx_out_search_results.len(), 1); + { + let mut sort_txs = result.tx_out_search_results.clone(); + sort_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); + assert_eq!(sort_txs[0].search_key, vec![200u8; 17]); + assert_eq!(sort_txs[0].result_code, 3); + assert_eq!(sort_txs[0].ciphertext, vec![0u8; 0]); + assert_eq!(sort_txs[0].padding, vec![0u8; 255]); + } + { + let mut sort_fixed_txs = result.fixed_tx_out_search_results.clone(); + sort_fixed_txs.sort_by(|x, y| x.search_key.cmp(&y.search_key)); + + assert_eq!(sort_fixed_txs[0].search_key, vec![200u8; 17]); + assert_eq!(sort_fixed_txs[0].result_code, 3); + assert_eq!(sort_fixed_txs[0].ciphertext, vec![0u8; 255]); + assert_eq!(sort_fixed_txs[0].payload_length, 0); + } + assert_eq!(result.missed_block_ranges.len(), 1); + assert_eq!(result.missed_block_ranges[0], BlockRange::new(3, 4)); + assert_eq!(result.last_known_block_count, 6); + + // Sleep 
before exiting to give view server threads time to join + sleep(Duration::from_millis(1000)); +} + +/// Test that view server behaves correctly when there is some overlap between +/// two currently active ingest invocations. +#[parameterized( +one_store = { 1, 40 }, +multiple_stores = { 5, 8 }, +)] +fn test_overlapping_ingest_ranges(store_count: usize, blocks_per_store: u64) { + let (logger, _global_logger_guard) = create_app_logger(o!()); + let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); + const VIEW_OMAP_CAPACITY: u64 = 512; + let store_block_ranges = + mc_fog_view_server_test_utils::create_block_ranges(store_count, blocks_per_store); + let mut test_environment = + RouterTestEnvironment::new_unary(VIEW_OMAP_CAPACITY, store_block_ranges, logger.clone()); + let view_client = test_environment.router_unary_client.as_mut().unwrap(); + let db = test_environment + .db_test_context + .as_ref() + .unwrap() + .get_db_instance(); + let store_servers = test_environment.store_servers.as_ref().unwrap(); + + let ingress_key1 = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); + db.new_ingress_key(&ingress_key1, 0).unwrap(); + + let ingress_key2 = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); + db.new_ingress_key(&ingress_key2, 10).unwrap(); + + // invoc_id1 starts at block 0 + let invoc_id1 = db + .new_ingest_invocation(None, &ingress_key1, &random_kex_rng_pubkey(&mut rng), 0) + .unwrap(); + + // invoc_id2 starts at block 10 + let invoc_id2 = db + .new_ingest_invocation(None, &ingress_key2, &random_kex_rng_pubkey(&mut rng), 10) + .unwrap(); + + // Add 5 blocks to both invocations. This will add blocks 0-4 to invoc1 and + // blocks 10-14 to invoc2. Since we're missing blocks 5-9, we should only + // see blocks 0-4 for now. 
+ let mut expected_records = Vec::new(); + for i in 0..5 { + let (block, records) = random_block(&mut rng, i, 5); // 5 outputs per block + db.add_block_data(&invoc_id1, &block, 0, &records).unwrap(); + expected_records.extend(records); + + let (block, records) = random_block(&mut rng, i + 10, 5); // start block is 10 + db.add_block_data(&invoc_id2, &block, 0, &records).unwrap(); + expected_records.extend(records); + } + + let block_count = 5; + mc_fog_view_server_test_utils::wait_for_block_to_load(block_count, store_servers, &logger); + let highest_processed_block_count = + mc_fog_view_server_test_utils::get_highest_processed_block_count(store_servers); + assert_eq!(highest_processed_block_count, block_count); + + mc_fog_view_server_test_utils::assert_e_tx_out_records(view_client, &expected_records); + + // Give server time to process some more blocks, although it shouldn't. + sleep(Duration::from_millis(1000)); + let highest_processed_block_count = + mc_fog_view_server_test_utils::get_highest_processed_block_count(store_servers); + assert_eq!(highest_processed_block_count, block_count); + + // See that we get a sane client response. + let nonsense_search_keys = vec![vec![50u8]]; + let result = view_client + .request(0, 0, nonsense_search_keys.clone()) + .unwrap(); + assert_eq!(result.highest_processed_block_count, block_count); + assert_eq!(result.last_known_block_count, 15); // The last known block is not tied to the serial processing of blocks. + + // Add blocks 5-19 to invoc_id1. This will allow us to query blocks 0-14, since + // invoc_id2 only has blocks 10-14. 
+ for i in 5..20 { + let (block, records) = random_block(&mut rng, i, 5); // 5 outputs per block + db.add_block_data(&invoc_id1, &block, 0, &records).unwrap(); + + expected_records.extend(records); + } + + let block_count = 15; + mc_fog_view_server_test_utils::wait_for_block_to_load(block_count, store_servers, &logger); + let highest_processed_block_count = + mc_fog_view_server_test_utils::get_highest_processed_block_count(store_servers); + assert_eq!(highest_processed_block_count, block_count); + + // Give server time to process some more blocks, although it shouldn't. + mc_fog_view_server_test_utils::wait_for_highest_processed_and_last_known( + view_client, + block_count, + 20, + ); + let highest_processed_block_count = + mc_fog_view_server_test_utils::get_highest_processed_block_count(store_servers); + assert_eq!(highest_processed_block_count, block_count); + + // See that we get a sane client response. + let result = view_client.request(0, 0, nonsense_search_keys).unwrap(); + assert_eq!(result.highest_processed_block_count, 15); + assert_eq!(result.last_known_block_count, 20); // The last known block is not tied to the serial processing of blocks. + + // Add blocks 15-30 to invoc_id2, this should bring us to block 20. + for i in 15..30 { + let (block, records) = random_block(&mut rng, i, 5); // 5 outputs per block + db.add_block_data(&invoc_id2, &block, 0, &records).unwrap(); + expected_records.extend(records); + } + + mc_fog_view_server_test_utils::wait_for_block_to_load(20, store_servers, &logger); + let highest_processed_block_count = + mc_fog_view_server_test_utils::get_highest_processed_block_count(store_servers); + assert_eq!(highest_processed_block_count, 20); + + // Give server time to process some more blocks, although it shouldn't. 
+ sleep(Duration::from_millis(1000)); + let highest_processed_block_count = + mc_fog_view_server_test_utils::get_highest_processed_block_count(store_servers); + assert_eq!(highest_processed_block_count, 20); + + // See that we get a sane client response. + mc_fog_view_server_test_utils::wait_for_highest_processed_and_last_known(view_client, 20, 30); + // Ensure all ETxOutRecords are searchable + mc_fog_view_server_test_utils::assert_e_tx_out_records(view_client, &expected_records); +} + +/// Test that view server behaves correctly when there is a missing range before +/// any ingest invocations. +#[parameterized( +one_store = { 1, 40 }, +multiple_stores = { 5, 8 }, +)] +fn test_start_with_missing_range(store_count: usize, blocks_per_store: u64) { + let (logger, _global_logger_guard) = create_app_logger(o!()); + let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); + const VIEW_OMAP_CAPACITY: u64 = 512; + let store_block_ranges = + mc_fog_view_server_test_utils::create_block_ranges(store_count, blocks_per_store); + let mut test_environment = + RouterTestEnvironment::new_unary(VIEW_OMAP_CAPACITY, store_block_ranges, logger); + let db = test_environment + .db_test_context + .as_ref() + .unwrap() + .get_db_instance(); + let store_servers = test_environment.store_servers.as_ref().unwrap(); + let view_client = test_environment.router_unary_client.as_mut().unwrap(); + + let ingress_key = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); + db.new_ingress_key(&ingress_key, 5).unwrap(); + + // invoc_id1 starts at block 0, but the initial blocks reported are 10-15 + let invoc_id1 = db + .new_ingest_invocation(None, &ingress_key, &random_kex_rng_pubkey(&mut rng), 5) + .unwrap(); + + // Add 5 blocks to invoc_id1. 
+ let mut expected_records = Vec::new(); + for i in 10..15 { + let (block, records) = random_block(&mut rng, i, 5); // 5 outputs per block + db.add_block_data(&invoc_id1, &block, 0, &records).unwrap(); + expected_records.extend(records); + } + + // Give server time to process some more blocks, although it shouldn't. + sleep(Duration::from_millis(1000)); + let highest_processed_block_count = + mc_fog_view_server_test_utils::get_highest_processed_block_count(store_servers); + assert_eq!(highest_processed_block_count, 0); + + mc_fog_view_server_test_utils::wait_for_highest_processed_and_last_known(view_client, 0, 0); + + // Adding the first 5 blocks that were the gap + for i in 5..10 { + let (block, records) = random_block(&mut rng, i, 5); // 5 outputs per block + db.add_block_data(&invoc_id1, &block, 0, &records).unwrap(); + expected_records.extend(records); + } + + mc_fog_view_server_test_utils::wait_for_highest_processed_and_last_known(view_client, 15, 15); + + let highest_processed_block_count = + mc_fog_view_server_test_utils::get_highest_processed_block_count(store_servers); + assert_eq!(highest_processed_block_count, 15); + + mc_fog_view_server_test_utils::assert_e_tx_out_records(view_client, &expected_records); +} + +/// Test that view server behaves correctly when there is a missing range +/// between two ingest invocations. 
+#[parameterized( +one_store = { 1, 40 }, +multiple_stores = { 5, 8 }, +)] +fn test_middle_missing_range_with_decommission(store_count: usize, blocks_per_store: u64) { + let (logger, _global_logger_guard) = create_app_logger(o!()); + let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); + const VIEW_OMAP_CAPACITY: u64 = 512; + let store_block_ranges = + mc_fog_view_server_test_utils::create_block_ranges(store_count, blocks_per_store); + let mut test_environment = + RouterTestEnvironment::new_unary(VIEW_OMAP_CAPACITY, store_block_ranges, logger); + let db = test_environment + .db_test_context + .as_ref() + .unwrap() + .get_db_instance(); + let store_servers = test_environment.store_servers.as_ref().unwrap(); + let view_client = test_environment.router_unary_client.as_mut().unwrap(); + + let ingress_key1 = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); + db.new_ingress_key(&ingress_key1, 0).unwrap(); + db.set_report( + &ingress_key1, + "", + &ReportData { + pubkey_expiry: 10, + ingest_invocation_id: None, + report: Default::default(), + }, + ) + .unwrap(); + + let ingress_key2 = CompressedRistrettoPublic::from(RistrettoPublic::from_random(&mut rng)); + db.new_ingress_key(&ingress_key2, 10).unwrap(); + + // invoc_id1 starts at block 0 + let invoc_id1 = db + .new_ingest_invocation(None, &ingress_key1, &random_kex_rng_pubkey(&mut rng), 0) + .unwrap(); + + // Add 5 blocks to invoc_id1. + let mut expected_records = Vec::new(); + for i in 0..5 { + let (block, records) = random_block(&mut rng, i, 5); // 5 outputs per block + db.add_block_data(&invoc_id1, &block, 0, &records).unwrap(); + expected_records.extend(records); + } + + // At this point we should be at highest processed block 5, and highest known 5, + // because ingress key 2 doesn't start until 10, and doesn't have any blocks + // associated to it yet. 
+ mc_fog_view_server_test_utils::wait_for_highest_processed_and_last_known(view_client, 5, 5); + let highest_processed_block_count = + mc_fog_view_server_test_utils::get_highest_processed_block_count(store_servers); + assert_eq!(highest_processed_block_count, 5); + + // Ingress key 1 is lost + db.report_lost_ingress_key(ingress_key1).unwrap(); + assert_eq!( + db.get_missed_block_ranges().unwrap(), + vec![BlockRange { + start_block: 5, + end_block: 10 + }] + ); + + // invoc_id2 starts at block 10 + let invoc_id2 = db + .new_ingest_invocation(None, &ingress_key2, &random_kex_rng_pubkey(&mut rng), 10) + .unwrap(); + + // Add 5 blocks to invoc_id2. + for i in 10..15 { + let (block, records) = random_block(&mut rng, i, 5); // 5 outputs per block + db.add_block_data(&invoc_id2, &block, 0, &records).unwrap(); + expected_records.extend(records); + } + + // At this point invoc_id1 is marked lost, so we should be at highest processed + // block 15 and the last known block should be 15. + mc_fog_view_server_test_utils::wait_for_highest_processed_and_last_known(view_client, 15, 15); + let highest_processed_block_count = + mc_fog_view_server_test_utils::get_highest_processed_block_count(store_servers); + assert_eq!(highest_processed_block_count, 15); + + // Decommissioning invoc_id1 should allow us to advance to the last block + // invoc_id2 has processed. 
+ db.decommission_ingest_invocation(&invoc_id1).unwrap(); + + mc_fog_view_server_test_utils::wait_for_highest_processed_and_last_known(view_client, 15, 15); + let highest_processed_block_count = + mc_fog_view_server_test_utils::get_highest_processed_block_count(store_servers); + assert_eq!(highest_processed_block_count, 15); + + mc_fog_view_server_test_utils::assert_e_tx_out_records(view_client, &expected_records); +} diff --git a/tools/fog-local-network/fog_conformance_tests.py b/tools/fog-local-network/fog_conformance_tests.py index 9709e3030b..b33cf7d184 100755 --- a/tools/fog-local-network/fog_conformance_tests.py +++ b/tools/fog-local-network/fog_conformance_tests.py @@ -389,7 +389,9 @@ def __init__(self, work_dir, args): self.fog_nginx = None self.fog_ingest = None self.fog_ingest2 = None - self.fog_view = None + self.fog_view_router = None + # TODO: Add more fog view instances with sharding. + self.fog_view_stores = None self.fog_ledger = None self.fog_report = None self.multi_balance_checker = None @@ -476,15 +478,39 @@ def run(self, args): ) self.fog_ingest.start() - self.fog_view = FogView( + fog_view_store_1 = FogViewStore( name = 'view1', + client_port = BASE_VIEW_STORE_1_PORT, + admin_port = BASE_VIEW_STORE_1_ADMIN_PORT, + admin_http_gateway_port = BASE_VIEW_STORE_1_ADMIN_HTTP_GATEWAY_PORT, + release = self.release, + sharding_strategy= '0-6' + ) + fog_view_store_2 = FogViewStore( + name = 'view2', + client_port = BASE_VIEW_STORE_2_PORT, + admin_port = BASE_VIEW_STORE_2_ADMIN_PORT, + admin_http_gateway_port = BASE_VIEW_STORE_2_ADMIN_HTTP_GATEWAY_PORT, + release = self.release, + sharding_strategy= '5-12' + ) + self.fog_view_stores = [fog_view_store_1, fog_view_store_2] + for store in self.fog_view_stores: + store.start() + + + client_listen_uris = list(map(lambda x: x.get_client_listen_uri(), self.fog_view_stores)) + + self.fog_view_router = FogViewRouter( + name = 'router1', client_responder_id = f'localhost:{BASE_NGINX_CLIENT_PORT}', client_port = 
BASE_VIEW_CLIENT_PORT, admin_port = BASE_VIEW_ADMIN_PORT, admin_http_gateway_port = BASE_VIEW_ADMIN_HTTP_GATEWAY_PORT, release = self.release, + shard_uris = client_listen_uris ) - self.fog_view.start() + self.fog_view_router.start() self.fog_ledger = FogLedger( name = 'ledger_server1', @@ -897,10 +923,10 @@ def run(self, args): # Test what happens when we restart the view server ####################################################################### - # Restarting the view server should not impact things. - print("Restarting fog view server") - self.fog_view.stop() - self.fog_view.start() + # Restarting the view store should not impact things. + print("Restarting fog view store 1") + self.fog_view_stores[0].stop() + self.fog_view_stores[0].start() time.sleep(10 if self.release else 30) # We will encounter 0: 0 while we wait for the view server to come up. @@ -1022,8 +1048,11 @@ def stop(self): if self.fog_report: self.fog_report.stop() - if self.fog_view: - self.fog_view.stop() + if self.fog_view_router: + self.fog_view_router.stop() + + for store in self.fog_view_stores: + store.stop() if self.fog_ingest: self.fog_ingest.stop() diff --git a/tools/fog-local-network/fog_local_network.py b/tools/fog-local-network/fog_local_network.py index cb42478e27..9ef4db3241 100644 --- a/tools/fog-local-network/fog_local_network.py +++ b/tools/fog-local-network/fog_local_network.py @@ -91,15 +91,40 @@ def start(self): ) self.fog_ingest.start() - self.fog_view = FogView( - 'view1', - f'localhost:{BASE_NGINX_CLIENT_PORT}', - BASE_VIEW_CLIENT_PORT, - BASE_VIEW_ADMIN_PORT, - BASE_VIEW_ADMIN_HTTP_GATEWAY_PORT, - release=True, + fog_view_store_1 = FogViewStore( + name = 'view1', + client_port = BASE_VIEW_STORE_1_PORT, + admin_port = BASE_VIEW_STORE_1_ADMIN_PORT, + admin_http_gateway_port = BASE_VIEW_STORE_1_ADMIN_HTTP_GATEWAY_PORT, + release = True, + # Todo: see if we need to reconfigure this... 
+ sharding_strategy= '0-6' + ) + fog_view_store_2 = FogViewStore( + name = 'view2', + client_port = BASE_VIEW_STORE_2_PORT, + admin_port = BASE_VIEW_STORE_2_ADMIN_PORT, + admin_http_gateway_port = BASE_VIEW_STORE_2_ADMIN_HTTP_GATEWAY_PORT, + release = True, + # Todo: see if we need to reconfigure this... + sharding_strategy= '5-12' + ) + + self.fog_view_stores = [fog_view_store_1, fog_view_store_2] + for store in self.fog_view_stores: + store.start() + client_listen_uris = list(map(lambda x: x.get_client_listen_uri(), self.fog_view_stores)) + + self.fog_view_router = FogViewRouter( + name = 'router1', + client_responder_id = f'localhost:{BASE_NGINX_CLIENT_PORT}', + client_port = BASE_VIEW_CLIENT_PORT, + admin_port = BASE_VIEW_ADMIN_PORT, + admin_http_gateway_port = BASE_VIEW_ADMIN_HTTP_GATEWAY_PORT, + release = True, + shard_uris = client_listen_uris ) - self.fog_view.start() + self.fog_view_router.start() self.fog_report = FogReport( 'report1', @@ -142,7 +167,9 @@ def stop_server(name): stop_server("fog_ledger") stop_server("fog_report") - stop_server("fog_view") + stop_server("fog_view_store_1") + stop_server("fog_view_store_2") + stop_server("fog_view_router") stop_server("fog_ingest") if __name__ == '__main__': diff --git a/tools/fog-local-network/local_fog.py b/tools/fog-local-network/local_fog.py index 36696a523c..8e2c3a6e2d 100644 --- a/tools/fog-local-network/local_fog.py +++ b/tools/fog-local-network/local_fog.py @@ -14,6 +14,12 @@ BASE_VIEW_CLIENT_PORT = 5200 BASE_VIEW_ADMIN_PORT = 5400 BASE_VIEW_ADMIN_HTTP_GATEWAY_PORT = 5500 +BASE_VIEW_STORE_1_PORT = 5600 +BASE_VIEW_STORE_2_PORT = 5601 +BASE_VIEW_STORE_1_ADMIN_PORT = 5700 +BASE_VIEW_STORE_2_ADMIN_PORT = 5701 +BASE_VIEW_STORE_1_ADMIN_HTTP_GATEWAY_PORT = 5800 +BASE_VIEW_STORE_2_ADMIN_HTTP_GATEWAY_PORT = 5801 BASE_REPORT_CLIENT_PORT = 6200 BASE_REPORT_ADMIN_PORT = 6400 @@ -172,17 +178,71 @@ def retire(self): def report_lost_ingress_key(self, lost_key): return 
self.run_client_command(f'report-lost-ingress-key -k "{lost_key}"') -class FogView: - def __init__(self, name, client_responder_id, client_port, admin_port, admin_http_gateway_port, release): + +class FogViewRouter: + def __init__(self, name, client_responder_id, client_port, admin_port, admin_http_gateway_port, shard_uris, release): self.name = name self.client_responder_id = client_responder_id self.client_port = client_port + # Use the unary API for now. self.client_listen_url = f'insecure-fog-view://{LISTEN_HOST}:{self.client_port}/' self.admin_port = admin_port self.admin_http_gateway_port = admin_http_gateway_port + self.shard_uris = shard_uris + + self.release = release + self.target_dir = target_dir(self.release) + + self.view_router_process = None + self.admin_http_gateway_process = None + + def __repr__(self): + return self.name + + def start(self): + self.stop() + + print(f'Starting fog view router {self.name}') + cmd = ' '.join([ + DATABASE_URL_ENV, + f'exec {self.target_dir}/fog_view_router', + f'--client-listen-uri={self.client_listen_url}', + f'--client-responder-id={self.client_responder_id}', + f'--ias-api-key={IAS_API_KEY}', + f'--shard-uris={",".join(self.shard_uris)}', + f'--ias-spid={IAS_SPID}', + f'--admin-listen-uri=insecure-mca://{LISTEN_HOST}:{self.admin_port}/', + ]) + self.view_router_process = log_and_popen_shell(cmd) + + print(f'Starting admin http gateway for fog view router') + self.admin_http_gateway_process = start_admin_http_gateway(self.admin_http_gateway_port, self.admin_port, self.target_dir) + + def stop(self): + if self.view_router_process and self.view_router_process.poll() is None: + self.view_router_process.terminate() + self.view_router_process = None + + if self.admin_http_gateway_process and self.admin_http_gateway_process.poll() is None: + self.admin_http_gateway_process.terminate() + self.admin_http_gateway_process = None + +class FogViewStore: + def __init__(self, name, client_port, admin_port, 
admin_http_gateway_port, release, sharding_strategy): + self.name = name + + self.client_port = client_port + self.client_responder_id = f'{LISTEN_HOST}:{self.client_port}' + self.sharding_strategy = sharding_strategy + self.client_listen_url = f'insecure-fog-view-store://{LISTEN_HOST}:{self.client_port}/?sharding_strategy={self.sharding_strategy}' + self.sharding_strategy = sharding_strategy + + self.admin_port = admin_port + self.admin_http_gateway_port = admin_http_gateway_port + self.release = release self.target_dir = target_dir(self.release) @@ -192,15 +252,19 @@ def __init__(self, name, client_responder_id, client_port, admin_port, admin_htt def __repr__(self): return self.name + def get_client_listen_uri(self): + return self.client_listen_url + def start(self): self.stop() - print(f'Starting fog view {self.name}') + print(f'Starting fog view store {self.name}') cmd = ' '.join([ DATABASE_URL_ENV, f'exec {self.target_dir}/fog_view_server', f'--client-listen-uri={self.client_listen_url}', f'--client-responder-id={self.client_responder_id}', + f'--sharding-strategy={self.sharding_strategy}', f'--ias-api-key={IAS_API_KEY}', f'--ias-spid={IAS_SPID}', f'--admin-listen-uri=insecure-mca://{LISTEN_HOST}:{self.admin_port}/', diff --git a/tools/fog-local-network/requirements.txt b/tools/fog-local-network/requirements.txt index 2b790e68da..06b4f17d14 100644 --- a/tools/fog-local-network/requirements.txt +++ b/tools/fog-local-network/requirements.txt @@ -1,2 +1,2 @@ -grpcio==1.36.1 -grpcio-tools==1.36.1 +grpcio==1.51.3 +grpcio-tools==1.51.3 diff --git a/util/grpc/src/lib.rs b/util/grpc/src/lib.rs index c0216af9b5..e7a3d8761c 100644 --- a/util/grpc/src/lib.rs +++ b/util/grpc/src/lib.rs @@ -84,11 +84,9 @@ pub fn send_result( logger: &Logger, ) { let logger = logger.clone(); - let success = resp.is_ok(); - let code = match &resp { - Ok(_) => RpcStatusCode::OK, - Err(e) => e.code(), - }; + let response_status = ResponseStatus::from(&resp); + let is_success = 
response_status.is_success; + let code = response_status.code; match resp { Ok(ok) => ctx.spawn( @@ -103,10 +101,31 @@ pub fn send_result( ), } - SVC_COUNTERS.resp(&ctx, success); + SVC_COUNTERS.resp(&ctx, is_success); SVC_COUNTERS.status_code(&ctx, code); } +/// Helper struct that provides information related to a gRPC response. +pub struct ResponseStatus { + /// True if the gRPC response is ok. + pub is_success: bool, + + /// RpcStatusCode that corresponds to the response. + pub code: RpcStatusCode, +} + +impl From<&Result> for ResponseStatus { + fn from(response: &Result) -> Self { + let is_success = response.is_ok(); + let code = match response { + Ok(_) => RpcStatusCode::OK, + Err(e) => e.code(), + }; + + Self { is_success, code } + } +} + macro_rules! report_err_with_code( ($context:expr, $err:expr, $code:expr, $logger:expr, $log_level:expr) => {{ let err_str = format!("{}: {}", $context, $err); diff --git a/util/metrics/src/lib.rs b/util/metrics/src/lib.rs index 1bf8fa9d53..bf94ac94e3 100644 --- a/util/metrics/src/lib.rs +++ b/util/metrics/src/lib.rs @@ -19,4 +19,4 @@ pub use prometheus::{ register, register_histogram, Histogram, HistogramOpts, HistogramVec, IntCounter, IntCounterVec, IntGauge, IntGaugeVec, Opts, }; -pub use service_metrics::ServiceMetrics; +pub use service_metrics::{GrpcMethodName, ServiceMetrics}; diff --git a/util/metrics/src/service_metrics.rs b/util/metrics/src/service_metrics.rs index f2c068d46f..42dfb7d063 100644 --- a/util/metrics/src/service_metrics.rs +++ b/util/metrics/src/service_metrics.rs @@ -47,6 +47,10 @@ use std::{path::Path, str}; /// from GRPC context. /// e.g., calc_service.req{method = "add"} = +1 /// e.g., calc_service.duration_sum{method="add"} = 6 + +/// Corresponds to which gRPC method is being called. 
+pub type GrpcMethodName = String; + #[derive(Clone)] pub struct ServiceMetrics { /// Count of requests made by each gRPC method tracked @@ -141,11 +145,13 @@ impl ServiceMetrics { /// name and increments counters tracking the number of calls to and /// returns a counter to track the duration of the method pub fn req(&self, ctx: &RpcContext) -> Option { - let mut method_name = "unknown_method".to_string(); - if let Some(name) = path_from_ctx(ctx) { - method_name = name; - } + let method_name = Self::get_method_name(ctx); + self.req_impl(&method_name) + } + /// Increments counters tracking the number of calls to and + /// returns a counter to track the duration of the method + pub fn req_impl(&self, method_name: &GrpcMethodName) -> Option { self.num_req .with_label_values(&[method_name.as_str()]) .inc(); @@ -156,26 +162,40 @@ impl ServiceMetrics { ) } + /// Gets the method name from a gRPC RpcContext. + pub fn get_method_name(ctx: &RpcContext) -> GrpcMethodName { + match path_from_ctx(ctx) { + Some(method_name) => method_name, + None => "unknown_method".to_string(), + } + } + /// Takes the RpcContext used during a gRPC method call to get the method /// name and increments an error counter if the method resulted in an /// error pub fn resp(&self, ctx: &RpcContext, success: bool) { - if let Some(name) = path_from_ctx(ctx) { - self.num_error - .with_label_values(&[name.as_str()]) - .inc_by(if success { 0 } else { 1 }); - } + let method_name = Self::get_method_name(ctx); + self.resp_impl(&method_name, success); + } + + pub fn resp_impl(&self, method_name: &GrpcMethodName, success: bool) { + self.num_error + .with_label_values(&[method_name.as_str()]) + .inc_by(if success { 0 } else { 1 }); } /// Takes the RpcContext used during a gRPC method call to get the method /// name as well as the gRPC status code that method returned and /// increments a counter for the status code reported pub fn status_code(&self, ctx: &RpcContext, response_code: RpcStatusCode) { - if let 
Some(name) = path_from_ctx(ctx) { - self.num_status_code - .with_label_values(&[name.as_str(), response_code.to_string().as_str()]) - .inc(); - } + let method_name = Self::get_method_name(ctx); + self.status_code_impl(&method_name, response_code); + } + + pub fn status_code_impl(&self, method_name: &GrpcMethodName, response_code: RpcStatusCode) { + self.num_status_code + .with_label_values(&[method_name.as_str(), response_code.to_string().as_str()]) + .inc(); } /// Tracks gRPC message name and size for aggregation into a Prometheus diff --git a/util/telemetry/src/lib.rs b/util/telemetry/src/lib.rs index 24a9914499..d6cc2278a8 100644 --- a/util/telemetry/src/lib.rs +++ b/util/telemetry/src/lib.rs @@ -3,12 +3,13 @@ //! OpenTelemetry wrappers and helper utilities. pub use opentelemetry::{ - trace::{mark_span_as_active, Span, SpanKind, TraceContextExt, Tracer}, + global::BoxedTracer, + trace::{mark_span_as_active, FutureExt, Span, SpanKind, TraceContextExt, Tracer}, Context, Key, }; use opentelemetry::{ - global::{tracer_provider, BoxedTracer}, + global::tracer_provider, trace::{SpanBuilder, TraceId, TracerProvider}, }; use std::borrow::Cow; @@ -50,6 +51,16 @@ pub fn versioned_tracer( tracer_provider().versioned_tracer(name, version, schema_url) } +/// Creates a context when an explicit context is required. Useful when tracing +/// inside an `async` method. +pub fn create_context(tracer: &BoxedTracer, name: T) -> Context +where + T: Into>, +{ + let span = tracer.start(name); + Context::current_with_span(span) +} + /// A utility method to create a predictable trace ID out of a block index. /// This is used to group traces by block index. 
pub fn block_index_to_trace_id(block_index: u64) -> TraceId { diff --git a/util/uri/src/uri.rs b/util/uri/src/uri.rs index 2cc23b57cb..501a8dd8cb 100644 --- a/util/uri/src/uri.rs +++ b/util/uri/src/uri.rs @@ -2,6 +2,7 @@ use crate::traits::{ConnectionUri, UriScheme}; use displaydoc::Display; +use mc_common::ResponderId; use percent_encoding::percent_decode_str; use std::{ fmt::{Display, Formatter, Result as FmtResult}, @@ -90,6 +91,22 @@ impl ConnectionUri for Uri { } } +impl Uri { + /// Creates a `Uri` from a `ResponderId` + pub fn try_from_responder_id( + responder_id: ResponderId, + use_tls: bool, + ) -> Result { + let scheme = match use_tls { + true => Scheme::SCHEME_SECURE, + false => Scheme::SCHEME_INSECURE, + }; + let uri_string = format!("{scheme}://{responder_id}"); + + Self::from_str(&uri_string) + } +} + impl Display for Uri { fn fmt(&self, f: &mut Formatter) -> FmtResult { let scheme = if self.use_tls {