diff --git a/Cargo.toml b/Cargo.toml index f508d3d3a..61af74618 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,5 +9,5 @@ members = [ resolver = "2" [workspace.dependencies] -miden-crypto = { git = "https://github.com/0xPolygonMiden/crypto", branch = "next", default-features = false, features = ["std"] } +miden-crypto = { package = "miden-crypto", git = "https://github.com/0xPolygonMiden/crypto.git", branch = "next", features = ["std"] } miden_objects = { package = "miden-objects", git = "https://github.com/0xPolygonMiden/miden-base.git", branch = "main", default-features = false } diff --git a/proto/Cargo.toml b/proto/Cargo.toml index 61047711e..f8b63a2e4 100644 --- a/proto/Cargo.toml +++ b/proto/Cargo.toml @@ -12,6 +12,7 @@ rust-version = "1.67" [dependencies] hex = { version = "0.4" } miden-crypto = { workspace = true } +miden_objects = { workspace = true } prost = { version = "0.12" } tonic = { version = "0.10" } diff --git a/proto/proto/account_id.proto b/proto/proto/account_id.proto new file mode 100644 index 000000000..5c68880c9 --- /dev/null +++ b/proto/proto/account_id.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; +package account_id; + +message AccountId { + // A miden account is defined with a little bit of proof-of-work, the id itself + // is defined as the first word of a hash digest. For this reason account ids + // can be considered as random values, because of that the encoding below uses + // fixed 64 bits, instead of zig-zag encoding. + fixed64 id = 1; +} diff --git a/proto/proto/block_header.proto b/proto/proto/block_header.proto index 89d02084f..341aafadb 100644 --- a/proto/proto/block_header.proto +++ b/proto/proto/block_header.proto @@ -4,24 +4,24 @@ package block_header; import "digest.proto"; message BlockHeader { - /// the hash of the previous blocks header. + // the hash of the previous blocks header. digest.Digest prev_hash = 1; - /// a unique sequential number of the current block. + // a unique sequential number of the current block. 
uint32 block_num = 2; - /// a commitment to an MMR of the entire chain where each block is a leaf. + // a commitment to an MMR of the entire chain where each block is a leaf. digest.Digest chain_root = 3; - /// a commitment to account database. + // a commitment to account database. digest.Digest account_root = 4; - /// a commitment to the nullifier database. + // a commitment to the nullifier database. digest.Digest nullifier_root = 5; - /// a commitment to all notes created in the current block. + // a commitment to all notes created in the current block. digest.Digest note_root = 6; - /// a commitment to a set of transaction batches executed as a part of this block. + // a commitment to a set of transaction batches executed as a part of this block. digest.Digest batch_root = 7; - /// a hash of a STARK proof attesting to the correct state transition. + // a hash of a STARK proof attesting to the correct state transition. digest.Digest proof_hash = 8; - /// specifies the version of the protocol. + // specifies the version of the protocol. uint32 version = 9; - /// the time when the block was created. + // the time when the block was created. 
uint64 timestamp = 10; } diff --git a/proto/proto/merkle.proto b/proto/proto/merkle.proto new file mode 100644 index 000000000..abded7231 --- /dev/null +++ b/proto/proto/merkle.proto @@ -0,0 +1,8 @@ +syntax = "proto3"; +package merkle; + +import "digest.proto"; + +message MerklePath { + repeated digest.Digest siblings = 1; +} diff --git a/proto/proto/mmr.proto b/proto/proto/mmr.proto new file mode 100644 index 000000000..baaced2c9 --- /dev/null +++ b/proto/proto/mmr.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; +package mmr; + +import "digest.proto"; + +message MmrDelta { + uint64 forest = 1; + repeated digest.Digest data = 2; +} diff --git a/proto/proto/note.proto b/proto/proto/note.proto new file mode 100644 index 000000000..13980b817 --- /dev/null +++ b/proto/proto/note.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package note; + +import "digest.proto"; +import "merkle.proto"; + +message Note { + uint32 block_num = 1; + uint32 note_index = 2; + digest.Digest note_hash = 3; + uint64 sender = 4; + uint64 tag = 5; + uint32 num_assets = 6; + merkle.MerklePath merkle_path = 7; +} diff --git a/proto/proto/requests.proto b/proto/proto/requests.proto new file mode 100644 index 000000000..f9b98e787 --- /dev/null +++ b/proto/proto/requests.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; +package requests; + +import "account_id.proto"; +import "digest.proto"; + +message CheckNullifiersRequest { + repeated digest.Digest nullifiers = 1; +} + +message FetchBlockHeaderByNumberRequest { + // The block number of the target block. + // + // If not provided, means latest known block. + optional uint32 block_num = 1; +} + +// State synchronization request. +message SyncStateRequest { + // Send updates to the client starting at this block. + uint32 block_num = 1; + + repeated account_id.AccountId account_ids = 2; + + // Tags and nullifiers are filters, both filters correspond to the high + // 16 bits of the real values shifted to the right `>> 48`. 
+ repeated uint32 note_tags = 3; + repeated uint32 nullifiers = 4; +} diff --git a/proto/proto/responses.proto b/proto/proto/responses.proto new file mode 100644 index 000000000..a73c8327e --- /dev/null +++ b/proto/proto/responses.proto @@ -0,0 +1,62 @@ +syntax = "proto3"; +package responses; + +import "account_id.proto"; +import "block_header.proto"; +import "digest.proto"; +import "merkle.proto"; +import "mmr.proto"; +import "tsmt.proto"; + +message CheckNullifiersResponse { + // Each requested nullifier has its corresponding nullifier proof at the + // same position. + repeated tsmt.NullifierProof proofs = 1; +} + +message FetchBlockHeaderByNumberResponse { + block_header.BlockHeader block_header = 1; +} + +message AccountHashUpdate { + account_id.AccountId account_id = 1; + digest.Digest account_hash = 2; + uint32 block_num = 3; +} + +message NullifierUpdate { + digest.Digest nullifier = 1; + uint32 block_num = 2; +} + +message NoteSyncRecord { + uint32 note_index = 2; + digest.Digest note_hash = 3; + uint64 sender = 4; + uint64 tag = 5; + uint32 num_assets = 6; + merkle.MerklePath merkle_path = 7; +} + +message SyncStateResponse { + // number of the latest block in the chain + uint32 chain_tip = 1; + + // block header of the block with the first note matching the specified criteria + block_header.BlockHeader block_header = 2; + + // data needed to update the partial MMR from `block_ref` to `block_header.block_num` + mmr.MmrDelta mmr_delta = 3; + + // Merkle path in the updated chain MMR to the block at `block_header.block_num` + merkle.MerklePath block_path = 4; + + // a list of account hashes updated after `block_ref` but not after `block_header.block_num` + repeated AccountHashUpdate accounts = 5; + + // a list of all notes together with the Merkle paths from `block_header.note_root` + repeated NoteSyncRecord notes = 6; + + // a list of nullifiers created between `block_ref` and `block_header.block_num` + repeated NullifierUpdate nullifiers = 7; +} diff 
--git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index be92a4b7c..ccaf04936 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -2,32 +2,11 @@ syntax = "proto3"; package rpc; -import "block_header.proto"; -import "digest.proto"; -import "tsmt.proto"; - -message CheckNullifiersRequest { - repeated digest.Digest nullifiers = 1; -} - -message CheckNullifiersResponse { - // Each requested nullifier has its corresponding nullifier proof at the - // same position. - repeated tsmt.NullifierProof proofs = 1; -} - -message FetchBlockHeaderByNumberRequest { - // The block number of the target block. - // - // If not provided, means latest know block. - optional uint64 block_num = 1; -} - -message FetchBlockHeaderByNumberResponse { - block_header.BlockHeader block_header = 1; -} +import "requests.proto"; +import "responses.proto"; service Api { - rpc CheckNullifiers(CheckNullifiersRequest) returns (CheckNullifiersResponse) {} - rpc FetchBlockHeaderByNumber(FetchBlockHeaderByNumberRequest) returns (FetchBlockHeaderByNumberResponse) {} + rpc CheckNullifiers(requests.CheckNullifiersRequest) returns (responses.CheckNullifiersResponse) {} + rpc FetchBlockHeaderByNumber(requests.FetchBlockHeaderByNumberRequest) returns (responses.FetchBlockHeaderByNumberResponse) {} + rpc SyncState(requests.SyncStateRequest) returns (responses.SyncStateResponse) {} } diff --git a/proto/proto/store.proto b/proto/proto/store.proto index 3f284b72f..55a9ebcea 100644 --- a/proto/proto/store.proto +++ b/proto/proto/store.proto @@ -4,32 +4,11 @@ syntax = "proto3"; package store; -import "block_header.proto"; -import "digest.proto"; -import "tsmt.proto"; - -message CheckNullifiersRequest { - repeated digest.Digest nullifiers = 1; -} - -message CheckNullifiersResponse { - // Each requested nullifier has its corresponding nullifier proof at the - // same position. 
- repeated tsmt.NullifierProof proofs = 1; -} - -message FetchBlockHeaderByNumberRequest { - // The block number of the target block. - // - // If not provided, means latest know block. - optional uint64 block_num = 1; -} - -message FetchBlockHeaderByNumberResponse { - block_header.BlockHeader block_header = 1; -} +import "requests.proto"; +import "responses.proto"; service Api { - rpc CheckNullifiers(CheckNullifiersRequest) returns (CheckNullifiersResponse) {} - rpc FetchBlockHeaderByNumber(FetchBlockHeaderByNumberRequest) returns (FetchBlockHeaderByNumberResponse) {} + rpc CheckNullifiers(requests.CheckNullifiersRequest) returns (responses.CheckNullifiersResponse) {} + rpc FetchBlockHeaderByNumber(requests.FetchBlockHeaderByNumberRequest) returns (responses.FetchBlockHeaderByNumberResponse) {} + rpc SyncState(requests.SyncStateRequest) returns (responses.SyncStateResponse) {} } diff --git a/proto/src/conversion.rs b/proto/src/conversion.rs index a679b4ca5..7ce16e4ee 100644 --- a/proto/src/conversion.rs +++ b/proto/src/conversion.rs @@ -1,13 +1,10 @@ -use crate::digest; -use crate::error; -use crate::tsmt; -use miden_crypto::hash::rpo::RpoDigest; -use miden_crypto::merkle::MerklePath; -use miden_crypto::merkle::TieredSmtProof; -use miden_crypto::Felt; -use miden_crypto::FieldElement; -use miden_crypto::StarkField; -use miden_crypto::Word; +use crate::{account_id, block_header, digest, error, merkle, mmr, note, responses, tsmt}; +use miden_crypto::{ + hash::rpo::RpoDigest, + merkle::{MerklePath, MmrDelta, TieredSmtProof}, + Felt, FieldElement, StarkField, Word, +}; +use miden_objects::BlockHeader; impl From<[u64; 4]> for digest::Digest { fn from(value: [u64; 4]) -> Self { @@ -117,3 +114,87 @@ impl TryFrom<&digest::Digest> for RpoDigest { value.clone().try_into() } } + +impl TryFrom for BlockHeader { + type Error = error::ParseError; + + fn try_from(value: block_header::BlockHeader) -> Result { + Ok(BlockHeader::new( + 
value.prev_hash.ok_or(error::ParseError::ProtobufMissingData)?.try_into()?, + value.block_num.into(), + value.chain_root.ok_or(error::ParseError::ProtobufMissingData)?.try_into()?, + value.account_root.ok_or(error::ParseError::ProtobufMissingData)?.try_into()?, + value.nullifier_root.ok_or(error::ParseError::ProtobufMissingData)?.try_into()?, + value.note_root.ok_or(error::ParseError::ProtobufMissingData)?.try_into()?, + value.batch_root.ok_or(error::ParseError::ProtobufMissingData)?.try_into()?, + value.proof_hash.ok_or(error::ParseError::ProtobufMissingData)?.try_into()?, + value.version.into(), + value.timestamp.into(), + )) + } +} + +impl TryFrom<&block_header::BlockHeader> for BlockHeader { + type Error = error::ParseError; + + fn try_from(value: &block_header::BlockHeader) -> Result { + value.clone().try_into() + } +} + +impl TryFrom for MmrDelta { + type Error = error::ParseError; + + fn try_from(value: mmr::MmrDelta) -> Result { + let data: Result, error::ParseError> = + value.data.into_iter().map(|v| v.try_into()).collect(); + + Ok(MmrDelta { + forest: value.forest as usize, + data: data?, + }) + } +} + +impl From for mmr::MmrDelta { + fn from(value: MmrDelta) -> Self { + let data: Vec = value.data.into_iter().map(|v| v.into()).collect(); + + mmr::MmrDelta { + forest: value.forest as u64, + data, + } + } +} + +impl From for merkle::MerklePath { + fn from(value: MerklePath) -> Self { + let siblings: Vec = value.nodes().iter().map(|v| (*v).into()).collect(); + merkle::MerklePath { siblings } + } +} + +impl From for responses::NoteSyncRecord { + fn from(value: note::Note) -> Self { + Self { + note_index: value.note_index, + note_hash: value.note_hash, + sender: value.sender, + tag: value.tag, + num_assets: value.num_assets, + merkle_path: value.merkle_path, + } + } +} + +impl From for u64 { + fn from(value: account_id::AccountId) -> Self { + value.id + } +} + +impl From for account_id::AccountId { + fn from(value: u64) -> Self { + account_id::AccountId { id: 
value } + } +} diff --git a/proto/src/error.rs b/proto/src/error.rs index d6062eafa..31c542314 100644 --- a/proto/src/error.rs +++ b/proto/src/error.rs @@ -6,6 +6,7 @@ pub enum ParseError { MissingLeafKey, NotAValidFelt, InvalidProof, + ProtobufMissingData, } impl std::error::Error for ParseError {} @@ -32,6 +33,7 @@ impl std::fmt::Display for ParseError { ParseError::InvalidProof => { write!(f, "Received TSMT proof is invalid") }, + ParseError::ProtobufMissingData => write!(f, "Protobuf message missing data"), } } } diff --git a/proto/src/generated/account_id.rs b/proto/src/generated/account_id.rs new file mode 100644 index 000000000..46a43c396 --- /dev/null +++ b/proto/src/generated/account_id.rs @@ -0,0 +1,11 @@ +#[derive(Eq, PartialOrd, Ord, Hash)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccountId { + /// A miden account is defined with a little bit of proof-of-work, the id itself + /// is defined as the first word of a hash digest. For this reason account ids + /// can be considered as random values, because of that the encoding bellow uses + /// fixed 64 bits, instead of zig-zag encoding. + #[prost(fixed64, tag = "1")] + pub id: u64, +} diff --git a/proto/src/generated/block_header.rs b/proto/src/generated/block_header.rs index ba43af683..ae8cf4d37 100644 --- a/proto/src/generated/block_header.rs +++ b/proto/src/generated/block_header.rs @@ -2,34 +2,34 @@ #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockHeader { - /// / the hash of the previous blocks header. + /// the hash of the previous blocks header. #[prost(message, optional, tag = "1")] pub prev_hash: ::core::option::Option, - /// / a unique sequential number of the current block. + /// a unique sequential number of the current block. #[prost(uint32, tag = "2")] pub block_num: u32, - /// / a commitment to an MMR of the entire chain where each block is a leaf. 
+ /// a commitment to an MMR of the entire chain where each block is a leaf. #[prost(message, optional, tag = "3")] pub chain_root: ::core::option::Option, - /// / a commitment to account database. + /// a commitment to account database. #[prost(message, optional, tag = "4")] pub account_root: ::core::option::Option, - /// / a commitment to the nullifier database. + /// a commitment to the nullifier database. #[prost(message, optional, tag = "5")] pub nullifier_root: ::core::option::Option, - /// / a commitment to all notes created in the current block. + /// a commitment to all notes created in the current block. #[prost(message, optional, tag = "6")] pub note_root: ::core::option::Option, - /// / a commitment to a set of transaction batches executed as a part of this block. + /// a commitment to a set of transaction batches executed as a part of this block. #[prost(message, optional, tag = "7")] pub batch_root: ::core::option::Option, - /// / a hash of a STARK proof attesting to the correct state transition. + /// a hash of a STARK proof attesting to the correct state transition. #[prost(message, optional, tag = "8")] pub proof_hash: ::core::option::Option, - /// / specifies the version of the protocol. + /// specifies the version of the protocol. #[prost(uint32, tag = "9")] pub version: u32, - /// / the time when the block was created. + /// the time when the block was created. 
#[prost(uint64, tag = "10")] pub timestamp: u64, } diff --git a/proto/src/generated/merkle.rs b/proto/src/generated/merkle.rs new file mode 100644 index 000000000..4cf424f6f --- /dev/null +++ b/proto/src/generated/merkle.rs @@ -0,0 +1,7 @@ +#[derive(Eq, PartialOrd, Ord, Hash)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MerklePath { + #[prost(message, repeated, tag = "1")] + pub siblings: ::prost::alloc::vec::Vec, +} diff --git a/proto/src/generated/mmr.rs b/proto/src/generated/mmr.rs new file mode 100644 index 000000000..388cf8335 --- /dev/null +++ b/proto/src/generated/mmr.rs @@ -0,0 +1,9 @@ +#[derive(Eq, PartialOrd, Ord, Hash)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MmrDelta { + #[prost(uint64, tag = "1")] + pub forest: u64, + #[prost(message, repeated, tag = "2")] + pub data: ::prost::alloc::vec::Vec, +} diff --git a/proto/src/generated/mod.rs b/proto/src/generated/mod.rs index a8d6cfde3..a8ae76f70 100644 --- a/proto/src/generated/mod.rs +++ b/proto/src/generated/mod.rs @@ -1,5 +1,11 @@ +pub mod account_id; pub mod block_header; pub mod digest; +pub mod merkle; +pub mod mmr; +pub mod note; +pub mod requests; +pub mod responses; pub mod rpc; pub mod store; pub mod tsmt; diff --git a/proto/src/generated/note.rs b/proto/src/generated/note.rs new file mode 100644 index 000000000..e85c50af9 --- /dev/null +++ b/proto/src/generated/note.rs @@ -0,0 +1,19 @@ +#[derive(Eq, PartialOrd, Ord, Hash)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Note { + #[prost(uint32, tag = "1")] + pub block_num: u32, + #[prost(uint32, tag = "2")] + pub note_index: u32, + #[prost(message, optional, tag = "3")] + pub note_hash: ::core::option::Option, + #[prost(uint64, tag = "4")] + pub sender: u64, + #[prost(uint64, tag = "5")] + pub tag: u64, + #[prost(uint32, tag = "6")] + pub num_assets: u32, + 
#[prost(message, optional, tag = "7")] + pub merkle_path: ::core::option::Option, +} diff --git a/proto/src/generated/requests.rs b/proto/src/generated/requests.rs new file mode 100644 index 000000000..3cc7a95ba --- /dev/null +++ b/proto/src/generated/requests.rs @@ -0,0 +1,34 @@ +#[derive(Eq, PartialOrd, Ord, Hash)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CheckNullifiersRequest { + #[prost(message, repeated, tag = "1")] + pub nullifiers: ::prost::alloc::vec::Vec, +} +#[derive(Eq, PartialOrd, Ord, Hash)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FetchBlockHeaderByNumberRequest { + /// The block number of the target block. + /// + /// If not provided, means latest known block. + #[prost(uint32, optional, tag = "1")] + pub block_num: ::core::option::Option, +} +/// State synchronization request. +#[derive(Eq, PartialOrd, Ord, Hash)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncStateRequest { + /// Send updates to the client starting at this block. + #[prost(uint32, tag = "1")] + pub block_num: u32, + #[prost(message, repeated, tag = "2")] + pub account_ids: ::prost::alloc::vec::Vec, + /// Tags and nullifiers are filters, both filters correspond to the high + /// 16 bits of the real values shifted to the right `>> 48`. 
+ #[prost(uint32, repeated, tag = "3")] + pub note_tags: ::prost::alloc::vec::Vec, + #[prost(uint32, repeated, tag = "4")] + pub nullifiers: ::prost::alloc::vec::Vec, +} diff --git a/proto/src/generated/responses.rs b/proto/src/generated/responses.rs new file mode 100644 index 000000000..c487ed706 --- /dev/null +++ b/proto/src/generated/responses.rs @@ -0,0 +1,79 @@ +#[derive(Eq, PartialOrd, Ord, Hash)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CheckNullifiersResponse { + /// Each requested nullifier has its corresponding nullifier proof at the + /// same position. + #[prost(message, repeated, tag = "1")] + pub proofs: ::prost::alloc::vec::Vec, +} +#[derive(Eq, PartialOrd, Ord, Hash)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FetchBlockHeaderByNumberResponse { + #[prost(message, optional, tag = "1")] + pub block_header: ::core::option::Option, +} +#[derive(Eq, PartialOrd, Ord, Hash)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AccountHashUpdate { + #[prost(message, optional, tag = "1")] + pub account_id: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub account_hash: ::core::option::Option, + #[prost(uint32, tag = "3")] + pub block_num: u32, +} +#[derive(Eq, PartialOrd, Ord, Hash)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NullifierUpdate { + #[prost(message, optional, tag = "1")] + pub nullifier: ::core::option::Option, + #[prost(uint32, tag = "2")] + pub block_num: u32, +} +#[derive(Eq, PartialOrd, Ord, Hash)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NoteSyncRecord { + #[prost(uint32, tag = "2")] + pub note_index: u32, + #[prost(message, optional, tag = "3")] + pub note_hash: ::core::option::Option, + #[prost(uint64, tag = 
"4")] + pub sender: u64, + #[prost(uint64, tag = "5")] + pub tag: u64, + #[prost(uint32, tag = "6")] + pub num_assets: u32, + #[prost(message, optional, tag = "7")] + pub merkle_path: ::core::option::Option, +} +#[derive(Eq, PartialOrd, Ord, Hash)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SyncStateResponse { + /// number of the latest block in the chain + #[prost(uint32, tag = "1")] + pub chain_tip: u32, + /// block header of the block with the first note matching the specified criteria + #[prost(message, optional, tag = "2")] + pub block_header: ::core::option::Option, + /// data needed to update the partial MMR from `block_ref` to `block_header.block_num` + #[prost(message, optional, tag = "3")] + pub mmr_delta: ::core::option::Option, + /// Merkle path in the updated chain MMR to the block at `block_header.block_num` + #[prost(message, optional, tag = "4")] + pub block_path: ::core::option::Option, + /// a list of account hashes updated after `block_ref` but not after `block_header.block_num` + #[prost(message, repeated, tag = "5")] + pub accounts: ::prost::alloc::vec::Vec, + /// a list of all notes together with the Merkle paths from `block_header.note_root` + #[prost(message, repeated, tag = "6")] + pub notes: ::prost::alloc::vec::Vec, + /// a list of nullifiers created between `block_ref` and `block_header.block_num` + #[prost(message, repeated, tag = "7")] + pub nullifiers: ::prost::alloc::vec::Vec, +} diff --git a/proto/src/generated/rpc.rs b/proto/src/generated/rpc.rs index a87f1c3dc..219f399bc 100644 --- a/proto/src/generated/rpc.rs +++ b/proto/src/generated/rpc.rs @@ -1,36 +1,3 @@ -#[derive(Eq, PartialOrd, Ord, Hash)] -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CheckNullifiersRequest { - #[prost(message, repeated, tag = "1")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -#[derive(Eq, PartialOrd, Ord, Hash)] 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CheckNullifiersResponse { - /// Each requested nullifier has its corresponding nullifier proof at the - /// same position. - #[prost(message, repeated, tag = "1")] - pub proofs: ::prost::alloc::vec::Vec, -} -#[derive(Eq, PartialOrd, Ord, Hash)] -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FetchBlockHeaderByNumberRequest { - /// The block number of the target block. - /// - /// If not provided, means latest know block. - #[prost(uint64, optional, tag = "1")] - pub block_num: ::core::option::Option, -} -#[derive(Eq, PartialOrd, Ord, Hash)] -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FetchBlockHeaderByNumberResponse { - #[prost(message, optional, tag = "1")] - pub block_header: ::core::option::Option, -} /// Generated client implementations. pub mod api_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] @@ -118,9 +85,11 @@ pub mod api_client { } pub async fn check_nullifiers( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest< + super::super::requests::CheckNullifiersRequest, + >, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -140,9 +109,11 @@ pub mod api_client { } pub async fn fetch_block_header_by_number( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest< + super::super::requests::FetchBlockHeaderByNumberRequest, + >, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -163,6 +134,28 @@ pub mod api_client { .insert(GrpcMethod::new("rpc.Api", "FetchBlockHeaderByNumber")); self.inner.unary(req, path, codec).await } + pub async fn sync_state( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + 
tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncState"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncState")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -174,16 +167,25 @@ pub mod api_server { pub trait Api: Send + Sync + 'static { async fn check_nullifiers( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; async fn fetch_block_header_by_number( &self, - request: tonic::Request, + request: tonic::Request< + super::super::requests::FetchBlockHeaderByNumberRequest, + >, ) -> std::result::Result< - tonic::Response, + tonic::Response, + tonic::Status, + >; + async fn sync_state( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, tonic::Status, >; } @@ -271,16 +273,19 @@ pub mod api_server { struct CheckNullifiersSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService - for CheckNullifiersSvc { - type Response = super::CheckNullifiersResponse; + > tonic::server::UnaryService< + super::super::requests::CheckNullifiersRequest, + > for CheckNullifiersSvc { + type Response = super::super::responses::CheckNullifiersResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request< + super::super::requests::CheckNullifiersRequest, + >, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -317,9 +322,10 @@ pub mod api_server { struct FetchBlockHeaderByNumberSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService - for FetchBlockHeaderByNumberSvc { - type Response = 
super::FetchBlockHeaderByNumberResponse; + > tonic::server::UnaryService< + super::super::requests::FetchBlockHeaderByNumberRequest, + > for FetchBlockHeaderByNumberSvc { + type Response = super::super::responses::FetchBlockHeaderByNumberResponse; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -327,7 +333,7 @@ pub mod api_server { fn call( &mut self, request: tonic::Request< - super::FetchBlockHeaderByNumberRequest, + super::super::requests::FetchBlockHeaderByNumberRequest, >, ) -> Self::Future { let inner = Arc::clone(&self.0); @@ -361,6 +367,55 @@ pub mod api_server { }; Box::pin(fut) } + "/rpc.Api/SyncState" => { + #[allow(non_camel_case_types)] + struct SyncStateSvc(pub Arc); + impl< + T: Api, + > tonic::server::UnaryService< + super::super::requests::SyncStateRequest, + > for SyncStateSvc { + type Response = super::super::responses::SyncStateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::requests::SyncStateRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_state(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = SyncStateSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { Ok( diff 
--git a/proto/src/generated/store.rs b/proto/src/generated/store.rs index 175e3d030..f49697531 100644 --- a/proto/src/generated/store.rs +++ b/proto/src/generated/store.rs @@ -1,36 +1,3 @@ -#[derive(Eq, PartialOrd, Ord, Hash)] -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CheckNullifiersRequest { - #[prost(message, repeated, tag = "1")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -#[derive(Eq, PartialOrd, Ord, Hash)] -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CheckNullifiersResponse { - /// Each requested nullifier has its corresponding nullifier proof at the - /// same position. - #[prost(message, repeated, tag = "1")] - pub proofs: ::prost::alloc::vec::Vec, -} -#[derive(Eq, PartialOrd, Ord, Hash)] -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FetchBlockHeaderByNumberRequest { - /// The block number of the target block. - /// - /// If not provided, means latest know block. - #[prost(uint64, optional, tag = "1")] - pub block_num: ::core::option::Option, -} -#[derive(Eq, PartialOrd, Ord, Hash)] -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FetchBlockHeaderByNumberResponse { - #[prost(message, optional, tag = "1")] - pub block_header: ::core::option::Option, -} /// Generated client implementations. 
pub mod api_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] @@ -118,9 +85,11 @@ pub mod api_client { } pub async fn check_nullifiers( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest< + super::super::requests::CheckNullifiersRequest, + >, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -142,9 +111,11 @@ pub mod api_client { } pub async fn fetch_block_header_by_number( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest< + super::super::requests::FetchBlockHeaderByNumberRequest, + >, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -165,6 +136,28 @@ pub mod api_client { .insert(GrpcMethod::new("store.Api", "FetchBlockHeaderByNumber")); self.inner.unary(req, path, codec).await } + pub async fn sync_state( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Api/SyncState"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Api", "SyncState")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. 
@@ -176,16 +169,25 @@ pub mod api_server { pub trait Api: Send + Sync + 'static { async fn check_nullifiers( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; async fn fetch_block_header_by_number( &self, - request: tonic::Request, + request: tonic::Request< + super::super::requests::FetchBlockHeaderByNumberRequest, + >, ) -> std::result::Result< - tonic::Response, + tonic::Response, + tonic::Status, + >; + async fn sync_state( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, tonic::Status, >; } @@ -273,16 +275,19 @@ pub mod api_server { struct CheckNullifiersSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService - for CheckNullifiersSvc { - type Response = super::CheckNullifiersResponse; + > tonic::server::UnaryService< + super::super::requests::CheckNullifiersRequest, + > for CheckNullifiersSvc { + type Response = super::super::responses::CheckNullifiersResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request< + super::super::requests::CheckNullifiersRequest, + >, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -319,9 +324,10 @@ pub mod api_server { struct FetchBlockHeaderByNumberSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService - for FetchBlockHeaderByNumberSvc { - type Response = super::FetchBlockHeaderByNumberResponse; + > tonic::server::UnaryService< + super::super::requests::FetchBlockHeaderByNumberRequest, + > for FetchBlockHeaderByNumberSvc { + type Response = super::super::responses::FetchBlockHeaderByNumberResponse; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -329,7 +335,7 @@ pub mod api_server { fn call( &mut self, request: tonic::Request< - super::FetchBlockHeaderByNumberRequest, + super::super::requests::FetchBlockHeaderByNumberRequest, >, ) -> Self::Future { let inner = 
Arc::clone(&self.0); @@ -363,6 +369,55 @@ pub mod api_server { }; Box::pin(fut) } + "/store.Api/SyncState" => { + #[allow(non_camel_case_types)] + struct SyncStateSvc(pub Arc); + impl< + T: Api, + > tonic::server::UnaryService< + super::super::requests::SyncStateRequest, + > for SyncStateSvc { + type Response = super::super::responses::SyncStateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::requests::SyncStateRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_state(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = SyncStateSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { Ok( diff --git a/proto/src/lib.rs b/proto/src/lib.rs index 606825407..ec070c609 100644 --- a/proto/src/lib.rs +++ b/proto/src/lib.rs @@ -5,8 +5,14 @@ pub mod hex; // RE-EXPORTS // ------------------------------------------------------------------------------------------------ +pub use generated::account_id; pub use generated::block_header; pub use generated::digest; +pub use generated::merkle; +pub use generated::mmr; +pub use generated::note; +pub use generated::requests; +pub use generated::responses; pub use generated::rpc; pub 
use generated::store; pub use generated::tsmt; diff --git a/rpc/src/main.rs b/rpc/src/main.rs index 6f9f5ad13..6e5e2e573 100644 --- a/rpc/src/main.rs +++ b/rpc/src/main.rs @@ -3,10 +3,7 @@ pub mod config; pub mod server; use hex::ToHex; use miden_crypto::merkle::{path_to_text, TieredSmtProof}; -use miden_node_proto::{ - rpc::{api_client, CheckNullifiersRequest}, - tsmt::NullifierProof, -}; +use miden_node_proto::{requests::CheckNullifiersRequest, rpc::api_client, tsmt::NullifierProof}; use miden_node_utils::Config; use anyhow::Result; diff --git a/rpc/src/server/api.rs b/rpc/src/server/api.rs index 6703ff9fe..da45d54bd 100644 --- a/rpc/src/server/api.rs +++ b/rpc/src/server/api.rs @@ -2,11 +2,10 @@ use crate::config::RpcConfig; use anyhow::Result; use miden_crypto::hash::rpo::RpoDigest; use miden_node_proto::{ - rpc::{ - api_server, CheckNullifiersRequest, CheckNullifiersResponse, - FetchBlockHeaderByNumberRequest, FetchBlockHeaderByNumberResponse, - }, - store::{self, api_client}, + requests::{CheckNullifiersRequest, FetchBlockHeaderByNumberRequest, SyncStateRequest}, + responses::{CheckNullifiersResponse, FetchBlockHeaderByNumberResponse, SyncStateResponse}, + rpc::api_server, + store::api_client, }; use std::net::ToSocketAddrs; use tonic::{ @@ -33,47 +32,28 @@ impl api_server::Api for RpcApi { &self, request: Request, ) -> Result, Status> { - let user_request = request.into_inner(); - // validate all the nullifiers from the user request - for nullifier in user_request.nullifiers.iter() { + for nullifier in request.get_ref().nullifiers.iter() { let _: RpoDigest = nullifier .try_into() .or(Err(Status::invalid_argument("Digest field is not in the modulos range")))?; } - let store_response = self - .store - .clone() - .check_nullifiers(Request::new(store::CheckNullifiersRequest { - nullifiers: user_request.nullifiers, - })) - .await? 
- .into_inner(); - - Ok(Response::new(CheckNullifiersResponse { - proofs: store_response.proofs, - })) + self.store.clone().check_nullifiers(request).await } async fn fetch_block_header_by_number( &self, request: Request, ) -> Result, Status> { - let user_request = request.into_inner(); - - let store_response = self - .store - .clone() - .fetch_block_header_by_number(Request::new(store::FetchBlockHeaderByNumberRequest { - block_num: user_request.block_num, - })) - .await? - .into_inner(); + self.store.clone().fetch_block_header_by_number(request).await + } - Ok(Response::new(FetchBlockHeaderByNumberResponse { - block_header: store_response.block_header, - })) + async fn sync_state( + &self, + request: tonic::Request, + ) -> Result, Status> { + self.store.clone().sync_state(request).await } } diff --git a/store/Cargo.toml b/store/Cargo.toml index e66d08870..44d67575d 100644 --- a/store/Cargo.toml +++ b/store/Cargo.toml @@ -27,6 +27,7 @@ miden-node-utils = { path = "../utils" } miden_objects = { workspace = true } once_cell = { version = "1.18.0" } prost = { version = "0.12" } +rusqlite = { version = "0.29", features = ["array", "buildtime_bindgen"] } rusqlite_migration = { version = "1.0" } serde = { version = "1.0" , features = ["derive"] } tokio = { version = "1.29", features = ["rt-multi-thread", "net", "macros"] } diff --git a/store/src/db.rs b/store/src/db.rs deleted file mode 100644 index b29f5a6d9..000000000 --- a/store/src/db.rs +++ /dev/null @@ -1,95 +0,0 @@ -use crate::{config::StoreConfig, migrations, types::BlockNumber}; -use anyhow::anyhow; -use deadpool_sqlite::{rusqlite::types::ValueRef, Config as SqliteConfig, Pool, Runtime}; -use miden_crypto::{ - hash::rpo::RpoDigest, - utils::{Deserializable, SliceReader}, -}; -use miden_node_proto::block_header::BlockHeader; -use prost::Message; -use std::fs::create_dir_all; -use tracing::info; - -pub struct Db { - pool: Pool, -} - -impl Db { - /// Open a connection to the DB and apply any pending migrations. 
- pub async fn get_conn(config: StoreConfig) -> Result { - if let Some(p) = config.sqlite.parent() { - create_dir_all(p)?; - } - - let pool = SqliteConfig::new(config.sqlite.clone()).create_pool(Runtime::Tokio1)?; - - let conn = pool.get().await?; - - info!(sqlite = format!("{}", config.sqlite.display()), "Connected to the DB"); - - conn.interact(|conn| migrations::MIGRATIONS.to_latest(conn)) - .await - .map_err(|_| anyhow!("Migration task failed with a panic"))??; - - Ok(Db { pool }) - } - - pub async fn get_nullifiers(&self) -> Result, anyhow::Error> { - self.pool - .get() - .await? - .interact(|conn| { - let mut stmt = conn.prepare("SELECT nullifier, block_number FROM nullifiers;")?; - let mut rows = stmt.query([])?; - let mut result = vec![]; - while let Some(row) = rows.next()? { - let nullifier = match row.get_ref_unwrap(0) { - ValueRef::Blob(data) => { - let mut reader = SliceReader::new(data); - RpoDigest::read_from(&mut reader) - .map_err(|_| anyhow!("Decoding nullifier from DB failed"))? - }, - _ => unreachable!(), - }; - let block_number = row.get(1)?; - result.push((nullifier, block_number)); - } - - Ok(result) - }) - .await - .map_err(|_| anyhow!("Get nullifiers task failed with a panic"))? - } - - pub async fn get_block_header( - &self, - block_number: Option, - ) -> Result, anyhow::Error> { - self.pool - .get() - .await? - .interact(move |conn| { - let mut stmt; - let mut rows = match block_number { - Some(block_number) => { - stmt = conn.prepare("SELECT block_header FROM block_header WHERE block_number = ?1")?; - stmt.query([block_number])? - }, - None => { - stmt = conn.prepare("SELECT block_header FROM block_header ORDER BY block_number DESC LIMIT 1")?; - stmt.query([])? - }, - }; - - match rows.next()? { - Some(row) => { - let data = row.get_ref_unwrap(0).as_blob()?; - Ok(Some(BlockHeader::decode(data)?)) - }, - None => Ok(None), - } - }) - .await - .map_err(|_| anyhow!("Get block header task failed with a panic"))? 
- } -} diff --git a/store/src/db/mod.rs b/store/src/db/mod.rs new file mode 100644 index 000000000..f848af335 --- /dev/null +++ b/store/src/db/mod.rs @@ -0,0 +1,540 @@ +use crate::{ + config::StoreConfig, + migrations, + types::{AccountId, BlockNumber}, +}; +use anyhow::anyhow; +use deadpool_sqlite::{Config as SqliteConfig, Pool, Runtime}; +use miden_crypto::{ + hash::rpo::RpoDigest, + utils::{Deserializable, SliceReader}, +}; +use miden_node_proto::{ + account_id, + block_header::BlockHeader, + digest::Digest, + merkle::MerklePath, + note::Note, + responses::{AccountHashUpdate, NullifierUpdate}, +}; +use prost::Message; +use rusqlite::{params, types::Value, vtab::array}; +use std::{fs::create_dir_all, rc::Rc}; +use tracing::info; + +#[cfg(test)] +mod tests; + +pub struct Db { + pool: Pool, +} + +#[derive(Debug, PartialEq)] +pub struct StateSyncUpdate { + pub notes: Vec, + pub block_header: BlockHeader, + pub chain_tip: BlockNumber, + pub account_updates: Vec, + pub nullifiers: Vec, +} + +impl Db { + /// Open a connection to the DB and apply any pending migrations. + pub async fn get_conn(config: StoreConfig) -> Result { + if let Some(p) = config.sqlite.parent() { + create_dir_all(p)?; + } + + let pool = SqliteConfig::new(config.sqlite.clone()).create_pool(Runtime::Tokio1)?; + + let conn = pool.get().await?; + + info!(sqlite = format!("{}", config.sqlite.display()), "Connected to the DB"); + + // Feature used to support `IN` and `NOT IN` queries + conn.interact(|conn| array::load_module(conn)) + .await + .map_err(|_| anyhow!("Loading carray module failed"))??; + + conn.interact(|conn| migrations::MIGRATIONS.to_latest(conn)) + .await + .map_err(|_| anyhow!("Migration task failed with a panic"))??; + + Ok(Db { pool }) + } + + /// Inserts a new nullifier to the DB. + /// + /// This method may be called multiple times with the same nullifier. 
+ #[cfg(test)] + pub async fn add_nullifier( + &self, + nullifier: RpoDigest, + block_number: BlockNumber, + ) -> Result { + use miden_crypto::StarkField; + + let num_rows = self + .pool + .get() + .await? + .interact(move |conn| { + let mut stmt = conn + .prepare("INSERT INTO nullifiers (nullifier, nullifier_prefix, block_number) VALUES (?1, ?2, ?3);")?; + stmt.execute(params![nullifier.as_bytes(), u64_to_prefix(nullifier[0].as_int()), block_number]) + }) + .await + .map_err(|_| anyhow!("Add nullifier task failed with a panic"))??; + + Ok(num_rows) + } + + /// Loads all the nullifiers from the DB. + pub async fn get_nullifiers(&self) -> Result, anyhow::Error> { + self.pool + .get() + .await? + .interact(|conn| { + let mut stmt = conn.prepare("SELECT nullifier, block_number FROM nullifiers;")?; + let mut rows = stmt.query([])?; + let mut result = vec![]; + while let Some(row) = rows.next()? { + let nullifier_data = row.get_ref(0)?.as_blob()?; + let nullifier = decode_rpo_digest(nullifier_data)?; + let block_number = row.get(1)?; + result.push((nullifier, block_number)); + } + + Ok(result) + }) + .await + .map_err(|_| anyhow!("Get nullifiers task failed with a panic"))? + } + + /// Returns nullifiers created in the `(block_start, block_end]` range which also match the + /// `nullifiers` filter. + /// + /// Each value of the `nullifiers` is only the 16 most significant bits of the nullifier of + /// interest to the client. This hides the details of the specific nullifier being requested. + pub async fn get_nullifiers_by_block_range( + &self, + block_start: BlockNumber, + block_end: BlockNumber, + nullifiers: &[u32], + ) -> Result, anyhow::Error> { + let nullifiers: Vec<_> = nullifiers.iter().copied().map(u32_to_value).collect(); + + self.pool + .get() + .await?
+ .interact(move |conn| { + let mut stmt = conn.prepare( + " + SELECT + nullifier, + block_number + FROM + nullifiers + WHERE + block_number > ?1 AND + block_number <= ?2 AND + nullifier_prefix IN rarray(?3) + ", + )?; + + let mut rows = stmt.query(params![block_start, block_end, Rc::new(nullifiers)])?; + + let mut result = Vec::new(); + while let Some(row) = rows.next()? { + let nullifier_data = row.get_ref(0)?.as_blob()?; + let nullifier: Digest = decode_rpo_digest(nullifier_data)?.into(); + let block_num = row.get(1)?; + + result.push(NullifierUpdate { + nullifier: Some(nullifier), + block_num, + }); + } + + Ok(result) + }) + .await + .map_err(|_| anyhow!("Get nullifiers by block number task failed with a panic"))? + } + + /// Save a [BlockHeader] to the DB. + #[cfg(test)] + pub async fn add_block_header( + &self, + block_header: BlockHeader, + ) -> Result { + let num_rows = self + .pool + .get() + .await? + .interact(move |conn| { + let mut stmt = conn.prepare( + "INSERT INTO block_headers (block_num, block_header) VALUES (?1, ?2);", + )?; + stmt.execute(params![block_header.block_num, block_header.encode_to_vec()]) + }) + .await + .map_err(|_| anyhow!("Add block header task failed with a panic"))??; + + Ok(num_rows) + } + + /// Search for a [BlockHeader] from the DB by its `block_num`. + /// + /// When `block_number` is [None], the latest block header is returned. + pub async fn get_block_header( + &self, + block_number: Option, + ) -> Result, anyhow::Error> { + self.pool + .get() + .await? + .interact(move |conn| { + let mut stmt; + let mut rows = match block_number { + Some(block_number) => { + stmt = conn.prepare("SELECT block_header FROM block_headers WHERE block_num = ?1")?; + stmt.query([block_number])? + }, + None => { + stmt = conn.prepare("SELECT block_header FROM block_headers ORDER BY block_num DESC LIMIT 1")?; + stmt.query([])? + }, + }; + + match rows.next()? 
{ + Some(row) => { + let data = row.get_ref(0)?.as_blob()?; + Ok(Some(BlockHeader::decode(data)?)) + }, + None => Ok(None), + } + }) + .await + .map_err(|_| anyhow!("Get block header task failed with a panic"))? + } + + /// Loads all the block headers from the DB. + pub async fn get_block_headers(&self) -> Result, anyhow::Error> { + self.pool + .get() + .await? + .interact(|conn| { + let mut stmt = conn.prepare("SELECT block_header FROM block_headers;")?; + let mut rows = stmt.query([])?; + let mut result = vec![]; + while let Some(row) = rows.next()? { + let block_header_data = row.get_ref(0)?.as_blob()?; + let block_header = BlockHeader::decode(block_header_data)?; + result.push(block_header); + } + + Ok(result) + }) + .await + .map_err(|_| anyhow!("Get block headers task failed with a panic"))? + } + + /// Save a [Note] to the DB. + #[cfg(test)] + pub async fn add_note( + &self, + note: Note, + ) -> Result { + let num_rows = self + .pool + .get() + .await? + .interact(move |conn| -> Result<_, anyhow::Error> { + use miden_node_store::errors::DbError; + + let mut stmt = conn.prepare( + " + INSERT INTO + notes + ( + block_num, + note_index, + note_hash, + sender, + tag, + num_assets, + merkle_path + ) + VALUES + ( + ?1, ?2, ?3, ?4, ?5, ?6, ?7 + );", + )?; + let res = stmt.execute(params![ + note.block_num, + note.note_index, + note.note_hash.ok_or(DbError::NoteMissingHash)?.encode_to_vec(), + note.sender, + note.tag, + note.num_assets, + note.merkle_path.ok_or(DbError::NoteMissingMerklePath)?.encode_to_vec(), + ])?; + Ok(res) + }) + .await + .map_err(|_| anyhow!("Add note task failed with a panic"))??; + + Ok(num_rows) + } + + /// Return notes matching the tag and account_ids search criteria. + /// + /// # Returns + /// + /// - Empty vector if no tag created after `block_num` match `tags` or `account_ids`. + /// - Otherwise, notes which the 16 high bits match `tags`, or the `sender` is one of the + /// `account_ids`. 
+ /// + /// # Note + /// + /// This method returns notes from a single block. To fetch all notes up to the chain tip, + /// multiple requests are necessary. + pub async fn get_notes_since_block_by_tag_and_sender( + &self, + tags: &[u32], + account_ids: &[u64], + block_num: BlockNumber, + ) -> Result, anyhow::Error> { + let tags: Vec = tags.iter().copied().map(u32_to_value).collect(); + let account_ids = account_ids + .iter() + .copied() + .map(u64_to_value) + .collect::, anyhow::Error>>()?; + + let notes = self + .pool + .get() + .await? + .interact(move |conn| -> Result<_, anyhow::Error> { + let mut stmt = conn.prepare( + " + SELECT + block_num, + note_index, + note_hash, + sender, + tag, + num_assets, + merkle_path + FROM + notes + WHERE + -- find the next block which contains at least one note with a matching tag + block_num = ( + SELECT + block_num + FROM + notes + WHERE + ((tag >> 48) IN rarray(?1) OR sender IN rarray(?2)) AND + block_num > ?3 + ORDER BY + block_num ASC + LIMIT + 1 + ) AND + -- load notes that matches any of tags + (tag >> 48) IN rarray(?1); + ", + )?; + let mut rows = + stmt.query(params![Rc::new(tags), Rc::new(account_ids), block_num])?; + + let mut res = Vec::new(); + while let Some(row) = rows.next()? { + let block_num = row.get(0)?; + let note_index = row.get(1)?; + let note_hash_data = row.get_ref(2)?.as_blob()?; + let note_hash = Some(decode_protobuf_digest(note_hash_data)?); + let sender = row.get(3)?; + let tag = row.get(4)?; + let num_assets = row.get(5)?; + let merkle_path_data = row.get_ref(6)?.as_blob()?; + let merkle_path = Some(MerklePath::decode(merkle_path_data)?); + + let note = Note { + block_num, + note_index, + note_hash, + sender, + tag, + num_assets, + merkle_path, + }; + res.push(note); + } + Ok(res) + }) + .await + .map_err(|_| anyhow!("Get notes since block by tag task failed with a panic"))??; + + Ok(notes) + } + + /// Inserts or updates an account's hash in the DB. 
+ #[cfg(test)] + pub async fn update_account_hash( + &self, + account_id: AccountId, + account_hash: Digest, + block_num: BlockNumber, + ) -> Result { + let num_rows = self + .pool + .get() + .await? + .interact(move |conn| { + let mut stmt = conn.prepare( + "INSERT OR REPLACE INTO accounts (account_id, account_hash, block_num) VALUES (?1, ?2, ?3);", + )?; + stmt.execute(params![account_id, account_hash.encode_to_vec(), block_num]) + }) + .await + .map_err(|_| anyhow!("Update account hash task failed with a panic"))??; + + Ok(num_rows) + } + + pub async fn get_account_hash_by_block_range( + &self, + block_start: BlockNumber, + block_end: BlockNumber, + account_ids: &[AccountId], + ) -> Result, anyhow::Error> { + let account_ids = account_ids + .iter() + .copied() + .map(u64_to_value) + .collect::, anyhow::Error>>()?; + + self.pool + .get() + .await? + .interact(move |conn| { + let mut stmt = conn.prepare( + " + SELECT + account_id, account_hash, block_num + FROM + accounts + WHERE + block_num > ?1 AND + block_num <= ?2 AND + account_id IN rarray(?3) + ", + )?; + + let mut rows = stmt.query(params![block_start, block_end, Rc::new(account_ids)])?; + + let mut result = Vec::new(); + while let Some(row) = rows.next()? { + let account_id_data: u64 = row.get(0)?; + let account_id: account_id::AccountId = account_id_data.into(); + let account_hash_data = row.get_ref(1)?.as_blob()?; + let account_hash = Digest::decode(account_hash_data)?; + let block_num = row.get(2)?; + + result.push(AccountHashUpdate { + account_id: Some(account_id), + account_hash: Some(account_hash), + block_num, + }); + } + + Ok(result) + }) + .await + .map_err(|_| anyhow!("Get account hash by block number task failed with a panic"))? 
+ } + + pub async fn get_state_sync( + &self, + block_num: BlockNumber, + account_ids: &[u64], + note_tag_prefixes: &[u32], + nullifiers_prefix: &[u32], + ) -> Result { + let notes = self + .get_notes_since_block_by_tag_and_sender(¬e_tag_prefixes, &account_ids, block_num) + .await?; + + let (block_header, chain_tip) = if !notes.is_empty() { + let block_header = self + .get_block_header(Some(notes[0].block_num)) + .await? + .ok_or(anyhow!("Block db is empty"))?; + let tip = self.get_block_header(None).await?.ok_or(anyhow!("Block db is empty"))?; + + (block_header, tip.block_num) + } else { + let block_header = + self.get_block_header(None).await?.ok_or(anyhow!("Block db is empty"))?; + + let block_num = block_header.block_num; + (block_header, block_num) + }; + + let account_updates = self + .get_account_hash_by_block_range(block_num, block_header.block_num, &account_ids) + .await?; + + let nullifiers = self + .get_nullifiers_by_block_range(block_num, block_header.block_num, &nullifiers_prefix) + .await?; + + Ok(StateSyncUpdate { + notes, + block_header, + chain_tip, + account_updates, + nullifiers, + }) + } +} + +// UTILITIES +// ================================================================================================ + +/// Decodes a blob from the database into a [RpoDigest]. +fn decode_rpo_digest(data: &[u8]) -> Result { + let mut reader = SliceReader::new(data); + RpoDigest::read_from(&mut reader).map_err(|_| anyhow!("Decoding nullifier from DB failed")) +} + +/// Decodes a blob from the database into a [Digest]. +fn decode_protobuf_digest(data: &[u8]) -> Result { + Ok(Digest::decode(data)?) +} + +/// Converts a `u64` into a [Value]. +/// +/// Sqlite uses `i64` as its internal representation format. +fn u64_to_value(v: u64) -> Result { + let v: i64 = v.try_into()?; + Ok(Value::Integer(v)) +} + +/// Converts a `u32` into a [Value]. +/// +/// Sqlite uses `i64` as its internal representation format. 
+fn u32_to_value(v: u32) -> Value { + let v: i64 = v.into(); + Value::Integer(v) +} + +/// Returns the high bits of the `u64` value used during searches. +fn u64_to_prefix(v: u64) -> u32 { + (v >> 48) as u32 +} diff --git a/store/src/db/tests.rs b/store/src/db/tests.rs new file mode 100644 index 000000000..c66999452 --- /dev/null +++ b/store/src/db/tests.rs @@ -0,0 +1,350 @@ +use super::{u64_to_prefix, Db}; +use crate::config::{Endpoint, StoreConfig}; +use miden_crypto::{hash::rpo::RpoDigest, merkle::NodeIndex, StarkField}; +use miden_node_proto::{ + block_header::BlockHeader as ProtobufBlockHeader, + digest::Digest as ProtobufDigest, + merkle::MerklePath, + note::Note, + responses::{AccountHashUpdate, NullifierUpdate}, +}; +use miden_objects::{crypto::merkle::SimpleSmt, notes::NOTE_LEAF_DEPTH, Felt, FieldElement}; + +#[tokio::test] +async fn test_db_nullifiers() { + let db = Db::get_conn(StoreConfig { + endpoint: Endpoint { + host: "0.0.0.0".to_string(), + port: 8080, + }, + sqlite: ":memory:".into(), + }) + .await + .unwrap(); + + // test querying empty table + let nullifiers = db.get_nullifiers().await.unwrap(); + assert!(nullifiers.is_empty()); + + let nullifiers = db.get_nullifiers_by_block_range(0, u32::MAX, &[]).await.unwrap(); + assert!(nullifiers.is_empty()); + + // test insertion + let nullifier1 = num_to_rpo_digest(1 << 48); + let block_number1 = 1; + db.add_nullifier(nullifier1, block_number1).await.unwrap(); + + // test load + let nullifiers = db.get_nullifiers().await.unwrap(); + assert_eq!(nullifiers, vec![(nullifier1, block_number1)]); + + let nullifiers = db + .get_nullifiers_by_block_range(0, u32::MAX, &[u64_to_prefix(nullifier1[0].as_int())]) + .await + .unwrap(); + assert_eq!( + nullifiers, + vec![NullifierUpdate { + nullifier: Some(nullifier1.into()), + block_num: block_number1 + }] + ); + + // test additional element + let nullifier2 = num_to_rpo_digest(2 << 48); + let block_number2 = 2; + db.add_nullifier(nullifier2,
block_number2).await.unwrap(); + + let nullifiers = db.get_nullifiers().await.unwrap(); + assert_eq!(nullifiers, vec![(nullifier1, block_number1), (nullifier2, block_number2)]); + + // only the nullifiers matching the prefix are included + let nullifiers = db + .get_nullifiers_by_block_range(0, u32::MAX, &[u64_to_prefix(nullifier1[0].as_int())]) + .await + .unwrap(); + assert_eq!( + nullifiers, + vec![NullifierUpdate { + nullifier: Some(nullifier1.into()), + block_num: block_number1 + }] + ); + let nullifiers = db + .get_nullifiers_by_block_range(0, u32::MAX, &[u64_to_prefix(nullifier2[0].as_int())]) + .await + .unwrap(); + assert_eq!( + nullifiers, + vec![NullifierUpdate { + nullifier: Some(nullifier2.into()), + block_num: block_number2 + }] + ); + + // Nullifiers created at block_end are included + let nullifiers = db + .get_nullifiers_by_block_range( + 0, + 1, + &[u64_to_prefix(nullifier1[0].as_int()), u64_to_prefix(nullifier2[0].as_int())], + ) + .await + .unwrap(); + assert_eq!( + nullifiers, + vec![NullifierUpdate { + nullifier: Some(nullifier1.into()), + block_num: block_number1 + }] + ); + + // Nullifiers created at block_start are not included + let nullifiers = db + .get_nullifiers_by_block_range( + 1, + u32::MAX, + &[u64_to_prefix(nullifier1[0].as_int()), u64_to_prefix(nullifier2[0].as_int())], + ) + .await + .unwrap(); + assert_eq!( + nullifiers, + vec![NullifierUpdate { + nullifier: Some(nullifier2.into()), + block_num: block_number2 + }] + ); +} + +#[tokio::test] +async fn test_db_block_header() { + let db = Db::get_conn(StoreConfig { + endpoint: Endpoint { + host: "0.0.0.0".to_string(), + port: 8080, + }, + sqlite: ":memory:".into(), + }) + .await + .unwrap(); + + // test querying empty table + let block_number = 1; + let res = db.get_block_header(Some(block_number)).await.unwrap(); + assert!(res.is_none()); + + let res = db.get_block_header(None).await.unwrap(); + assert!(res.is_none()); + + let res = db.get_block_headers().await.unwrap(); + 
assert!(res.is_empty()); + + let block_header = ProtobufBlockHeader { + prev_hash: Some(num_to_protobuf_digest(1)), + block_num: 2, + chain_root: Some(num_to_protobuf_digest(3)), + account_root: Some(num_to_protobuf_digest(4)), + nullifier_root: Some(num_to_protobuf_digest(5)), + note_root: Some(num_to_protobuf_digest(6)), + batch_root: Some(num_to_protobuf_digest(7)), + proof_hash: Some(num_to_protobuf_digest(8)), + version: 9, + timestamp: 10, + }; + + // test insertion + db.add_block_header(block_header.clone()).await.unwrap(); + + // test fetch unknown block header + let block_number = 1; + let res = db.get_block_header(Some(block_number)).await.unwrap(); + assert!(res.is_none()); + + // test fetch block header by block number + let res = db.get_block_header(Some(block_header.block_num)).await.unwrap(); + assert_eq!(res.unwrap(), block_header); + + // test fetch latest block header + let res = db.get_block_header(None).await.unwrap(); + assert_eq!(res.unwrap(), block_header); + + let block_header2 = ProtobufBlockHeader { + prev_hash: Some(num_to_protobuf_digest(11)), + block_num: 12, + chain_root: Some(num_to_protobuf_digest(13)), + account_root: Some(num_to_protobuf_digest(14)), + nullifier_root: Some(num_to_protobuf_digest(15)), + note_root: Some(num_to_protobuf_digest(16)), + batch_root: Some(num_to_protobuf_digest(17)), + proof_hash: Some(num_to_protobuf_digest(18)), + version: 19, + timestamp: 20, + }; + db.add_block_header(block_header2.clone()).await.unwrap(); + let res = db.get_block_header(None).await.unwrap(); + assert_eq!(res.unwrap(), block_header2); + + let res = db.get_block_headers().await.unwrap(); + assert_eq!(res, [block_header, block_header2]); +} + +#[tokio::test] +async fn test_db_account() { + let db = Db::get_conn(StoreConfig { + endpoint: Endpoint { + host: "0.0.0.0".to_string(), + port: 8080, + }, + sqlite: ":memory:".into(), + }) + .await + .unwrap(); + + // test empty table + let account_ids = vec![0, 1, 2, 3, 4, 5]; + let res = 
db.get_account_hash_by_block_range(0, u32::MAX, &account_ids).await.unwrap(); + assert!(res.is_empty()); + + // test insertion + let block_num = 1; + let account_id = 0; + let account_hash = num_to_protobuf_digest(0); + let row_count = db + .update_account_hash(account_id, account_hash.clone(), block_num) + .await + .unwrap(); + assert_eq!(row_count, 1); + + // test successful query + let res = db.get_account_hash_by_block_range(0, u32::MAX, &account_ids).await.unwrap(); + assert_eq!( + res, + vec![AccountHashUpdate { + account_id: Some(account_id.into()), + account_hash: Some(account_hash.into()), + block_num + }] + ); + + // test query for update outside of the block range + let res = db + .get_account_hash_by_block_range(block_num + 1, u32::MAX, &account_ids) + .await + .unwrap(); + assert!(res.is_empty()); + + // test query with unknown accounts + let res = db + .get_account_hash_by_block_range(block_num + 1, u32::MAX, &[6, 7, 8]) + .await + .unwrap(); + assert!(res.is_empty()); +} + +#[tokio::test] +async fn test_notes() { + let db = Db::get_conn(StoreConfig { + endpoint: Endpoint { + host: "0.0.0.0".to_string(), + port: 8080, + }, + sqlite: ":memory:".into(), + }) + .await + .unwrap(); + + // test empty table + let res = db.get_notes_since_block_by_tag_and_sender(&[], &[], 0).await.unwrap(); + assert!(res.is_empty()); + + let res = db.get_notes_since_block_by_tag_and_sender(&[1, 2, 3], &[], 0).await.unwrap(); + assert!(res.is_empty()); + + // test insertion + let block_num = 1; + let note_index = 2u32; + let tag = 5; + let note_hash = num_to_rpo_digest(3); + let values = [(note_index as u64, *note_hash)]; + let notes_db = SimpleSmt::with_leaves(NOTE_LEAF_DEPTH, values.iter().cloned()).unwrap(); + let idx = NodeIndex::new(NOTE_LEAF_DEPTH, note_index as u64).unwrap(); + let merkle_path = notes_db.get_path(idx).unwrap(); + + let merkle_path: Vec = + merkle_path.nodes().iter().map(|n| (*n).into()).collect(); + + let note = Note { + block_num, + note_index, + 
note_hash: Some(num_to_protobuf_digest(3)), + sender: 4, + tag, + num_assets: 6, + merkle_path: Some(MerklePath { + siblings: merkle_path.clone(), + }), + }; + db.add_note(note.clone()).await.unwrap(); + + // test empty tags + let res = db.get_notes_since_block_by_tag_and_sender(&[], &[], 0).await.unwrap(); + assert!(res.is_empty()); + + // test no updates + let res = db + .get_notes_since_block_by_tag_and_sender(&[(tag >> 48) as u32], &[], block_num) + .await + .unwrap(); + assert!(res.is_empty()); + + // test match + let res = db + .get_notes_since_block_by_tag_and_sender(&[(tag >> 48) as u32], &[], block_num - 1) + .await + .unwrap(); + assert_eq!(res, vec![note.clone()]); + + // insertion second note with same tag, but on higher block + let note2 = Note { + block_num: note.block_num + 1, + note_index: note.note_index, + note_hash: Some(num_to_protobuf_digest(3)), + sender: note.sender, + tag: note.tag, + num_assets: note.num_assets, + merkle_path: Some(MerklePath { + siblings: merkle_path, + }), + }; + db.add_note(note2.clone()).await.unwrap(); + + // only first note is returned + let res = db + .get_notes_since_block_by_tag_and_sender(&[(tag >> 48) as u32], &[], block_num - 1) + .await + .unwrap(); + assert_eq!(res, vec![note.clone()]); + + // only the second note is returned + let res = db + .get_notes_since_block_by_tag_and_sender(&[(tag >> 48) as u32], &[], block_num) + .await + .unwrap(); + assert_eq!(res, vec![note2.clone()]); +} + +// UTILITIES +// ------------------------------------------------------------------------------------------- +fn num_to_rpo_digest(n: u64) -> RpoDigest { + RpoDigest::new([Felt::new(n), Felt::ZERO, Felt::ZERO, Felt::ZERO]) +} + +fn num_to_protobuf_digest(n: u64) -> ProtobufDigest { + ProtobufDigest { + d0: 0, + d1: 0, + d2: 0, + d3: n, + } +} diff --git a/store/src/errors.rs b/store/src/errors.rs new file mode 100644 index 000000000..17ad117a8 --- /dev/null +++ b/store/src/errors.rs @@ -0,0 +1,19 @@ +#[derive(Copy, Clone, 
Debug, PartialEq)] +pub enum DbError { + NoteMissingMerklePath, + NoteMissingHash, +} + +impl std::error::Error for DbError {} + +impl std::fmt::Display for DbError { + fn fmt( + &self, + f: &mut std::fmt::Formatter<'_>, + ) -> std::fmt::Result { + match self { + DbError::NoteMissingMerklePath => write!(f, "Note message is missing the merkle path"), + DbError::NoteMissingHash => write!(f, "Note message is missing the note's hash"), + } + } +} diff --git a/store/src/lib.rs b/store/src/lib.rs index ef68c3694..8ad1ae2c8 100644 --- a/store/src/lib.rs +++ b/store/src/lib.rs @@ -1 +1,2 @@ pub mod config; +pub mod errors; diff --git a/store/src/migrations.rs b/store/src/migrations.rs index f99d284da..9ba9606fe 100644 --- a/store/src/migrations.rs +++ b/store/src/migrations.rs @@ -5,24 +5,59 @@ pub static MIGRATIONS: Lazy = Lazy::new(|| { Migrations::new(vec![M::up( " CREATE TABLE - block_header + block_headers ( block_num INTEGER NOT NULL, block_header BLOB NOT NULL, PRIMARY KEY (block_num), - CONSTRAINT block_header_block_num_positive CHECK (block_num >= 0) + CONSTRAINT block_header_block_num_is_u32 CHECK (block_num >= 0 AND block_num < 4294967296) + ) STRICT, WITHOUT ROWID; + + CREATE TABLE + notes + ( + block_num INTEGER NOT NULL, + note_index INTEGER NOT NULL, + note_hash BLOB NOT NULL, + sender INTEGER NOT NULL, + tag INTEGER NOT NULL, + num_assets INTEGER NOT NULL, + merkle_path BLOB NOT NULL, + + PRIMARY KEY (block_num, note_index), + CONSTRAINT notes_block_number_is_u32 CHECK (block_num >= 0 AND block_num < 4294967296), + CONSTRAINT notes_note_index_is_u32 CHECK (note_index >= 0 AND note_index < 4294967296), + CONSTRAINT notes_sender_is_felt CHECK (sender >= 0 AND sender <= 18446744069414584321), + CONSTRAINT notes_tag_is_felt CHECK (tag >= 0 AND tag <= 18446744069414584321), + CONSTRAINT notes_num_assets_is_u8 CHECK (num_assets >= 0 AND num_assets < 256), + FOREIGN KEY (block_num) REFERENCES block_headers (block_num) + ) STRICT, WITHOUT ROWID; + + CREATE TABLE + accounts + ( + 
account_id INTEGER NOT NULL, + account_hash BLOB NOT NULL, + block_num INTEGER NOT NULL, + + PRIMARY KEY (account_id), + CONSTRAINT accounts_block_account_id_is_felt CHECK (account_id >= 0 AND account_id < 18446744069414584321), + CONSTRAINT accounts_block_num_is_u32 CHECK (block_num >= 0 AND block_num < 4294967296), + FOREIGN KEY (block_num) REFERENCES block_headers (block_num) ) STRICT, WITHOUT ROWID; CREATE TABLE nullifiers ( nullifier BLOB NOT NULL, + nullifier_prefix INTEGER NOT NULL, block_number INTEGER NOT NULL, PRIMARY KEY (nullifier), - CONSTRAINT nullifiers_nullifier_valid_digest CHECK (length(nullifier) = 32), - CONSTRAINT nullifiers_block_number_positive CHECK (block_number >= 0), + CONSTRAINT nullifiers_nullifier_is_digest CHECK (length(nullifier) = 32), + CONSTRAINT nullifiers_nullifier_prefix_is_u16 CHECK (nullifier_prefix >= 0 AND nullifier_prefix < 65536), + CONSTRAINT nullifiers_block_number_is_u32 CHECK (block_number >= 0 AND block_number < 4294967296), FOREIGN KEY (block_number) REFERENCES block_header (block_num) ) STRICT, WITHOUT ROWID; ", diff --git a/store/src/server/api.rs b/store/src/server/api.rs index 16d1c254d..15a83b851 100644 --- a/store/src/server/api.rs +++ b/store/src/server/api.rs @@ -1,15 +1,22 @@ use crate::config::StoreConfig; use crate::db::Db; use anyhow::Result; -use miden_crypto::{hash::rpo::RpoDigest, merkle::TieredSmt, Felt, FieldElement, StarkField}; +use miden_crypto::{ + hash::rpo::RpoDigest, + merkle::{Mmr, TieredSmt}, + Felt, FieldElement, StarkField, +}; use miden_node_proto::{ digest::Digest, - store::{ - api_server, CheckNullifiersRequest, CheckNullifiersResponse, - FetchBlockHeaderByNumberRequest, FetchBlockHeaderByNumberResponse, - }, + error::ParseError, + merkle::MerklePath, + mmr::MmrDelta, + requests::{CheckNullifiersRequest, FetchBlockHeaderByNumberRequest, SyncStateRequest}, + responses::{CheckNullifiersResponse, FetchBlockHeaderByNumberResponse, SyncStateResponse}, + store::api_server, tsmt::{self, 
NullifierLeaf}, }; +use miden_objects::BlockHeader; use std::{net::ToSocketAddrs, sync::Arc}; use tokio::{sync::RwLock, time::Instant}; use tonic::{transport::Server, Response, Status}; @@ -18,6 +25,7 @@ use tracing::{info, instrument}; pub struct StoreApi { db: Db, nullifier_tree: Arc>, + mmr: Arc>, } #[tonic::async_trait] @@ -71,30 +79,61 @@ impl api_server::Api for StoreApi { request: tonic::Request, ) -> Result, Status> { let request = request.into_inner(); - let block_header = self - .db - .get_block_header(request.block_num) - .await - .map_err(|err| Status::internal(format!("{:?}", err)))?; + let block_header = + self.db.get_block_header(request.block_num).await.map_err(internal_error)?; Ok(Response::new(FetchBlockHeaderByNumberResponse { block_header })) } -} -#[instrument(skip(db))] -async fn load_nullifier_tree(db: &mut Db) -> Result { - let nullifiers = db.get_nullifiers().await?; - let len = nullifiers.len(); - let leaves = nullifiers.into_iter().map(|(nullifier, block)| { - (nullifier, [Felt::new(block), Felt::ZERO, Felt::ZERO, Felt::ZERO]) - }); + async fn sync_state( + &self, + request: tonic::Request, + ) -> Result, Status> { + let request = request.into_inner(); - let now = Instant::now(); - let nullifier_tree = TieredSmt::with_entries(leaves)?; - let elapsed = now.elapsed().as_secs(); + let account_ids: Vec = request.account_ids.iter().map(|e| e.id).collect(); - info!(num_of_leaves = len, tree_construction = elapsed, "Loaded nullifier tree"); - Ok(nullifier_tree) + let state_sync = self + .db + .get_state_sync( + request.block_num, + &account_ids, + &request.note_tags, + &request.nullifiers, + ) + .await + .map_err(internal_error)?; + + // scope to read from the mmr + let (delta, path): (MmrDelta, MerklePath) = { + let mmr = self.mmr.read().await; + let delta = mmr + .get_delta(request.block_num as usize, state_sync.block_header.block_num as usize) + .map_err(internal_error)? 
+ .try_into() + .map_err(internal_error)?; + + let proof = mmr + .open( + state_sync.block_header.block_num as usize, + state_sync.block_header.block_num as usize, + ) + .map_err(internal_error)?; + + (delta, proof.merkle_path.into()) + }; + + let notes = state_sync.notes.into_iter().map(|v| v.into()).collect(); + Ok(Response::new(SyncStateResponse { + chain_tip: state_sync.chain_tip, + block_header: Some(state_sync.block_header), + mmr_delta: Some(delta), + block_path: Some(path), + accounts: state_sync.account_updates, + notes, + nullifiers: state_sync.nullifiers, + })) + } } pub async fn serve( @@ -104,11 +143,14 @@ pub async fn serve( let host_port = (config.endpoint.host.as_ref(), config.endpoint.port); let addrs: Vec<_> = host_port.to_socket_addrs()?.collect(); - let tree_data = load_nullifier_tree(&mut db).await?; - let tree_lock = Arc::new(RwLock::new(tree_data)); + let nullifier_data = load_nullifier_tree(&mut db).await?; + let nullifier_lock = Arc::new(RwLock::new(nullifier_data)); + let mmr_data = load_mmr(&mut db).await?; + let mmr_lock = Arc::new(RwLock::new(mmr_data)); let db = api_server::ApiServer::new(StoreApi { db, - nullifier_tree: tree_lock, + nullifier_tree: nullifier_lock, + mmr: mmr_lock, }); info!(host = config.endpoint.host, port = config.endpoint.port, "Server initialized",); @@ -116,3 +158,40 @@ pub async fn serve( Ok(()) } + +// UTILITIES +// ================================================================================================ + +#[instrument(skip(db))] +async fn load_nullifier_tree(db: &mut Db) -> Result { + let nullifiers = db.get_nullifiers().await?; + let len = nullifiers.len(); + let leaves = nullifiers.into_iter().map(|(nullifier, block)| { + (nullifier, [Felt::new(block as u64), Felt::ZERO, Felt::ZERO, Felt::ZERO]) + }); + + let now = Instant::now(); + let nullifier_tree = TieredSmt::with_entries(leaves)?; + let elapsed = now.elapsed().as_secs(); + + info!(num_of_leaves = len, tree_construction = elapsed, "Loaded 
nullifier tree"); + Ok(nullifier_tree) +} + +#[instrument(skip(db))] +async fn load_mmr(db: &mut Db) -> Result { + let block_hashes: Result, ParseError> = db + .get_block_headers() + .await? + .into_iter() + .map(|b| b.try_into().map(|b: BlockHeader| b.hash())) + .collect(); + + let mmr: Mmr = block_hashes?.into(); + Ok(mmr) +} + +/// Formats an error +fn internal_error(err: E) -> Status { + Status::internal(format!("{:?}", err)) +} diff --git a/store/src/types.rs b/store/src/types.rs index 234e77581..44702b127 100644 --- a/store/src/types.rs +++ b/store/src/types.rs @@ -1 +1,2 @@ -pub type BlockNumber = u64; +pub type BlockNumber = u32; +pub type AccountId = u64;