diff --git a/.gitignore b/.gitignore index 162dbe853..20ed84153 100644 --- a/.gitignore +++ b/.gitignore @@ -62,6 +62,9 @@ tmp/ *.info *.profraw +# p2p identity file +/identity.json + # Running madara with make and docker compose .secrets image.tar.gz diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..30fea2b4e --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "crates/madara/primitives/proto/starknet-p2p-specs"] + path = crates/madara/primitives/proto/starknet-p2p-specs + url = https://github.com/starknet-io/starknet-p2p-specs diff --git a/Cargo.lock b/Cargo.lock index 50ce03c38..d8f76a4b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17,6 +17,16 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +[[package]] +name = "aead" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" +dependencies = [ + "crypto-common", + "generic-array", +] + [[package]] name = "aes" version = "0.8.4" @@ -28,6 +38,20 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "aes-gcm" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" +dependencies = [ + "aead", + "aes", + "cipher", + "ctr", + "ghash", + "subtle", +] + [[package]] name = "ahash" version = "0.8.11" @@ -844,6 +868,12 @@ dependencies = [ "rand", ] +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + [[package]] name = "arrayvec" version = "0.7.6" @@ -859,6 +889,45 @@ dependencies = [ "term", ] +[[package]] +name = "asn1-rs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "5493c3bedbacf7fd7382c6346bbd66687d12bbaad3a89a2d2c303ee6cf20b048" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits 0.2.19", + "rusticata-macros", + "thiserror 1.0.65", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "assert-json-diff" version = "2.0.2" @@ -916,11 +985,22 @@ checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" dependencies = [ "async-task", "concurrent-queue", - "fastrand", - "futures-lite", + "fastrand 2.1.1", + "futures-lite 2.3.0", "slab", ] +[[package]] +name = "async-fs" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebcd09b382f40fcd159c2d695175b2ae620ffa5f3bd6f664131efff4e8b9e04a" +dependencies = [ + "async-lock 3.4.0", + "blocking", + "futures-lite 2.3.0", +] + [[package]] name = "async-global-executor" version = "2.4.1" @@ -929,32 +1009,61 @@ checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ "async-channel 2.3.1", "async-executor", - "async-io", - "async-lock", + "async-io 2.3.4", + "async-lock 3.4.0", "blocking", - "futures-lite", + "futures-lite 2.3.0", "once_cell", ] +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "cfg-if", + 
"concurrent-queue", + "futures-lite 1.13.0", + "log", + "parking", + "polling 2.8.0", + "rustix 0.37.27", + "slab", + "socket2 0.4.10", + "waker-fn", +] + [[package]] name = "async-io" version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" dependencies = [ - "async-lock", + "async-lock 3.4.0", "cfg-if", "concurrent-queue", "futures-io", - "futures-lite", + "futures-lite 2.3.0", "parking", - "polling", - "rustix", + "polling 3.7.3", + "rustix 0.38.38", "slab", "tracing", "windows-sys 0.59.0", ] +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + [[package]] name = "async-lock" version = "3.4.0" @@ -966,6 +1075,17 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-net" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" +dependencies = [ + "async-io 2.3.4", + "blocking", + "futures-lite 2.3.0", +] + [[package]] name = "async-object-pool" version = "0.1.5" @@ -982,15 +1102,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" dependencies = [ "async-channel 2.3.1", - "async-io", - "async-lock", + "async-io 2.3.4", + "async-lock 3.4.0", "async-signal", "async-task", "blocking", "cfg-if", "event-listener 5.3.1", - "futures-lite", - "rustix", + "futures-lite 2.3.0", + "rustix 0.38.38", "tracing", ] @@ -1000,13 +1120,13 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" dependencies = [ - "async-io", - "async-lock", + "async-io 2.3.4", + 
"async-lock 3.4.0", "atomic-waker", "cfg-if", "futures-core", "futures-io", - "rustix", + "rustix 0.38.38", "signal-hook-registry", "slab", "windows-sys 0.59.0", @@ -1021,14 +1141,14 @@ dependencies = [ "async-attributes", "async-channel 1.9.0", "async-global-executor", - "async-io", - "async-lock", + "async-io 2.3.4", + "async-lock 3.4.0", "async-process", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite", + "futures-lite 2.3.0", "gloo-timers 0.3.0", "kv-log-macro", "log", @@ -1092,12 +1212,36 @@ dependencies = [ "tungstenite", ] +[[package]] +name = "asynchronous-codec" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a860072022177f903e59730004fb5dc13db9275b79bb2aef7ba8ce831956c233" +dependencies = [ + "bytes", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite", +] + [[package]] name = "atomic-waker" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" +[[package]] +name = "attohttpc" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" +dependencies = [ + "http 0.2.12", + "log", + "url", +] + [[package]] name = "atty" version = "0.2.14" @@ -1188,6 +1332,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "base-x" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" + [[package]] name = "base16ct" version = "0.2.0" @@ -1240,9 +1390,9 @@ dependencies = [ [[package]] name = "bigdecimal" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d712318a27c7150326677b321a5fa91b55f6d9034ffd67f20319e147d40cee" +checksum = 
"7f31f3af01c5c65a07985c804d3366560e6fa7883d640a122819b14ec327482c" dependencies = [ "autocfg", "libm", @@ -1328,6 +1478,15 @@ dependencies = [ "wyz", ] +[[package]] +name = "blake2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "block-buffer" version = "0.9.0" @@ -1397,7 +1556,7 @@ dependencies = [ "async-channel 2.3.1", "async-task", "futures-io", - "futures-lite", + "futures-lite 2.3.0", "piper", ] @@ -1431,6 +1590,15 @@ dependencies = [ "thiserror 2.0.3", ] +[[package]] +name = "bs58" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" +dependencies = [ + "tinyvec", +] + [[package]] name = "bstr" version = "1.10.0" @@ -3260,6 +3428,36 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "chacha20" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", +] + +[[package]] +name = "chacha20poly1305" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" +dependencies = [ + "aead", + "chacha20", + "cipher", + "poly1305", + "zeroize", +] + [[package]] name = "chrono" version = "0.4.38" @@ -3283,6 +3481,7 @@ checksum = 
"773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", + "zeroize", ] [[package]] @@ -3456,6 +3655,15 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "core2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505" +dependencies = [ + "memchr", +] + [[package]] name = "cpufeatures" version = "0.2.14" @@ -3524,6 +3732,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", + "rand_core", "typenum", ] @@ -3536,6 +3745,33 @@ dependencies = [ "cipher", ] +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest 0.10.7", + "fiat-crypto", + "rustc_version 0.4.1", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "darling" version = "0.14.4" @@ -3626,6 +3862,26 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" +[[package]] +name = "data-encoding-macro" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" +dependencies = [ + 
"data-encoding", + "data-encoding-macro-internal", +] + +[[package]] +name = "data-encoding-macro-internal" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" +dependencies = [ + "data-encoding", + "syn 1.0.109", +] + [[package]] name = "der" version = "0.7.9" @@ -3636,6 +3892,20 @@ dependencies = [ "zeroize", ] +[[package]] +name = "der-parser" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cd0a5c643689626bec213c4d8bd4d96acc8ffdb4ad4bb6bc16abf27d5f4b553" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits 0.2.19", + "rusticata-macros", +] + [[package]] name = "deranged" version = "0.3.11" @@ -3657,6 +3927,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive-where" +version = "1.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62d671cc41a825ebabc75757b62d3d168c577f9149b2d49ece1dad1f72119d25" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "derive_more" version = "0.99.18" @@ -3748,6 +4029,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "dotenv" version = "0.15.0" @@ -3760,6 +4052,12 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" +[[package]] +name = "dtoa" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" + [[package]] name = "dunce" version = "1.0.5" @@ -3786,6 +4084,31 @@ dependencies = 
[ "spki", ] +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8", + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand_core", + "serde", + "sha2", + "subtle", + "zeroize", +] + [[package]] name = "either" version = "1.13.0" @@ -3835,6 +4158,18 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "enum-as-inner" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "env_logger" version = "0.9.3" @@ -3940,6 +4275,15 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + [[package]] name = "fastrand" version = "2.1.1" @@ -3977,6 +4321,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + [[package]] name = "fixed-hash" version = "0.8.0" @@ -4069,12 +4419,22 @@ dependencies = [ ] [[package]] -name = "futures-channel" -version = "0.3.31" +name = "futures-bounded" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = 
"91f328e7fb845fc832912fb6a34f40cf6d1888c92f974d1893a54e97b5ff542e" dependencies = [ - "futures-core", + "futures-timer", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", "futures-sink", ] @@ -4102,13 +4462,28 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + [[package]] name = "futures-lite" version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand", + "fastrand 2.1.1", "futures-core", "futures-io", "parking", @@ -4126,6 +4501,17 @@ dependencies = [ "syn 2.0.89", ] +[[package]] +name = "futures-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" +dependencies = [ + "futures-io", + "rustls 0.23.16", + "rustls-pki-types", +] + [[package]] name = "futures-sink" version = "0.3.31" @@ -4138,6 +4524,17 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +[[package]] +name = "futures-ticker" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9763058047f713632a52e916cc7f6a4b3fc6e9fc1ff8c5b1dc49e5a89041682e" 
+dependencies = [ + "futures", + "futures-timer", + "instant", +] + [[package]] name = "futures-timer" version = "3.0.3" @@ -4218,6 +4615,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "ghash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" +dependencies = [ + "opaque-debug", + "polyval", +] + [[package]] name = "gimli" version = "0.31.1" @@ -4456,6 +4863,67 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" +[[package]] +name = "hex_fmt" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" + +[[package]] +name = "hickory-proto" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.4.0", + "ipnet", + "once_cell", + "rand", + "socket2 0.5.7", + "thiserror 1.0.65", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "hickory-resolver" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot 0.12.3", + "rand", + "resolv-conf", + "smallvec", + "thiserror 1.0.65", + "tokio", + "tracing", +] + +[[package]] +name = "hkdf" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" +dependencies = [ + "hmac", +] + 
[[package]] name = "hmac" version = "0.12.1" @@ -4465,6 +4933,17 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "hostname" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +dependencies = [ + "libc", + "match_cfg", + "winapi", +] + [[package]] name = "http" version = "0.2.12" @@ -4590,7 +5069,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -4693,7 +5172,7 @@ dependencies = [ "http-body 1.0.1", "hyper 1.5.0", "pin-project-lite", - "socket2", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -4734,6 +5213,16 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" +[[package]] +name = "idna" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "idna" version = "0.5.0" @@ -4744,6 +5233,59 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "if-addrs" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a" +dependencies = [ + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "if-watch" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdf9d64cfcf380606e64f9a0bcf493616b65331199f984151a6fa11a7b3cde38" +dependencies = [ + "async-io 2.3.4", + "core-foundation", + "fnv", + "futures", + "if-addrs", + "ipnet", + "log", + "netlink-packet-core", + "netlink-packet-route", + "netlink-proto", + "netlink-sys", + "rtnetlink", + "smol", + "system-configuration 0.6.1", + "tokio", + 
"windows", +] + +[[package]] +name = "igd-next" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "064d90fec10d541084e7b39ead8875a5a80d9114a2b18791565253bae25f49e4" +dependencies = [ + "async-trait", + "attohttpc", + "bytes", + "futures", + "http 0.2.12", + "hyper 0.14.31", + "log", + "rand", + "tokio", + "url", + "xmltree", +] + [[package]] name = "ignore" version = "0.4.23" @@ -4838,6 +5380,16 @@ version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" +[[package]] +name = "informalsystems-malachitebft-core-types" +version = "0.0.1" +source = "git+https://github.com/informalsystems/malachite.git#1d50e2bdf92bf90c58dd39ee9ba2660255b8f6b8" +dependencies = [ + "bytes", + "derive-where", + "thiserror 2.0.3", +] + [[package]] name = "inout" version = "0.1.3" @@ -4856,6 +5408,29 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "io-lifetimes" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.48.0", +] + +[[package]] +name = "ipconfig" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" +dependencies = [ + "socket2 0.5.7", + "widestring", + "windows-sys 0.48.0", + "winreg", +] + [[package]] name = "ipnet" version = "2.10.1" @@ -5232,7 +5807,7 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin", + "spin 0.9.8", ] [[package]] @@ -5270,78 +5845,651 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00419de735aac21d53b0de5ce2c03bd3627277cf471300f27ebc89f7d828047" 
[[package]] -name = "libredox" -version = "0.1.3" +name = "libp2p" +version = "0.54.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +checksum = "bbbe80f9c7e00526cd6b838075b9c171919404a4732cb2fa8ece0a093223bfc4" dependencies = [ - "bitflags 2.6.0", - "libc", + "bytes", + "either", + "futures", + "futures-timer", + "getrandom", + "libp2p-allow-block-list", + "libp2p-autonat", + "libp2p-connection-limits", + "libp2p-core", + "libp2p-dcutr", + "libp2p-dns", + "libp2p-gossipsub", + "libp2p-identify", + "libp2p-identity", + "libp2p-kad", + "libp2p-mdns", + "libp2p-metrics", + "libp2p-noise", + "libp2p-ping", + "libp2p-quic", + "libp2p-relay", + "libp2p-swarm", + "libp2p-tcp", + "libp2p-tls", + "libp2p-upnp", + "libp2p-yamux", + "multiaddr", + "pin-project", + "rw-stream-sink", + "thiserror 1.0.65", ] [[package]] -name = "librocksdb-sys" -version = "0.17.0+9.0.0" -source = "git+https://github.com/madara-alliance/rust-rocksdb?branch=read-options-set-raw-snapshot#75f13c78fdf970b0afd2f21f52caf3317341341c" +name = "libp2p-allow-block-list" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1027ccf8d70320ed77e984f273bc8ce952f623762cb9bf2d126df73caef8041" dependencies = [ - "bindgen", - "bzip2-sys", - "cc", - "glob", - "libc", - "libz-sys", - "lz4-sys", - "zstd-sys", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "void", ] [[package]] -name = "libz-sys" -version = "1.1.20" +name = "libp2p-autonat" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +checksum = "a083675f189803d0682a2726131628e808144911dad076858bfbe30b13065499" dependencies = [ - "cc", - "pkg-config", - "vcpkg", + "async-trait", + "asynchronous-codec", + "bytes", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + 
"libp2p-identity", + "libp2p-request-response", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec", + "rand", + "rand_core", + "thiserror 1.0.65", + "tracing", + "void", + "web-time", ] [[package]] -name = "linux-raw-sys" -version = "0.4.14" +name = "libp2p-connection-limits" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "8d003540ee8baef0d254f7b6bfd79bac3ddf774662ca0abf69186d517ef82ad8" +dependencies = [ + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "void", +] [[package]] -name = "lock_api" -version = "0.4.12" +name = "libp2p-core" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "a61f26c83ed111104cd820fe9bc3aaabbac5f1652a1d213ed6e900b7918a1298" dependencies = [ - "autocfg", - "scopeguard", + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-identity", + "multiaddr", + "multihash", + "multistream-select", + "once_cell", + "parking_lot 0.12.3", + "pin-project", + "quick-protobuf", + "rand", + "rw-stream-sink", + "smallvec", + "thiserror 1.0.65", + "tracing", + "unsigned-varint 0.8.0", + "void", + "web-time", ] [[package]] -name = "log" -version = "0.4.22" +name = "libp2p-dcutr" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "3236a2e24cbcf2d05b398b003ed920e1e8cedede13784d90fa3961b109647ce0" dependencies = [ - "value-bag", + "asynchronous-codec", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "lru", + "quick-protobuf", + "quick-protobuf-codec", + "thiserror 1.0.65", + "tracing", + "void", + "web-time", ] [[package]] -name = "lru" -version = "0.12.5" +name = "libp2p-dns" +version = 
"0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +checksum = "97f37f30d5c7275db282ecd86e54f29dd2176bd3ac656f06abf43bedb21eb8bd" dependencies = [ - "hashbrown 0.15.0", + "async-trait", + "futures", + "hickory-resolver", + "libp2p-core", + "libp2p-identity", + "parking_lot 0.12.3", + "smallvec", + "tracing", ] [[package]] -name = "lz4-sys" -version = "1.11.1+lz4-1.10.0" +name = "libp2p-gossipsub" +version = "0.47.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4e830fdf24ac8c444c12415903174d506e1e077fbe3875c404a78c5935a8543" +dependencies = [ + "asynchronous-codec", + "base64 0.22.1", + "byteorder", + "bytes", + "either", + "fnv", + "futures", + "futures-ticker", + "getrandom", + "hex_fmt", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "prometheus-client", + "quick-protobuf", + "quick-protobuf-codec", + "rand", + "regex", + "sha2", + "smallvec", + "tracing", + "void", + "web-time", +] + +[[package]] +name = "libp2p-identify" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1711b004a273be4f30202778856368683bd9a83c4c7dcc8f848847606831a4e3" +dependencies = [ + "asynchronous-codec", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "lru", + "quick-protobuf", + "quick-protobuf-codec", + "smallvec", + "thiserror 1.0.65", + "tracing", + "void", +] + +[[package]] +name = "libp2p-identity" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "257b5621d159b32282eac446bed6670c39c7dc68a200a992d8f056afa0066f6d" +dependencies = [ + "bs58", + "ed25519-dalek", + "hkdf", + "multihash", + "quick-protobuf", + "rand", + "sha2", + "thiserror 1.0.65", + "tracing", + "zeroize", +] + +[[package]] +name = "libp2p-kad" +version = "0.46.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced237d0bd84bbebb7c2cad4c073160dacb4fe40534963c32ed6d4c6bb7702a3" +dependencies = [ + "arrayvec", + "asynchronous-codec", + "bytes", + "either", + "fnv", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec", + "rand", + "sha2", + "smallvec", + "thiserror 1.0.65", + "tracing", + "uint", + "void", + "web-time", +] + +[[package]] +name = "libp2p-mdns" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14b8546b6644032565eb29046b42744aee1e9f261ed99671b2c93fb140dba417" +dependencies = [ + "data-encoding", + "futures", + "hickory-proto", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand", + "smallvec", + "socket2 0.5.7", + "tokio", + "tracing", + "void", +] + +[[package]] +name = "libp2p-metrics" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ebafa94a717c8442d8db8d3ae5d1c6a15e30f2d347e0cd31d057ca72e42566" +dependencies = [ + "futures", + "libp2p-core", + "libp2p-dcutr", + "libp2p-gossipsub", + "libp2p-identify", + "libp2p-identity", + "libp2p-kad", + "libp2p-ping", + "libp2p-relay", + "libp2p-swarm", + "pin-project", + "prometheus-client", + "web-time", +] + +[[package]] +name = "libp2p-noise" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36b137cb1ae86ee39f8e5d6245a296518912014eaa87427d24e6ff58cfc1b28c" +dependencies = [ + "asynchronous-codec", + "bytes", + "curve25519-dalek", + "futures", + "libp2p-core", + "libp2p-identity", + "multiaddr", + "multihash", + "once_cell", + "quick-protobuf", + "rand", + "sha2", + "snow", + "static_assertions", + "thiserror 1.0.65", + "tracing", + "x25519-dalek", + "zeroize", +] + +[[package]] +name = "libp2p-ping" +version = "0.45.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "005a34420359223b974ee344457095f027e51346e992d1e0dcd35173f4cdd422" +dependencies = [ + "either", + "futures", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "rand", + "tracing", + "void", + "web-time", +] + +[[package]] +name = "libp2p-plaintext" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b63d926c6be56a2489e0e7316b17fe95a70bc5c4f3e85740bb3e67c0f3c6a44" +dependencies = [ + "asynchronous-codec", + "bytes", + "futures", + "libp2p-core", + "libp2p-identity", + "quick-protobuf", + "quick-protobuf-codec", + "tracing", +] + +[[package]] +name = "libp2p-quic" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46352ac5cd040c70e88e7ff8257a2ae2f891a4076abad2c439584a31c15fd24e" +dependencies = [ + "bytes", + "futures", + "futures-timer", + "if-watch", + "libp2p-core", + "libp2p-identity", + "libp2p-tls", + "parking_lot 0.12.3", + "quinn", + "rand", + "ring 0.17.8", + "rustls 0.23.16", + "socket2 0.5.7", + "thiserror 1.0.65", + "tokio", + "tracing", +] + +[[package]] +name = "libp2p-relay" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10df23d7f5b5adcc129f4a69d6fbd05209e356ccf9e8f4eb10b2692b79c77247" +dependencies = [ + "asynchronous-codec", + "bytes", + "either", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm", + "quick-protobuf", + "quick-protobuf-codec", + "rand", + "static_assertions", + "thiserror 1.0.65", + "tracing", + "void", + "web-time", +] + +[[package]] +name = "libp2p-request-response" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1356c9e376a94a75ae830c42cdaea3d4fe1290ba409a22c809033d1b7dcab0a6" +dependencies = [ + "async-trait", + "futures", + "futures-bounded", + "futures-timer", + "libp2p-core", + 
"libp2p-identity", + "libp2p-swarm", + "rand", + "smallvec", + "tracing", + "void", + "web-time", +] + +[[package]] +name = "libp2p-swarm" +version = "0.45.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7dd6741793d2c1fb2088f67f82cf07261f25272ebe3c0b0c311e0c6b50e851a" +dependencies = [ + "async-std", + "either", + "fnv", + "futures", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-swarm-derive", + "lru", + "multistream-select", + "once_cell", + "rand", + "smallvec", + "tokio", + "tracing", + "void", + "web-time", +] + +[[package]] +name = "libp2p-swarm-derive" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.89", +] + +[[package]] +name = "libp2p-swarm-test" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea4e1d1d92421dc4c90cad42e3cd24f50fd210191c9f126d41bd483a09567f67" +dependencies = [ + "async-trait", + "futures", + "futures-timer", + "libp2p-core", + "libp2p-identity", + "libp2p-plaintext", + "libp2p-swarm", + "libp2p-tcp", + "libp2p-yamux", + "rand", + "tracing", +] + +[[package]] +name = "libp2p-tcp" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad964f312c59dcfcac840acd8c555de8403e295d39edf96f5240048b5fcaa314" +dependencies = [ + "async-io 2.3.4", + "futures", + "futures-timer", + "if-watch", + "libc", + "libp2p-core", + "libp2p-identity", + "socket2 0.5.7", + "tokio", + "tracing", +] + +[[package]] +name = "libp2p-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b23dddc2b9c355f73c1e36eb0c3ae86f7dc964a3715f0731cfad352db4d847" +dependencies = [ + "futures", + "futures-rustls", + "libp2p-core", + "libp2p-identity", + "rcgen", + "ring 0.17.8", + "rustls 0.23.16", + 
"rustls-webpki 0.101.7", + "thiserror 1.0.65", + "x509-parser", + "yasna", +] + +[[package]] +name = "libp2p-upnp" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01bf2d1b772bd3abca049214a3304615e6a36fa6ffc742bdd1ba774486200b8f" +dependencies = [ + "futures", + "futures-timer", + "igd-next", + "libp2p-core", + "libp2p-swarm", + "tokio", + "tracing", + "void", +] + +[[package]] +name = "libp2p-yamux" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "788b61c80789dba9760d8c669a5bedb642c8267555c803fabd8396e4ca5c5882" +dependencies = [ + "either", + "futures", + "libp2p-core", + "thiserror 1.0.65", + "tracing", + "yamux 0.12.1", + "yamux 0.13.4", +] + +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.6.0", + "libc", +] + +[[package]] +name = "librocksdb-sys" +version = "0.17.0+9.0.0" +source = "git+https://github.com/madara-alliance/rust-rocksdb?branch=read-options-set-raw-snapshot#75f13c78fdf970b0afd2f21f52caf3317341341c" +dependencies = [ + "bindgen", + "bzip2-sys", + "cc", + "glob", + "libc", + "libz-sys", + "lz4-sys", + "zstd-sys", +] + +[[package]] +name = "libz-sys" +version = "1.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + +[[package]] +name = "linux-raw-sys" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + 
+[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +dependencies = [ + "value-bag", +] + +[[package]] +name = "lru" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +dependencies = [ + "hashbrown 0.15.0", +] + +[[package]] +name = "lru-cache" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" +dependencies = [ + "linked-hash-map", +] + +[[package]] +name = "lz4-sys" +version = "1.11.1+lz4-1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" dependencies = [ @@ -5379,7 +6527,6 @@ dependencies = [ "hyper 0.14.31", "jsonrpsee", "mc-analytics", - "mc-block-import", "mc-block-production", "mc-db", "mc-devnet", @@ -5387,13 +6534,15 @@ dependencies = [ "mc-gateway-client", "mc-gateway-server", "mc-mempool", + "mc-p2p", "mc-rpc", - "mc-sync", + "mc-sync2", "mc-telemetry", "mp-block", "mp-chain-config", "mp-oracle", "mp-utils", + "multiaddr", "opentelemetry", "opentelemetry-appender-tracing", "opentelemetry-otlp", @@ -5418,6 +6567,23 @@ dependencies = [ "url", ] +[[package]] +name = "malachite" +version = "0.7.0" +dependencies = [ + 
"informalsystems-malachitebft-core-types", + "mp-block", + "mp-proto", + "mp-transactions", + "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", +] + +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + [[package]] name = "matchers" version = "0.1.0" @@ -5465,44 +6631,6 @@ dependencies = [ "url", ] -[[package]] -name = "mc-block-import" -version = "0.7.0" -dependencies = [ - "anyhow", - "bitvec", - "bonsai-trie", - "itertools 0.13.0", - "mc-analytics", - "mc-db", - "mp-block", - "mp-chain-config", - "mp-class", - "mp-convert", - "mp-receipt", - "mp-state-update", - "mp-transactions", - "num-traits 0.2.19", - "opentelemetry", - "opentelemetry-appender-tracing", - "opentelemetry-otlp", - "opentelemetry-semantic-conventions", - "opentelemetry-stdout", - "opentelemetry_sdk", - "rayon", - "rstest 0.18.2", - "serde", - "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", - "starknet_api", - "tempfile", - "thiserror 2.0.3", - "tokio", - "tracing", - "tracing-core", - "tracing-opentelemetry", - "tracing-subscriber", -] - [[package]] name = "mc-block-production" version = "0.7.0" @@ -5513,7 +6641,6 @@ dependencies = [ "blockifier", "lazy_static", "mc-analytics", - "mc-block-import", "mc-db", "mc-exec", "mc-mempool", @@ -5554,8 +6681,10 @@ version = "0.7.0" dependencies = [ "anyhow", "bincode 1.3.3", + "bitvec", "blockifier", "bonsai-trie", + "futures", "lazy_static", "librocksdb-sys", "mc-analytics", @@ -5574,6 +6703,7 @@ dependencies = [ "opentelemetry_sdk", "rayon", "rocksdb", + "rstest 0.18.2", "serde", "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", "starknet-types-rpc", @@ -5595,7 +6725,6 @@ dependencies = [ "assert_matches", "blockifier", 
"m-cairo-test-contracts", - "mc-block-import", "mc-block-production", "mc-db", "mc-mempool", @@ -5789,7 +6918,6 @@ dependencies = [ "blockifier", "lazy_static", "mc-analytics", - "mc-block-import", "mc-db", "mc-exec", "mockall", @@ -5828,6 +6956,43 @@ dependencies = [ "tracing-test", ] +[[package]] +name = "mc-p2p" +version = "0.7.0" +dependencies = [ + "anyhow", + "async-trait", + "base64 0.22.1", + "bytes", + "futures", + "futures-bounded", + "libp2p", + "mc-db", + "mc-rpc", + "mp-block", + "mp-chain-config", + "mp-class", + "mp-convert", + "mp-proto", + "mp-receipt", + "mp-state-update", + "mp-transactions", + "mp-utils", + "p2p_stream", + "prost", + "prost-build", + "serde", + "serde_json", + "starknet-core", + "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", + "thiserror 2.0.3", + "tokio", + "tokio-stream", + "tokio-util", + "tracing", + "unsigned-varint 0.8.0", +] + [[package]] name = "mc-rpc" version = "0.7.0" @@ -5862,41 +7027,50 @@ dependencies = [ ] [[package]] -name = "mc-sync" +name = "mc-sync2" version = "0.7.0" dependencies = [ "anyhow", + "async-trait", + "bitvec", + "bonsai-trie", "futures", "httpmock", "hyper 1.5.0", - "jsonrpsee", "m-cairo-test-contracts", "mc-analytics", - "mc-block-import", "mc-db", + "mc-eth", "mc-gateway-client", - "mc-rpc", + "mc-p2p", "mc-telemetry", "mp-block", "mp-chain-config", "mp-class", + "mp-convert", "mp-gateway", + "mp-receipt", + "mp-state-update", + "mp-transactions", "mp-utils", + "num-traits 0.2.19", "opentelemetry", "opentelemetry-appender-tracing", "opentelemetry-otlp", "opentelemetry-semantic-conventions", "opentelemetry-stdout", "opentelemetry_sdk", + "rand", + "rayon", "regex", "rstest 0.18.2", "serde_json", + "starknet-core", "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", "starknet_api", "tempfile", "thiserror 2.0.3", "tokio", - "tokio-util", "tracing", "tracing-core", 
"tracing-opentelemetry", @@ -5999,9 +7173,14 @@ dependencies = [ name = "mp-block" version = "0.7.0" dependencies = [ + "bitvec", "blockifier", + "bonsai-trie", + "informalsystems-malachitebft-core-types", "mp-chain-config", + "mp-class", "mp-receipt", + "mp-state-update", "mp-transactions", "opentelemetry", "opentelemetry-appender-tracing", @@ -6010,6 +7189,7 @@ dependencies = [ "opentelemetry-stdout", "opentelemetry_sdk", "primitive-types", + "rayon", "serde", "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", "starknet-types-rpc", @@ -6028,6 +7208,7 @@ dependencies = [ "blockifier", "lazy_static", "mp-utils", + "multiaddr", "primitive-types", "rstest 0.18.2", "serde", @@ -6092,7 +7273,6 @@ dependencies = [ "base64 0.22.1", "http 1.1.0", "hyper 1.5.0", - "mc-block-import", "mp-block", "mp-chain-config", "mp-class", @@ -6123,6 +7303,39 @@ dependencies = [ "serde", ] +[[package]] +name = "mp-proto" +version = "0.7.0" +dependencies = [ + "anyhow", + "assert_matches", + "base64 0.22.1", + "bytes", + "m-proc-macros", + "mc-db", + "mp-block", + "mp-class", + "mp-convert", + "mp-receipt", + "mp-state-update", + "mp-transactions", + "mp-utils", + "proptest", + "proptest-derive", + "proptest-state-machine", + "prost", + "prost-build", + "rand", + "rstest 0.18.2", + "serde", + "serde_json", + "starknet-core", + "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", + "thiserror 2.0.3", + "tracing", + "unsigned-varint 0.8.0", +] + [[package]] name = "mp-receipt" version = "0.7.0" @@ -6130,9 +7343,12 @@ dependencies = [ "bincode 1.3.3", "blockifier", "cairo-vm", + "mp-chain-config", + "mp-convert", "primitive-types", "rstest 0.18.2", "serde", + "serde_with", "starknet-core", "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", "starknet-types-rpc", @@ -6200,7 +7416,7 @@ dependencies = [ "serde", "serde_yaml", 
"starknet-core", - "starknet-crypto 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "starknet-crypto 0.7.2", "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", "tokio", "tokio-util", @@ -6211,6 +7427,66 @@ dependencies = [ "url", ] +[[package]] +name = "multiaddr" +version = "0.18.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe6351f60b488e04c1d21bc69e56b89cb3f5e8f5d22557d6e8031bdfd79b6961" +dependencies = [ + "arrayref", + "byteorder", + "data-encoding", + "libp2p-identity", + "multibase", + "multihash", + "percent-encoding", + "serde", + "static_assertions", + "unsigned-varint 0.8.0", + "url", +] + +[[package]] +name = "multibase" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +dependencies = [ + "base-x", + "data-encoding", + "data-encoding-macro", +] + +[[package]] +name = "multihash" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" +dependencies = [ + "core2", + "unsigned-varint 0.8.0", +] + +[[package]] +name = "multimap" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" + +[[package]] +name = "multistream-select" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea0df8e5eec2298a62b326ee4f0d7fe1a6b90a09dfcf9df37b38f947a8c42f19" +dependencies = [ + "bytes", + "futures", + "log", + "pin-project", + "smallvec", + "unsigned-varint 0.7.2", +] + [[package]] name = "native-tls" version = "0.2.12" @@ -6241,12 +7517,95 @@ dependencies = [ "rawpointer", ] +[[package]] +name = "netlink-packet-core" +version = "0.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "72724faf704479d67b388da142b186f916188505e7e0b26719019c525882eda4" +dependencies = [ + "anyhow", + "byteorder", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-route" +version = "0.17.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053998cea5a306971f88580d0829e90f270f940befd7cf928da179d4187a5a66" +dependencies = [ + "anyhow", + "bitflags 1.3.2", + "byteorder", + "libc", + "netlink-packet-core", + "netlink-packet-utils", +] + +[[package]] +name = "netlink-packet-utils" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ede8a08c71ad5a95cdd0e4e52facd37190977039a4704eb82a283f713747d34" +dependencies = [ + "anyhow", + "byteorder", + "paste", + "thiserror 1.0.65", +] + +[[package]] +name = "netlink-proto" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b33524dc0968bfad349684447bfce6db937a9ac3332a1fe60c0c5a5ce63f21" +dependencies = [ + "bytes", + "futures", + "log", + "netlink-packet-core", + "netlink-sys", + "thiserror 1.0.65", + "tokio", +] + +[[package]] +name = "netlink-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "416060d346fbaf1f23f9512963e3e878f1a78e707cb699ba9215761754244307" +dependencies = [ + "async-io 1.13.0", + "bytes", + "futures", + "libc", + "log", + "tokio", +] + [[package]] name = "new_debug_unreachable" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", +] + +[[package]] +name = "nohash-hasher" +version = "0.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" + [[package]] name = "nom" version = "7.1.3" @@ -6419,6 +7778,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d8034d9489cdaf79228eb9f6a3b8d7bb32ba00d6645ebd48eef4077ceb5bd9" +dependencies = [ + "asn1-rs", +] + [[package]] name = "once_cell" version = "1.20.2" @@ -6596,6 +7964,23 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "p2p_stream" +version = "0.7.0" +dependencies = [ + "anyhow", + "async-trait", + "futures", + "futures-bounded", + "libp2p", + "libp2p-plaintext", + "libp2p-swarm-test", + "rstest 0.18.2", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "parity-scale-codec" version = "3.6.12" @@ -6717,6 +8102,16 @@ dependencies = [ "sha2", ] +[[package]] +name = "pem" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +dependencies = [ + "base64 0.22.1", + "serde", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -6840,7 +8235,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" dependencies = [ "atomic-waker", - "fastrand", + "fastrand 2.1.1", "futures-io", ] @@ -6860,6 +8255,22 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + [[package]] name = "polling" version = "3.7.3" @@ -6870,11 +8281,34 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix", + "rustix 0.38.38", "tracing", "windows-sys 0.59.0", ] +[[package]] +name = "poly1305" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" +dependencies = [ + "cpufeatures", + "opaque-debug", + "universal-hash", +] + +[[package]] +name = "polyval" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" +dependencies = [ + "cfg-if", + "cpufeatures", + "opaque-debug", + "universal-hash", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -6932,6 +8366,16 @@ dependencies = [ "yansi", ] +[[package]] +name = "prettyplease" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" +dependencies = [ + "proc-macro2", + "syn 2.0.89", +] + [[package]] name = "primitive-types" version = "0.12.2" @@ -6978,13 +8422,36 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307e3004becf10f5a6e0d59d20f3cd28231b0e0827a96cd3e0ce6d14bc1e4bb3" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] +[[package]] +name = "prometheus-client" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" 
+dependencies = [ + "dtoa", + "itoa", + "parking_lot 0.12.3", + "prometheus-client-derive-encode", +] + +[[package]] +name = "prometheus-client-derive-encode" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "proptest" version = "1.5.0" @@ -7036,23 +8503,128 @@ dependencies = [ ] [[package]] -name = "prost-derive" -version = "0.13.3" +name = "prost-build" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c1318b19085f08681016926435853bbf7858f9c082d0999b80550ff5d9abe15" +dependencies = [ + "bytes", + "heck 0.5.0", + "itertools 0.13.0", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn 2.0.89", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" +dependencies = [ + "anyhow", + "itertools 0.13.0", + "proc-macro2", + "quote", + "syn 2.0.89", +] + +[[package]] +name = "prost-types" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4759aa0d3a6232fb8dbdb97b61de2c20047c68aca932c7ed76da9d788508d670" +dependencies = [ + "prost", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quick-protobuf" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +dependencies = [ + "byteorder", +] + +[[package]] +name = "quick-protobuf-codec" +version = "0.3.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "15a0580ab32b169745d7a39db2ba969226ca16738931be152a3209b409de2474" +dependencies = [ + "asynchronous-codec", + "bytes", + "quick-protobuf", + "thiserror 1.0.65", + "unsigned-varint 0.8.0", +] + +[[package]] +name = "quinn" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +dependencies = [ + "bytes", + "futures-io", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash 2.0.0", + "rustls 0.23.16", + "socket2 0.5.7", + "thiserror 2.0.3", + "tokio", + "tracing", +] + +[[package]] +name = "quinn-proto" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" +checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ - "anyhow", - "itertools 0.13.0", - "proc-macro2", - "quote", - "syn 2.0.89", + "bytes", + "getrandom", + "rand", + "ring 0.17.8", + "rustc-hash 2.0.0", + "rustls 0.23.16", + "rustls-pki-types", + "slab", + "thiserror 2.0.3", + "tinyvec", + "tracing", + "web-time", ] [[package]] -name = "quick-error" -version = "1.2.3" +name = "quinn-udp" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.5.7", + "tracing", + "windows-sys 0.59.0", +] [[package]] name = "quote" @@ -7135,6 +8707,18 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rcgen" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" +dependencies = [ + "pem", + "ring 0.16.20", + "time", 
+ "yasna", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -7316,6 +8900,16 @@ dependencies = [ "web-sys", ] +[[package]] +name = "resolv-conf" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +dependencies = [ + "hostname", + "quick-error", +] + [[package]] name = "rfc6979" version = "0.4.0" @@ -7326,6 +8920,21 @@ dependencies = [ "subtle", ] +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin 0.5.2", + "untrusted 0.7.1", + "web-sys", + "winapi", +] + [[package]] name = "ring" version = "0.17.8" @@ -7336,8 +8945,8 @@ dependencies = [ "cfg-if", "getrandom", "libc", - "spin", - "untrusted", + "spin 0.9.8", + "untrusted 0.9.0", "windows-sys 0.52.0", ] @@ -7421,6 +9030,25 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "rtnetlink" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a552eb82d19f38c3beed3f786bd23aa434ceb9ac43ab44419ca6d67a7e186c0" +dependencies = [ + "async-global-executor", + "futures", + "log", + "netlink-packet-core", + "netlink-packet-route", + "netlink-packet-utils", + "netlink-proto", + "netlink-sys", + "nix", + "thiserror 1.0.65", + "tokio", +] + [[package]] name = "ruint" version = "1.12.3" @@ -7532,6 +9160,29 @@ dependencies = [ "semver 1.0.23", ] +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + +[[package]] +name = "rustix" +version = "0.37.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + [[package]] name = "rustix" version = "0.38.38" @@ -7541,7 +9192,7 @@ dependencies = [ "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys", + "linux-raw-sys 0.4.14", "windows-sys 0.52.0", ] @@ -7552,7 +9203,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", - "ring", + "ring 0.17.8", "rustls-webpki 0.101.7", "sct", ] @@ -7564,7 +9215,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" dependencies = [ "log", - "ring", + "ring 0.17.8", "rustls-pki-types", "rustls-webpki 0.102.8", "subtle", @@ -7578,6 +9229,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "once_cell", + "ring 0.17.8", "rustls-pki-types", "rustls-webpki 0.102.8", "subtle", @@ -7632,6 +9284,9 @@ name = "rustls-pki-types" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +dependencies = [ + "web-time", +] [[package]] name = "rustls-webpki" @@ -7639,8 +9294,8 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.8", + "untrusted 0.9.0", ] [[package]] @@ -7649,9 +9304,9 @@ version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ - "ring", + "ring 0.17.8", "rustls-pki-types", - "untrusted", 
+ "untrusted 0.9.0", ] [[package]] @@ -7672,6 +9327,17 @@ dependencies = [ "wait-timeout", ] +[[package]] +name = "rw-stream-sink" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8c9026ff5d2f23da5e45bbc283f156383001bfb09c4e44256d02c1a685fe9a1" +dependencies = [ + "futures", + "pin-project", + "static_assertions", +] + [[package]] name = "ryu" version = "1.0.18" @@ -7783,8 +9449,8 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring", - "untrusted", + "ring 0.17.8", + "untrusted 0.9.0", ] [[package]] @@ -8113,6 +9779,23 @@ dependencies = [ "serde", ] +[[package]] +name = "smol" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a33bd3e260892199c3ccfc487c88b2da2265080acb316cd920da72fdfd7c599f" +dependencies = [ + "async-channel 2.3.1", + "async-executor", + "async-fs", + "async-io 2.3.4", + "async-lock 3.4.0", + "async-net", + "async-process", + "blocking", + "futures-lite 2.3.0", +] + [[package]] name = "smol_str" version = "0.1.24" @@ -8131,6 +9814,33 @@ dependencies = [ "serde", ] +[[package]] +name = "snow" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" +dependencies = [ + "aes-gcm", + "blake2", + "chacha20poly1305", + "curve25519-dalek", + "rand_core", + "ring 0.17.8", + "rustc_version 0.4.1", + "sha2", + "subtle", +] + +[[package]] +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "socket2" version = "0.5.7" @@ -8157,6 +9867,12 @@ dependencies = [ "sha-1", ] +[[package]] +name = "spin" +version = "0.5.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + [[package]] name = "spin" version = "0.9.8" @@ -8199,7 +9915,7 @@ dependencies = [ "starknet-accounts", "starknet-contract", "starknet-core", - "starknet-crypto 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "starknet-crypto 0.7.2", "starknet-macros", "starknet-providers", "starknet-signers", @@ -8214,7 +9930,7 @@ dependencies = [ "async-trait", "auto_impl", "starknet-core", - "starknet-crypto 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "starknet-crypto 0.7.2", "starknet-providers", "starknet-signers", "thiserror 1.0.65", @@ -8238,7 +9954,8 @@ dependencies = [ [[package]] name = "starknet-core" version = "0.12.0" -source = "git+https://github.com/kasarlabs/starknet-rs.git?branch=fork#70e1ee45dc701afc2a7629bf88bb5d90a93d51a7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2538240cbe6663c673fe77465f294da707080f39678dd7066761554899e46100" dependencies = [ "base64 0.21.7", "crypto-bigint", @@ -8249,7 +9966,7 @@ dependencies = [ "serde_json_pythonic", "serde_with", "sha3", - "starknet-crypto 0.7.2 (git+https://github.com/kasarlabs/starknet-rs.git?branch=fork)", + "starknet-crypto 0.7.2", "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", ] @@ -8307,25 +10024,7 @@ dependencies = [ "num-traits 0.2.19", "rfc6979", "sha2", - "starknet-curve 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", - "zeroize", -] - -[[package]] -name = "starknet-crypto" -version = "0.7.2" -source = "git+https://github.com/kasarlabs/starknet-rs.git?branch=fork#70e1ee45dc701afc2a7629bf88bb5d90a93d51a7" -dependencies = [ - "crypto-bigint", - "hex", - "hmac", - "num-bigint", - "num-integer", - "num-traits 0.2.19", - 
"rfc6979", - "sha2", - "starknet-curve 0.5.1 (git+https://github.com/kasarlabs/starknet-rs.git?branch=fork)", + "starknet-curve 0.5.1", "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", "zeroize", ] @@ -8368,14 +10067,6 @@ dependencies = [ "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", ] -[[package]] -name = "starknet-curve" -version = "0.5.1" -source = "git+https://github.com/kasarlabs/starknet-rs.git?branch=fork#70e1ee45dc701afc2a7629bf88bb5d90a93d51a7" -dependencies = [ - "starknet-types-core 0.1.7 (git+https://github.com/kasarlabs/types-rs.git?branch=feat-deserialize-v0.1.7)", -] - [[package]] name = "starknet-ff" version = "0.3.7" @@ -8432,7 +10123,7 @@ dependencies = [ "getrandom", "rand", "starknet-core", - "starknet-crypto 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "starknet-crypto 0.7.2", "thiserror 1.0.65", ] @@ -8644,6 +10335,17 @@ dependencies = [ "futures-core", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.89", +] + [[package]] name = "sysinfo" version = "0.30.13" @@ -8714,9 +10416,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", - "fastrand", + "fastrand 2.1.1", "once_cell", - "rustix", + "rustix 0.38.38", "windows-sys 0.59.0", ] @@ -8896,7 +10598,7 @@ dependencies = [ "parking_lot 0.12.3", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.48.0", ] @@ -9044,7 +10746,7 @@ dependencies = [ "percent-encoding", "pin-project", "prost", - "socket2", + "socket2 0.5.7", "tokio", "tokio-stream", "tower 0.4.13", @@ -9331,12 
+11033,44 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" +[[package]] +name = "universal-hash" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" +dependencies = [ + "crypto-common", + "subtle", +] + [[package]] name = "unsafe-libyaml" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" +[[package]] +name = "unsigned-varint" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" + +[[package]] +name = "unsigned-varint" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb066959b24b5196ae73cb057f45598450d2c5f71460e98c49b738086eff9c06" +dependencies = [ + "futures-io", + "futures-util", +] + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + [[package]] name = "untrusted" version = "0.9.0" @@ -9350,7 +11084,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", - "idna", + "idna 0.5.0", "percent-encoding", "serde", ] @@ -9401,6 +11135,12 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + [[package]] name = 
"wait-timeout" version = "0.2.0" @@ -9410,6 +11150,12 @@ dependencies = [ "libc", ] +[[package]] +name = "waker-fn" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" + [[package]] name = "walkdir" version = "2.5.0" @@ -9537,6 +11283,12 @@ dependencies = [ "rustls-pki-types", ] +[[package]] +name = "widestring" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" + [[package]] name = "winapi" version = "0.3.9" @@ -9793,6 +11545,50 @@ dependencies = [ "tap", ] +[[package]] +name = "x25519-dalek" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" +dependencies = [ + "curve25519-dalek", + "rand_core", + "serde", + "zeroize", +] + +[[package]] +name = "x509-parser" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcbc162f30700d6f3f82a24bf7cc62ffe7caea42c0b2cba8bf7f3ae50cf51f69" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror 1.0.65", + "time", +] + +[[package]] +name = "xml-rs" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af310deaae937e48a26602b730250b4949e125f468f11e6990be3e5304ddd96f" + +[[package]] +name = "xmltree" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7d8a75eaf6557bb84a65ace8609883db44a29951042ada9b393151532e41fcb" +dependencies = [ + "xml-rs", +] + [[package]] name = "xshell" version = "0.2.6" @@ -9808,12 +11604,52 @@ version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9d422e8e38ec76e2f06ee439ccc765e9c6a9638b9e7c9f2e8255e4d41e8bd852" +[[package]] +name = "yamux" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed0164ae619f2dc144909a9f082187ebb5893693d8c0196e8085283ccd4b776" +dependencies = [ + "futures", + "log", + "nohash-hasher", + "parking_lot 0.12.3", + "pin-project", + "rand", + "static_assertions", +] + +[[package]] +name = "yamux" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17610762a1207ee816c6fadc29220904753648aba0a9ed61c7b8336e80a559c4" +dependencies = [ + "futures", + "log", + "nohash-hasher", + "parking_lot 0.12.3", + "pin-project", + "rand", + "static_assertions", + "web-time", +] + [[package]] name = "yansi" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + [[package]] name = "zerocopy" version = "0.7.35" diff --git a/Cargo.toml b/Cargo.toml index 9a8c743ff..de179fb55 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ members = [ # madara "crates/madara/client/db", "crates/madara/client/exec", - "crates/madara/client/sync", + "crates/madara/client/sync2", "crates/madara/client/eth", "crates/madara/client/rpc", "crates/madara/client/gateway/client", @@ -12,7 +12,9 @@ members = [ "crates/madara/client/telemetry", "crates/madara/client/devnet", "crates/madara/client/mempool", - "crates/madara/client/block_import", + "crates/madara/client/block_production", + "crates/madara/client/p2p", + "crates/madara/client/p2p_stream", "crates/madara/node", "crates/madara/primitives/block", "crates/madara/primitives/convert", @@ -26,24 +28,26 @@ members = [ "crates/madara/proc-macros", 
"crates/madara/tests", "crates/madara/cairo-test-contracts", - "crates/madara/client/block_production", + "crates/madara/primitives/malachite", + "crates/madara/primitives/proto", ] resolver = "2" # Everything except test-related packages, so that they are not compiled when doing `cargo build`. default-members = [ - # madara "crates/madara/client/db", "crates/madara/client/exec", - "crates/madara/client/sync", + "crates/madara/client/sync2", "crates/madara/client/eth", + "crates/madara/client/rpc", "crates/madara/client/gateway/client", "crates/madara/client/gateway/server", - "crates/madara/client/rpc", + "crates/madara/client/analytics", "crates/madara/client/telemetry", "crates/madara/client/devnet", "crates/madara/client/mempool", - "crates/madara/client/block_import", - "crates/madara/client/analytics", + "crates/madara/client/block_production", + "crates/madara/client/p2p", + "crates/madara/client/p2p_stream", "crates/madara/node", "crates/madara/primitives/block", "crates/madara/primitives/convert", @@ -119,6 +123,7 @@ mp-state-update = { path = "crates/madara/primitives/state_update", default-feat mp-utils = { path = "crates/madara/primitives/utils", default-features = false } mp-chain-config = { path = "crates/madara/primitives/chain_config", default-features = false } mp-oracle = { path = "crates/madara/primitives/oracle", default-features = false } +mp-proto = { path = "crates/madara/primitives/proto", default-features = false } # Madara client mc-analytics = { path = "crates/madara/client/analytics" } @@ -128,15 +133,16 @@ mc-exec = { path = "crates/madara/client/exec" } mc-rpc = { path = "crates/madara/client/rpc" } mc-gateway-client = { path = "crates/madara/client/gateway/client" } mc-gateway-server = { path = "crates/madara/client/gateway/server" } -mc-sync = { path = "crates/madara/client/sync" } +mc-sync2 = { path = "crates/madara/client/sync2" } +mc-p2p = { path = "crates/madara/client/p2p" } mc-eth = { path = "crates/madara/client/eth" } 
mc-mempool = { path = "crates/madara/client/mempool" } mc-block-production = { path = "crates/madara/client/block_production" } -mc-block-import = { path = "crates/madara/client/block_import" } mc-devnet = { path = "crates/madara/client/devnet" } # Madara misc m-cairo-test-contracts = { path = "crates/madara/cairo-test-contracts" } +p2p_stream = { path = "crates/madara/client/p2p_stream" } # Starknet dependencies cairo-vm = "=1.0.1" @@ -184,6 +190,10 @@ tracing-subscriber = { version = "0.3.18", features = [ ] } tracing-test = "0.2.5" tracing-opentelemetry = "0.26.0" +prost = "0.13.3" +prost-build = "0.13.3" +unsigned-varint = { version = "0.8.0", features = ["futures"] } +tokio-stream = "0.1" # Networking jsonrpsee = { version = "0.22", default-features = false, features = [ @@ -201,6 +211,27 @@ http-body-util = "0.1.2" ip_network = "0.4" reqwest = { version = "0.12", features = ["blocking", "json"] } url = { version = "2.4", features = ["serde"] } +libp2p-plaintext = "0.42.0" +libp2p-swarm-test = "0.4.0" +libp2p = { version = "0.54.1", features = [ + "tokio", + # "quic", + "tcp", + "tls", + "noise", + "yamux", + "ping", + "kad", + "gossipsub", + "autonat", + "dcutr", + "relay", + "kad", + "identify", + "macros", +] } +multiaddr = "0.18" +futures-bounded = "0.2.1" # Async tokio = { version = "1.34", features = ["signal", "rt"] } @@ -266,6 +297,12 @@ flate2 = "1.0" regex = "1.10.5" sha3 = "0.10" +# Starknet consensus + +[workspace.dependencies.malachite-core-types] +git = "https://github.com/informalsystems/malachite.git" +package = "informalsystems-malachitebft-core-types" + [patch.crates-io] rocksdb = { git = "https://github.com/madara-alliance/rust-rocksdb", branch = "read-options-set-raw-snapshot" } librocksdb-sys = { git = "https://github.com/madara-alliance/rust-rocksdb", branch = "read-options-set-raw-snapshot" } @@ -273,4 +310,4 @@ librocksdb-sys = { git = "https://github.com/madara-alliance/rust-rocksdb", bran starknet-types-core = { git = 
"https://github.com/kasarlabs/types-rs.git", branch = "feat-deserialize-v0.1.7" } starknet-types-rpc = { git = "https://github.com/jbcaron/types-rs.git", branch = "fork" } -starknet-core = { git = "https://github.com/kasarlabs/starknet-rs.git", branch = "fork" } +# starknet-core = { git = "https://github.com/kasarlabs/starknet-rs.git", branch = "fork" } diff --git a/crates/madara/client/block_import/src/pre_validate.rs b/crates/madara/client/block_import/src/pre_validate.rs index ac423e392..910de8220 100644 --- a/crates/madara/client/block_import/src/pre_validate.rs +++ b/crates/madara/client/block_import/src/pre_validate.rs @@ -22,9 +22,9 @@ pub async fn pre_validate( block: UnverifiedFullBlock, validation: BlockValidationContext, ) -> Result { - tracing::debug!("spawning pre_validate"); + tracing::trace!("spawning pre_validate"); let res = pool.spawn_rayon_task(move || pre_validate_inner(block, validation)).await; - tracing::debug!("finished pre_validate"); + tracing::trace!("finished pre_validate"); res } @@ -34,9 +34,9 @@ pub async fn pre_validate_pending( block: UnverifiedPendingFullBlock, validation: BlockValidationContext, ) -> Result { - tracing::debug!("spawning pre_validate (pending)"); + tracing::trace!("spawning pre_validate (pending)"); let res = pool.spawn_rayon_task(move || pre_validate_pending_inner(block, validation)).await; - tracing::debug!("finished pre_validate (pending)"); + tracing::trace!("finished pre_validate (pending)"); res } @@ -204,7 +204,7 @@ fn class_conversion( fn transaction_hashes( receipts: &[TransactionReceipt], transactions: &[Transaction], - starknet_version: StarknetVersion, + mut starknet_version: StarknetVersion, validation: &BlockValidationContext, ) -> Result, BlockImportError> { if receipts.len() != transactions.len() { @@ -214,9 +214,16 @@ fn transaction_hashes( }); } + // compute_v0_13_2_hashes mode + let compute_v0_13_2_hashes_mode = validation.compute_v0_13_2_hashes && starknet_version < StarknetVersion::V0_13_2; + 
if compute_v0_13_2_hashes_mode { + starknet_version = StarknetVersion::V0_13_2; + } + // mismatched block hash is allowed for block 1469 on mainnet // this block contains a part of transactions computed with the legacy hash function // and the other part with the post-legacy hash function + // compute_v0_13_2_hashes: we will remove this legacy check once we can verify <0.13.2 hashes. let is_special_trusted_case = validation.chain_id == ChainId::Mainnet && starknet_version.is_tx_hash_inconsistent(); if is_special_trusted_case || validation.trust_transaction_hashes { @@ -229,7 +236,8 @@ fn transaction_hashes( // Panic safety: receipt count was checked earlier let got = receipts[index].transaction_hash(); let expected = tx.compute_hash(validation.chain_id.to_felt(), starknet_version, false); - if got != expected { + // compute_v0_13_2_hashes: do not check tx hash + if !compute_v0_13_2_hashes_mode && got != expected { return Err(BlockImportError::TransactionHash { index, got, expected }); } Ok(got) @@ -243,7 +251,13 @@ fn transaction_commitment( block: &UnverifiedFullBlock, validation: &BlockValidationContext, ) -> Result { - let starknet_version = block.header.protocol_version; + let mut starknet_version = block.header.protocol_version; + + // compute_v0_13_2_hashes mode + let compute_v0_13_2_hashes_mode = validation.compute_v0_13_2_hashes && starknet_version < StarknetVersion::V0_13_2; + if compute_v0_13_2_hashes_mode { + starknet_version = StarknetVersion::V0_13_2; + } let transaction_hashes = transaction_hashes(&block.receipts, &block.transactions, starknet_version, validation)?; @@ -268,8 +282,12 @@ fn transaction_commitment( compute_merkle_root::(&tx_hashes_with_signature) }; - if let Some(expected) = block.commitments.transaction_commitment.filter(|&expected| expected != got) { - return Err(BlockImportError::TransactionCommitment { got, expected }); + // compute_v0_13_2_hashes: do not check old commitment + if let Some(expected) = 
block.commitments.transaction_commitment { + // compute_v0_13_2_hashes: do not check old commitment + if !compute_v0_13_2_hashes_mode && expected != got { + return Err(BlockImportError::TransactionCommitment { got, expected }); + } } Ok(got) @@ -278,8 +296,15 @@ fn transaction_commitment( /// Compute the events commitment for a block. fn event_commitment( block: &UnverifiedFullBlock, - _validation: &BlockValidationContext, + validation: &BlockValidationContext, ) -> Result { + let mut starknet_version = block.header.protocol_version; + // compute_v0_13_2_hashes mode + let compute_v0_13_2_hashes_mode = validation.compute_v0_13_2_hashes && starknet_version < StarknetVersion::V0_13_2; + if compute_v0_13_2_hashes_mode { + starknet_version = StarknetVersion::V0_13_2; + } + let events_with_tx_hash: Vec<_> = block .receipts .iter() @@ -294,7 +319,7 @@ fn event_commitment( let got = if events_with_tx_hash.is_empty() { Felt::ZERO - } else if block.header.protocol_version < StarknetVersion::V0_13_2 { + } else if starknet_version < StarknetVersion::V0_13_2 { let events_hash = events_with_tx_hash.into_par_iter().map(|(_, event)| event.compute_hash_pedersen()).collect::>(); compute_merkle_root::(&events_hash) @@ -307,7 +332,8 @@ fn event_commitment( }; if let Some(expected) = block.commitments.event_commitment { - if expected != got { + // compute_v0_13_2_hashes: do not check old commitment + if !compute_v0_13_2_hashes_mode && expected != got { return Err(BlockImportError::EventCommitment { got, expected }); } } diff --git a/crates/madara/client/block_import/src/rayon.rs b/crates/madara/client/block_import/src/rayon.rs index 12b73b4ec..0ae54c7e4 100644 --- a/crates/madara/client/block_import/src/rayon.rs +++ b/crates/madara/client/block_import/src/rayon.rs @@ -33,17 +33,17 @@ impl RayonPool { { let max_tasks = self.max_tasks; let permit_id = self.permit_id.fetch_add(1, std::sync::atomic::Ordering::SeqCst); - tracing::debug!("acquire permit {permit_id}"); + 
tracing::trace!("acquire permit {permit_id}"); let permit = self.semaphore.acquire().await.expect("Poisoned semaphore"); let n_acquired_permits = self.n_acquired_permits.fetch_add(1, std::sync::atomic::Ordering::SeqCst) + 1; - tracing::debug!("acquired permit {permit_id} ({n_acquired_permits}/{max_tasks})"); + tracing::trace!("acquired permit {permit_id} ({n_acquired_permits}/{max_tasks})"); let res = global_spawn_rayon_task(func).await; drop(permit); let n_acquired_permits = self.n_acquired_permits.fetch_sub(1, std::sync::atomic::Ordering::SeqCst); - tracing::debug!("released permit {permit_id} ({n_acquired_permits}/{max_tasks})"); + tracing::trace!("released permit {permit_id} ({n_acquired_permits}/{max_tasks})"); res } } diff --git a/crates/madara/client/block_import/src/tests/block_import_utils.rs b/crates/madara/client/block_import/src/tests/block_import_utils.rs index 5d5d5ec76..4b9ecbb2f 100644 --- a/crates/madara/client/block_import/src/tests/block_import_utils.rs +++ b/crates/madara/client/block_import/src/tests/block_import_utils.rs @@ -57,6 +57,7 @@ pub fn create_validation_context(ignore_block_order: bool) -> BlockValidationCon trust_global_tries: false, trust_transaction_hashes: false, trust_class_hashes: false, + compute_v0_13_2_hashes: false, } } diff --git a/crates/madara/client/block_import/src/types.rs b/crates/madara/client/block_import/src/types.rs index 5808e502c..13328897a 100644 --- a/crates/madara/client/block_import/src/types.rs +++ b/crates/madara/client/block_import/src/types.rs @@ -35,6 +35,8 @@ pub struct UnverifiedHeader { #[derive(Clone, Debug, Eq, PartialEq)] pub struct BlockValidationContext { + /// The chain id of the current block. + pub chain_id: ChainId, /// Use the transaction hashes from the transaction receipts instead of computing them. pub trust_transaction_hashes: bool, /// Trust class hashes. 
@@ -45,8 +47,11 @@ pub struct BlockValidationContext { pub trust_global_tries: bool, /// Ignore the order of the blocks to allow starting at some height. pub ignore_block_order: bool, - /// The chain id of the current block. - pub chain_id: ChainId, + + /// Used for experimental p2p support. When p2p will be merged, this field will go away, and we will always + /// compute v0.13.2 hashes. However, we can't verify the old Self { diff --git a/crates/madara/client/block_import/src/verify_apply.rs b/crates/madara/client/block_import/src/verify_apply.rs index f830d23ab..0905719cd 100644 --- a/crates/madara/client/block_import/src/verify_apply.rs +++ b/crates/madara/client/block_import/src/verify_apply.rs @@ -9,6 +9,7 @@ use mp_block::{ header::PendingHeader, BlockId, Header, MadaraBlockInfo, MadaraBlockInner, MadaraMaybePendingBlock, MadaraMaybePendingBlockInfo, MadaraPendingBlockInfo, }; +use mp_chain_config::StarknetVersion; use mp_convert::{FeltHexDisplay, ToFelt}; use starknet_api::core::ChainId; use starknet_types_core::felt::Felt; @@ -40,13 +41,13 @@ impl VerifyApply { block: PreValidatedBlock, validation: BlockValidationContext, ) -> Result { - tracing::debug!("acquiring verify_apply exclusive"); + tracing::trace!("acquiring verify_apply exclusive"); let _exclusive = self.mutex.lock().await; - tracing::debug!("acquired verify_apply exclusive"); + tracing::trace!("acquired verify_apply exclusive"); let backend = Arc::clone(&self.backend); let res = global_spawn_rayon_task(move || verify_apply_inner(&backend, block, validation)).await; - tracing::debug!("releasing verify_apply exclusive"); + tracing::trace!("releasing verify_apply exclusive"); res } @@ -56,13 +57,13 @@ impl VerifyApply { block: PreValidatedPendingBlock, validation: BlockValidationContext, ) -> Result { - tracing::debug!("acquiring verify_apply exclusive (pending)"); + tracing::trace!("acquiring verify_apply exclusive (pending)"); let _exclusive = self.mutex.lock().await; - tracing::debug!("acquired 
verify_apply exclusive (pending)"); + tracing::trace!("acquired verify_apply exclusive (pending)"); let backend = Arc::clone(&self.backend); let res = global_spawn_rayon_task(move || verify_apply_pending_inner(&backend, block, validation)).await; - tracing::debug!("releasing verify_apply exclusive (pending)"); + tracing::trace!("releasing verify_apply exclusive (pending)"); res } } @@ -85,7 +86,7 @@ pub fn verify_apply_inner( // Block hash let (block_hash, header) = block_hash(&block, &validation, block_number, parent_block_hash, global_state_root)?; - tracing::debug!("verify_apply_inner store block {}", header.block_number); + tracing::trace!("verify_apply_inner store block {}", header.block_number); // store block, also uses rayon heavily internally backend @@ -101,8 +102,6 @@ pub fn verify_apply_inner( }, block.state_diff, block.converted_classes, - block.visited_segments, - None, ) .map_err(make_db_error("storing block in db"))?; @@ -146,8 +145,6 @@ pub fn verify_apply_pending_inner( }, block.state_diff, block.converted_classes, - block.visited_segments, - None, ) .map_err(make_db_error("storing block in db"))?; @@ -221,15 +218,15 @@ fn update_tries( return Ok(global_state_root); } - tracing::debug!( + tracing::trace!( "Deployed contracts: [{:?}]", block.state_diff.deployed_contracts.iter().map(|c| c.address.hex_display()).format(", ") ); - tracing::debug!( + tracing::trace!( "Declared classes: [{:?}]", block.state_diff.declared_classes.iter().map(|c| c.class_hash.hex_display()).format(", ") ); - tracing::debug!( + tracing::trace!( "Deprecated declared classes: [{:?}]", block.state_diff.deprecated_declared_classes.iter().map(|c| c.hex_display()).format(", ") ); @@ -308,10 +305,19 @@ fn block_hash( l1_gas_price, l1_da_mode, }; - let block_hash = header.compute_hash(validation.chain_id.to_felt()); + let block_hash = header.compute_hash(validation.chain_id.to_felt(), false); + + let compute_v0_13_2_hashes_mode = + validation.compute_v0_13_2_hashes && 
header.protocol_version < StarknetVersion::V0_13_2; if let Some(expected) = block.unverified_block_hash { + // compute_v0_13_2_hashes: do not check block hash, return the old one. + if compute_v0_13_2_hashes_mode { + return Ok((expected, header)); + } + // mismatched block hash is allowed for blocks 1466..=2242 on mainnet + // compute_v0_13_2_hashes: we will remove this legacy check once we can verify <0.13.2 hashes. let is_special_trusted_case = validation.chain_id == ChainId::Mainnet && (1466..=2242).contains(&block_number); if is_special_trusted_case { return Ok((expected, header)); @@ -411,7 +417,7 @@ mod verify_apply_tests { if populate_db { let header = create_dummy_header(); let pending_block = finalized_block_zero(header); - backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None, None).unwrap(); + backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![]).unwrap(); } // Create a validation context with the specified ignore_block_order flag @@ -530,6 +536,7 @@ mod verify_apply_tests { trust_global_tries, trust_transaction_hashes: false, trust_class_hashes: false, + compute_v0_13_2_hashes: false, }; // WHEN: We call update_tries with these parameters @@ -593,6 +600,7 @@ mod verify_apply_tests { trust_global_tries: false, trust_transaction_hashes: false, trust_class_hashes: false, + compute_v0_13_2_hashes: false, }, 1466, felt!("0x1"), @@ -665,7 +673,7 @@ mod verify_apply_tests { let mut header = create_dummy_header(); header.block_number = 0; let pending_block = finalized_block_zero(header); - backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None, None).unwrap(); + backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![]).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); @@ -691,7 +699,7 @@ mod verify_apply_tests { let mut header = create_dummy_header(); header.block_number = 0; let pending_block = finalized_block_zero(header); - 
backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![], None, None).unwrap(); + backend.store_block(pending_block.clone(), finalized_state_diff_zero(), vec![]).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); @@ -727,7 +735,7 @@ mod verify_apply_tests { let mut genesis_header = create_dummy_header(); genesis_header.block_number = 0; let genesis_block = finalized_block_zero(genesis_header.clone()); - backend.store_block(genesis_block, finalized_state_diff_zero(), vec![], None, None).unwrap(); + backend.store_block(genesis_block, finalized_state_diff_zero(), vec![]).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); @@ -773,7 +781,7 @@ mod verify_apply_tests { let mut genesis_header = create_dummy_header(); genesis_header.block_number = 0; let genesis_block = finalized_block_zero(genesis_header.clone()); - backend.store_block(genesis_block, finalized_state_diff_zero(), vec![], None, None).unwrap(); + backend.store_block(genesis_block, finalized_state_diff_zero(), vec![]).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap(), Some(0)); diff --git a/crates/madara/client/block_import/src/verify_apply/classes.rs b/crates/madara/client/block_import/src/verify_apply/classes.rs index e2b44de69..e06fd690d 100644 --- a/crates/madara/client/block_import/src/verify_apply/classes.rs +++ b/crates/madara/client/block_import/src/verify_apply/classes.rs @@ -27,19 +27,19 @@ pub fn class_trie_root( }) .collect(); - tracing::debug!("class_trie inserting"); + tracing::trace!("class_trie inserting"); for (key, value) in updates { let bytes = key.to_bytes_be(); let bv: BitVec = bytes.as_bits()[5..].to_owned(); class_trie.insert(bonsai_identifier::CLASS, &bv, &value)?; } - tracing::debug!("class_trie committing"); + tracing::trace!("class_trie committing"); class_trie.commit(BasicId::new(block_number))?; let root_hash = class_trie.root_hash(bonsai_identifier::CLASS)?; - tracing::debug!("class_trie committed"); + 
tracing::trace!("class_trie committed"); Ok(root_hash) } diff --git a/crates/madara/client/block_import/src/verify_apply/contracts.rs b/crates/madara/client/block_import/src/verify_apply/contracts.rs index 6f982c9b9..3a9de2562 100644 --- a/crates/madara/client/block_import/src/verify_apply/contracts.rs +++ b/crates/madara/client/block_import/src/verify_apply/contracts.rs @@ -11,7 +11,7 @@ use starknet_types_core::felt::Felt; use starknet_types_core::hash::{Pedersen, StarkHash}; use std::collections::HashMap; -#[derive(Debug, Default)] +#[derive(trace, Default)] struct ContractLeaf { pub class_hash: Option, pub storage_root: Option, @@ -40,7 +40,7 @@ pub fn contract_trie_root( let mut contract_storage_trie = backend.contract_storage_trie(); - tracing::debug!("contract_storage_trie inserting"); + tracing::trace!("contract_storage_trie inserting"); // First we insert the contract storage changes for ContractStorageDiffItem { address, storage_entries } in storage_diffs { @@ -53,7 +53,7 @@ pub fn contract_trie_root( contract_leafs.insert(*address, Default::default()); } - tracing::debug!("contract_storage_trie commit"); + tracing::trace!("contract_storage_trie commit"); // Then we commit them contract_storage_trie.commit(BasicId::new(block_number))?; @@ -88,12 +88,12 @@ pub fn contract_trie_root( contract_trie.insert(bonsai_identifier::CONTRACT, &k, &v)?; } - tracing::debug!("contract_trie committing"); + tracing::trace!("contract_trie committing"); contract_trie.commit(BasicId::new(block_number))?; let root_hash = contract_trie.root_hash(bonsai_identifier::CONTRACT)?; - tracing::debug!("contract_trie committed"); + tracing::trace!("contract_trie committed"); Ok(root_hash) } diff --git a/crates/madara/client/block_production/Cargo.toml b/crates/madara/client/block_production/Cargo.toml index ca4311a1a..cfb82b292 100644 --- a/crates/madara/client/block_production/Cargo.toml +++ b/crates/madara/client/block_production/Cargo.toml @@ -35,7 +35,6 @@ testing = 
["blockifier/testing", "mc-db/testing", "mockall"] # Madara mc-analytics.workspace = true -mc-block-import.workspace = true mc-db.workspace = true mc-exec.workspace = true mc-mempool.workspace = true diff --git a/crates/madara/client/block_production/src/close_block.rs b/crates/madara/client/block_production/src/close_block.rs index ffc520497..bab4c9cac 100644 --- a/crates/madara/client/block_production/src/close_block.rs +++ b/crates/madara/client/block_production/src/close_block.rs @@ -1,60 +1,64 @@ -use mc_block_import::{ - BlockImportError, BlockImportResult, BlockImporter, BlockValidationContext, UnverifiedFullBlock, UnverifiedHeader, +use mc_db::{MadaraBackend, MadaraStorageError}; +use mp_block::{ + commitments::CommitmentComputationContext, MadaraPendingBlock, PendingFullBlock, TransactionWithReceipt, }; -use mp_block::{header::PendingHeader, MadaraPendingBlock, MadaraPendingBlockInfo, VisitedSegments}; use mp_class::ConvertedClass; +use mp_convert::ToFelt; +use mp_receipt::EventWithTransactionHash; use mp_state_update::StateDiff; -use starknet_api::core::ChainId; +use starknet_core::types::Felt; +use std::iter; -/// Close the block (convert from pending to closed), and store to db. This is delegated to the block import module. -#[tracing::instrument(skip(importer, state_diff, declared_classes), fields(module = "BlockProductionTask"))] -pub async fn close_block( - importer: &BlockImporter, +/// Returns the block_hash of the saved block. 
+#[tracing::instrument(skip(backend, state_diff, declared_classes), fields(module = "BlockProductionTask"))] +pub fn close_and_save_block( + backend: &MadaraBackend, block: MadaraPendingBlock, - state_diff: &StateDiff, - chain_id: ChainId, + state_diff: StateDiff, block_number: u64, declared_classes: Vec, - visited_segments: VisitedSegments, -) -> Result { - let validation = BlockValidationContext::new(chain_id).trust_transaction_hashes(true); +) -> Result { + let block = PendingFullBlock { + header: block.info.header, + state_diff, + events: block + .inner + .receipts + .iter() + .flat_map(|receipt| { + receipt + .events() + .iter() + .cloned() + .map(|event| EventWithTransactionHash { transaction_hash: receipt.transaction_hash(), event }) + }) + .collect(), + transactions: block + .inner + .transactions + .into_iter() + .zip(block.inner.receipts) + .map(|(transaction, receipt)| TransactionWithReceipt { receipt, transaction }) + .collect(), + }; - let MadaraPendingBlock { info, inner } = block; - let MadaraPendingBlockInfo { header, tx_hashes: _tx_hashes } = info; + // Apply state, compute state root + let new_global_state_root = backend.apply_state(block_number, iter::once(&block.state_diff))?; - // Header - let PendingHeader { - parent_block_hash, - sequencer_address, - block_timestamp, - protocol_version, - l1_gas_price, - l1_da_mode, - } = header; + // Compute the block merkle commitments. 
+ let block = block.close_block( + &CommitmentComputationContext { + protocol_version: backend.chain_config().latest_protocol_version, + chain_id: backend.chain_config().chain_id.to_felt(), + }, + block_number, + new_global_state_root, + true, + ); + let block_hash = block.block_hash; - let block = importer - .pre_validate( - UnverifiedFullBlock { - unverified_block_number: Some(block_number), - header: UnverifiedHeader { - parent_block_hash: Some(parent_block_hash), - sequencer_address, - block_timestamp, - protocol_version, - l1_gas_price, - l1_da_mode, - }, - state_diff: state_diff.clone(), - transactions: inner.transactions, - receipts: inner.receipts, - trusted_converted_classes: declared_classes, - commitments: Default::default(), // the block importer will compute the commitments for us - visited_segments: Some(visited_segments), - ..Default::default() - }, - validation.clone(), - ) - .await?; + backend.store_full_block(block)?; + backend.class_db_store_block(block_number, &declared_classes)?; - importer.verify_apply(block, validation.clone()).await + Ok(block_hash) } diff --git a/crates/madara/client/block_production/src/lib.rs b/crates/madara/client/block_production/src/lib.rs index b5783b8dd..4ee5ab394 100644 --- a/crates/madara/client/block_production/src/lib.rs +++ b/crates/madara/client/block_production/src/lib.rs @@ -15,13 +15,12 @@ //! so that's where block-production integration tests are the simplest to add. //! L1-L2 testing is a bit harder to setup, but we should definitely make the testing more comprehensive here. 
-use crate::close_block::close_block; +use crate::close_block::close_and_save_block; use crate::metrics::BlockProductionMetrics; use blockifier::blockifier::transaction_executor::{TransactionExecutor, BLOCK_STATE_ACCESS_ERR}; use blockifier::bouncer::BouncerWeights; use blockifier::transaction::errors::TransactionExecutionError; use finalize_execution_state::StateDiffToStateMapError; -use mc_block_import::{BlockImportError, BlockImporter}; use mc_db::db_block_id::DbBlockId; use mc_db::{MadaraBackend, MadaraStorageError}; use mc_exec::{BlockifierStateAdapter, ExecutionContext}; @@ -67,8 +66,6 @@ pub enum Error { Execution(#[from] TransactionExecutionError), #[error(transparent)] ExecutionContext(#[from] mc_exec::Error), - #[error("Import error: {0:#}")] - Import(#[from] mc_block_import::BlockImportError), #[error("Unexpected error: {0:#}")] Unexpected(Cow<'static, str>), #[error("Class compilation error when continuing the pending block: {0:#}")] @@ -86,9 +83,11 @@ struct ContinueBlockResult { /// Tracks which segments of Cairo program code were accessed during transaction execution, /// organized by class hash. This information is used as input for SNOS (Starknet OS) /// when generating proofs of execution. + #[allow(unused)] visited_segments: VisitedSegments, /// The current state of resource consumption tracked by the bouncer + #[allow(unused)] bouncer_weights: BouncerWeights, /// Statistics about transaction processing during this continuation @@ -106,7 +105,6 @@ struct ContinueBlockResult { /// To understand block production in madara, you should probably start with the [`mp_chain_config::ChainConfig`] /// documentation. pub struct BlockProductionTask { - importer: Arc, backend: Arc, mempool: Arc, block: MadaraPendingBlock, @@ -128,14 +126,12 @@ impl BlockProductionTask { /// /// This avoids re-executing transaction by re-adding them to the [Mempool], /// as was done before. 
- pub async fn close_pending_block( + pub fn close_pending_block( backend: &MadaraBackend, - importer: &BlockImporter, metrics: &BlockProductionMetrics, ) -> Result<(), Cow<'static, str>> { let err_pending_block = |err| format!("Getting pending block: {err:#}"); let err_pending_state_diff = |err| format!("Getting pending state update: {err:#}"); - let err_pending_visited_segments = |err| format!("Getting pending visited segments: {err:#}"); let err_pending_clear = |err| format!("Clearing pending block: {err:#}"); let err_latest_block_n = |err| format!("Failed to get latest block number: {err:#}"); @@ -155,8 +151,6 @@ impl BlockProductionTask { .expect("Checked above"); let pending_state_diff = backend.get_pending_block_state_update().map_err(err_pending_state_diff)?; - let pending_visited_segments = - backend.get_pending_block_segments().map_err(err_pending_visited_segments)?.unwrap_or_default(); let mut classes = pending_state_diff .deprecated_declared_classes @@ -186,17 +180,8 @@ impl BlockProductionTask { let n_txs = pending_block.inner.transactions.len(); // Close and import the pending block - close_block( - importer, - pending_block, - &pending_state_diff, - backend.chain_config().chain_id.clone(), - block_n, - declared_classes, - pending_visited_segments, - ) - .await - .map_err(|err| format!("Failed to close pending block: {err:#}"))?; + close_and_save_block(backend, pending_block, pending_state_diff, block_n, declared_classes) + .map_err(|err| format!("Failed to close pending block: {err:#}"))?; // Flush changes to disk, pending block removal and adding the next // block happens atomically @@ -218,14 +203,13 @@ impl BlockProductionTask { Ok(()) } - pub async fn new( + pub fn new( backend: Arc, - importer: Arc, mempool: Arc, metrics: Arc, l1_data_provider: Arc, ) -> Result { - if let Err(err) = Self::close_pending_block(&backend, &importer, &metrics).await { + if let Err(err) = Self::close_pending_block(&backend, &metrics) { // This error should not stop 
block production from working. If it happens, that's too bad. We drop the pending state and start from // a fresh one. tracing::error!("Failed to continue the pending block state: {err:#}"); @@ -245,7 +229,6 @@ impl BlockProductionTask { .tx_executor(); Ok(Self { - importer, backend, mempool, executor, @@ -369,12 +352,7 @@ impl BlockProductionTask { /// Closes the current block and prepares for the next one #[tracing::instrument(skip(self), fields(module = "BlockProductionTask"))] - async fn close_and_prepare_next_block( - &mut self, - state_diff: StateDiff, - visited_segments: VisitedSegments, - start_time: Instant, - ) -> Result<(), Error> { + fn close_and_prepare_next_block(&mut self, state_diff: StateDiff, start_time: Instant) -> Result<(), Error> { let block_n = self.block_n(); // Convert the pending block to a closed block and save to db let parent_block_hash = Felt::ZERO; // temp parent block hash @@ -390,16 +368,8 @@ impl BlockProductionTask { let n_txs = block_to_close.inner.transactions.len(); // Close and import the block - let import_result = close_block( - &self.importer, - block_to_close, - &state_diff, - self.backend.chain_config().chain_id.clone(), - block_n, - declared_classes, - visited_segments, - ) - .await?; + let block_hash = + close_and_save_block(&self.backend, block_to_close, state_diff.clone(), block_n, declared_classes)?; // Removes nonces in the mempool nonce cache which have been included // into the current block. 
@@ -408,10 +378,10 @@ impl BlockProductionTask { } // Flush changes to disk - self.backend.flush().map_err(|err| BlockImportError::Internal(format!("DB flushing error: {err:#}").into()))?; + self.backend.flush().map_err(|err| Error::Unexpected(format!("DB flushing error: {err:#}").into()))?; // Update parent hash for new pending block - self.block.info.header.parent_block_hash = import_result.block_hash; + self.block.info.header.parent_block_hash = block_hash; // Prepare executor for next block self.executor = @@ -480,7 +450,7 @@ impl BlockProductionTask { } #[tracing::instrument(skip(self), fields(module = "BlockProductionTask"))] - pub async fn on_pending_time_tick(&mut self) -> Result { + pub fn on_pending_time_tick(&mut self) -> Result { let current_pending_tick = self.current_pending_tick; if current_pending_tick == 0 { return Ok(false); @@ -490,8 +460,8 @@ impl BlockProductionTask { let ContinueBlockResult { state_diff: mut new_state_diff, - visited_segments, - bouncer_weights, + visited_segments: _, + bouncer_weights: _, stats, block_now_full, } = self.continue_block(self.backend.chain_config().bouncer_config.block_max_capacity)?; @@ -511,44 +481,33 @@ impl BlockProductionTask { self.update_block_hash_registry(&mut new_state_diff, block_n)?; tracing::info!("Resource limits reached, closing block early"); - self.close_and_prepare_next_block(new_state_diff, visited_segments, start_time).await?; + self.close_and_prepare_next_block(new_state_diff, start_time)?; return Ok(true); } // Store pending block // todo, prefer using the block import pipeline? 
- self.backend.store_block( - self.block.clone().into(), - new_state_diff, - self.declared_classes.clone(), - Some(visited_segments), - Some(bouncer_weights), - )?; + self.backend.store_block(self.block.clone().into(), new_state_diff, self.declared_classes.clone())?; // do not forget to flush :) - self.backend.flush().map_err(|err| BlockImportError::Internal(format!("DB flushing error: {err:#}").into()))?; + self.backend.flush().map_err(|err| Error::Unexpected(format!("DB flushing error: {err:#}").into()))?; Ok(false) } /// This creates a block, continuing the current pending block state up to the full bouncer limit. #[tracing::instrument(skip(self), fields(module = "BlockProductionTask"))] - pub(crate) async fn on_block_time(&mut self) -> Result<(), Error> { + pub(crate) fn on_block_time(&mut self) -> Result<(), Error> { let block_n = self.block_n(); - tracing::debug!("closing block #{}", block_n); + tracing::debug!("Closing block #{}", block_n); // Complete the block with full bouncer capacity let start_time = Instant::now(); - let ContinueBlockResult { - state_diff: mut new_state_diff, - visited_segments, - bouncer_weights: _weights, - stats: _stats, - block_now_full: _block_now_full, - } = self.continue_block(self.backend.chain_config().bouncer_config.block_max_capacity)?; + let ContinueBlockResult { state_diff: mut new_state_diff, .. } = + self.continue_block(self.backend.chain_config().bouncer_config.block_max_capacity)?; self.update_block_hash_registry(&mut new_state_diff, block_n)?; - self.close_and_prepare_next_block(new_state_diff, visited_segments, start_time).await + self.close_and_prepare_next_block(new_state_diff, start_time) } #[tracing::instrument(skip(self, ctx), fields(module = "BlockProductionTask"))] @@ -569,7 +528,7 @@ impl BlockProductionTask { loop { tokio::select! 
{ instant = interval_block_time.tick() => { - if let Err(err) = self.on_block_time().await { + if let Err(err) = self.on_block_time() { tracing::error!("Block production task has errored: {err:#}"); // Clear pending block. The reason we do this is because // if the error happened because the closed block is @@ -595,7 +554,7 @@ impl BlockProductionTask { continue } - match self.on_pending_time_tick().await { + match self.on_pending_time_tick() { Ok(block_closed) => { if block_closed { interval_pending_block_update.reset_at(instant + interval_pending_block_update.period()); @@ -624,14 +583,12 @@ impl BlockProductionTask { #[cfg(test)] mod tests { - use std::{collections::HashMap, sync::Arc}; - - use blockifier::{ - bouncer::BouncerWeights, compiled_class_hash, nonce, state::cached_state::StateMaps, storage_key, + use crate::{ + finalize_execution_state::state_map_to_state_diff, metrics::BlockProductionMetrics, BlockProductionTask, }; + use blockifier::{compiled_class_hash, nonce, state::cached_state::StateMaps, storage_key}; use mc_db::MadaraBackend; use mc_mempool::Mempool; - use mp_block::VisitedSegments; use mp_chain_config::ChainConfig; use mp_convert::ToFelt; use mp_state_update::{ @@ -644,10 +601,7 @@ mod tests { felt, patricia_key, }; use starknet_types_core::felt::Felt; - - use crate::{ - finalize_execution_state::state_map_to_state_diff, metrics::BlockProductionMetrics, BlockProductionTask, - }; + use std::{collections::HashMap, sync::Arc}; type TxFixtureInfo = (mp_transactions::Transaction, mp_receipt::TransactionReceipt); @@ -657,14 +611,8 @@ mod tests { } #[rstest::fixture] - fn setup( - backend: Arc, - ) -> (Arc, Arc, Arc) { - ( - Arc::clone(&backend), - Arc::new(mc_block_import::BlockImporter::new(Arc::clone(&backend), None).unwrap()), - Arc::new(BlockProductionMetrics::register()), - ) + fn setup(backend: Arc) -> (Arc, Arc) { + (Arc::clone(&backend), Arc::new(BlockProductionMetrics::register())) } #[rstest::fixture] @@ -758,37 +706,37 @@ mod tests { }) 
} - #[rstest::fixture] - fn visited_segments() -> mp_block::VisitedSegments { - mp_block::VisitedSegments(vec![ - mp_block::VisitedSegmentEntry { class_hash: Felt::ONE, segments: vec![0, 1, 2] }, - mp_block::VisitedSegmentEntry { class_hash: Felt::TWO, segments: vec![0, 1, 2] }, - mp_block::VisitedSegmentEntry { class_hash: Felt::THREE, segments: vec![0, 1, 2] }, - ]) - } - - #[rstest::fixture] - fn bouncer_weights() -> BouncerWeights { - BouncerWeights { - builtin_count: blockifier::bouncer::BuiltinCount { - add_mod: 0, - bitwise: 1, - ecdsa: 2, - ec_op: 3, - keccak: 4, - mul_mod: 5, - pedersen: 6, - poseidon: 7, - range_check: 8, - range_check96: 9, - }, - gas: 10, - message_segment_length: 11, - n_events: 12, - n_steps: 13, - state_diff_size: 14, - } - } + // #[rstest::fixture] + // fn visited_segments() -> mp_block::VisitedSegments { + // mp_block::VisitedSegments(vec![ + // mp_block::VisitedSegmentEntry { class_hash: Felt::ONE, segments: vec![0, 1, 2] }, + // mp_block::VisitedSegmentEntry { class_hash: Felt::TWO, segments: vec![0, 1, 2] }, + // mp_block::VisitedSegmentEntry { class_hash: Felt::THREE, segments: vec![0, 1, 2] }, + // ]) + // } + + // #[rstest::fixture] + // fn bouncer_weights() -> BouncerWeights { + // BouncerWeights { + // builtin_count: blockifier::bouncer::BuiltinCount { + // add_mod: 0, + // bitwise: 1, + // ecdsa: 2, + // ec_op: 3, + // keccak: 4, + // mul_mod: 5, + // pedersen: 6, + // poseidon: 7, + // range_check: 8, + // range_check96: 9, + // }, + // gas: 10, + // message_segment_length: 11, + // n_events: 12, + // n_steps: 13, + // state_diff_size: 14, + // } + // } #[rstest::rstest] fn block_prod_state_map_to_state_diff(backend: Arc) { @@ -916,10 +864,9 @@ mod tests { /// This happens if a full node is shutdown (gracefully or not) midway /// during block production. 
#[rstest::rstest] - #[tokio::test] #[allow(clippy::too_many_arguments)] - async fn block_prod_pending_close_on_startup_pass( - setup: (Arc, Arc, Arc), + fn block_prod_pending_close_on_startup_pass( + setup: (Arc, Arc), #[with(Felt::ONE)] tx_invoke_v0: TxFixtureInfo, #[with(Felt::TWO)] tx_l1_handler: TxFixtureInfo, #[with(Felt::THREE)] tx_declare_v0: TxFixtureInfo, @@ -934,10 +881,8 @@ mod tests { #[from(converted_class_sierra)] #[with(Felt::TWO, Felt::TWO)] converted_class_sierra_2: mp_class::ConvertedClass, - visited_segments: VisitedSegments, - bouncer_weights: BouncerWeights, ) { - let (backend, importer, metrics) = setup; + let (backend, metrics) = setup; // ================================================================== // // PART 1: we prepare the pending block // @@ -1028,8 +973,6 @@ mod tests { }, pending_state_diff.clone(), converted_classes.clone(), - Some(visited_segments.clone()), - Some(bouncer_weights), ) .expect("Failed to store pending block"); @@ -1038,9 +981,7 @@ mod tests { // ================================================================== // // This should load the pending block from db and close it - BlockProductionTask::::close_pending_block(&backend, &importer, &metrics) - .await - .expect("Failed to close pending block"); + BlockProductionTask::::close_pending_block(&backend, &metrics).expect("Failed to close pending block"); // Now we check this was the case. assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0); @@ -1084,10 +1025,9 @@ mod tests { /// at startup, then it is closed and stored in db on top of the latest /// block. 
#[rstest::rstest] - #[tokio::test] #[allow(clippy::too_many_arguments)] - async fn block_prod_pending_close_on_startup_pass_on_top( - setup: (Arc, Arc, Arc), + fn block_prod_pending_close_on_startup_pass_on_top( + setup: (Arc, Arc), // Transactions #[from(tx_invoke_v0)] @@ -1121,12 +1061,8 @@ mod tests { #[from(converted_class_sierra)] #[with(Felt::TWO, Felt::TWO)] converted_class_sierra_2: mp_class::ConvertedClass, - - // Pending data - visited_segments: VisitedSegments, - bouncer_weights: BouncerWeights, ) { - let (backend, importer, metrics) = setup; + let (backend, metrics) = setup; // ================================================================== // // PART 1: we prepare the ready block // @@ -1194,8 +1130,6 @@ mod tests { }, ready_state_diff.clone(), ready_converted_classes.clone(), - Some(visited_segments.clone()), - Some(bouncer_weights), ) .expect("Failed to store pending block"); @@ -1275,8 +1209,6 @@ mod tests { }, pending_state_diff.clone(), pending_converted_classes.clone(), - Some(visited_segments.clone()), - Some(bouncer_weights), ) .expect("Failed to store pending block"); @@ -1286,9 +1218,7 @@ mod tests { // This should load the pending block from db and close it on top of the // previous block. - BlockProductionTask::::close_pending_block(&backend, &importer, &metrics) - .await - .expect("Failed to close pending block"); + BlockProductionTask::::close_pending_block(&backend, &metrics).expect("Failed to close pending block"); // Now we check this was the case. assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 1); @@ -1348,16 +1278,11 @@ mod tests { /// This test makes sure that it is possible to start the block production /// task even if there is no pending block in db at the time of startup. 
#[rstest::rstest] - #[tokio::test] - async fn block_prod_pending_close_on_startup_no_pending( - setup: (Arc, Arc, Arc), - ) { - let (backend, importer, metrics) = setup; + fn block_prod_pending_close_on_startup_no_pending(setup: (Arc, Arc)) { + let (backend, metrics) = setup; // Simulates starting block production without a pending block in db - BlockProductionTask::::close_pending_block(&backend, &importer, &metrics) - .await - .expect("Failed to close pending block"); + BlockProductionTask::::close_pending_block(&backend, &metrics).expect("Failed to close pending block"); // Now we check no block was added to the db assert_eq!(backend.get_latest_block_n().unwrap(), None); @@ -1370,10 +1295,9 @@ mod tests { /// This will arise if switching from a full node to a sequencer with the /// same db. #[rstest::rstest] - #[tokio::test] #[allow(clippy::too_many_arguments)] - async fn block_prod_pending_close_on_startup_no_visited_segments( - setup: (Arc, Arc, Arc), + fn block_prod_pending_close_on_startup_no_visited_segments( + setup: (Arc, Arc), #[with(Felt::ONE)] tx_invoke_v0: TxFixtureInfo, #[with(Felt::TWO)] tx_l1_handler: TxFixtureInfo, #[with(Felt::THREE)] tx_declare_v0: TxFixtureInfo, @@ -1388,9 +1312,8 @@ mod tests { #[from(converted_class_sierra)] #[with(Felt::TWO, Felt::TWO)] converted_class_sierra_2: mp_class::ConvertedClass, - bouncer_weights: BouncerWeights, ) { - let (backend, importer, metrics) = setup; + let (backend, metrics) = setup; // ================================================================== // // PART 1: we prepare the pending block // @@ -1462,8 +1385,7 @@ mod tests { }, pending_state_diff.clone(), converted_classes.clone(), - None, // No visited segments! - Some(bouncer_weights), + // None, // No visited segments! 
) .expect("Failed to store pending block"); @@ -1472,9 +1394,7 @@ mod tests { // ================================================================== // // This should load the pending block from db and close it - BlockProductionTask::::close_pending_block(&backend, &importer, &metrics) - .await - .expect("Failed to close pending block"); + BlockProductionTask::::close_pending_block(&backend, &metrics).expect("Failed to close pending block"); // Now we check this was the case. assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 0); @@ -1517,19 +1437,16 @@ mod tests { /// This test makes sure that closing the pending block from db will fail if /// the pending state diff references a non-existing class. #[rstest::rstest] - #[tokio::test] #[allow(clippy::too_many_arguments)] - async fn block_prod_pending_close_on_startup_fail_missing_class( - setup: (Arc, Arc, Arc), + fn block_prod_pending_close_on_startup_fail_missing_class( + setup: (Arc, Arc), #[with(Felt::ONE)] tx_invoke_v0: TxFixtureInfo, #[with(Felt::TWO)] tx_l1_handler: TxFixtureInfo, #[with(Felt::THREE)] tx_declare_v0: TxFixtureInfo, tx_deploy: TxFixtureInfo, tx_deploy_account: TxFixtureInfo, - visited_segments: VisitedSegments, - bouncer_weights: BouncerWeights, ) { - let (backend, importer, metrics) = setup; + let (backend, metrics) = setup; // ================================================================== // // PART 1: we prepare the pending block // @@ -1595,8 +1512,6 @@ mod tests { }, pending_state_diff.clone(), converted_classes.clone(), - Some(visited_segments.clone()), - Some(bouncer_weights), ) .expect("Failed to store pending block"); @@ -1606,9 +1521,7 @@ mod tests { // This should fail since the pending state update references a // non-existent declared class at address 0x1 - let err = BlockProductionTask::::close_pending_block(&backend, &importer, &metrics) - .await - .expect_err("Should error"); + let err = BlockProductionTask::::close_pending_block(&backend, &metrics).expect_err("Should 
error"); assert!(err.contains("Failed to retrieve pending declared class at hash")); assert!(err.contains("not found in db")); @@ -1617,19 +1530,16 @@ mod tests { /// This test makes sure that closing the pending block from db will fail if /// the pending state diff references a non-existing legacy class. #[rstest::rstest] - #[tokio::test] #[allow(clippy::too_many_arguments)] - async fn block_prod_pending_close_on_startup_fail_missing_class_legacy( - setup: (Arc, Arc, Arc), + fn block_prod_pending_close_on_startup_fail_missing_class_legacy( + setup: (Arc, Arc), #[with(Felt::ONE)] tx_invoke_v0: TxFixtureInfo, #[with(Felt::TWO)] tx_l1_handler: TxFixtureInfo, #[with(Felt::THREE)] tx_declare_v0: TxFixtureInfo, tx_deploy: TxFixtureInfo, tx_deploy_account: TxFixtureInfo, - visited_segments: VisitedSegments, - bouncer_weights: BouncerWeights, ) { - let (backend, importer, metrics) = setup; + let (backend, metrics) = setup; // ================================================================== // // PART 1: we prepare the pending block // @@ -1695,8 +1605,6 @@ mod tests { }, pending_state_diff.clone(), converted_classes.clone(), - Some(visited_segments.clone()), - Some(bouncer_weights), ) .expect("Failed to store pending block"); @@ -1706,9 +1614,7 @@ mod tests { // This should fail since the pending state update references a // non-existent declared class at address 0x0 - let err = BlockProductionTask::::close_pending_block(&backend, &importer, &metrics) - .await - .expect_err("Should error"); + let err = BlockProductionTask::::close_pending_block(&backend, &metrics).expect_err("Should error"); assert!(err.contains("Failed to retrieve pending declared class at hash")); assert!(err.contains("not found in db")); diff --git a/crates/madara/client/db/Cargo.toml b/crates/madara/client/db/Cargo.toml index 0f9c30e2d..975ec41d6 100644 --- a/crates/madara/client/db/Cargo.toml +++ b/crates/madara/client/db/Cargo.toml @@ -38,6 +38,8 @@ starknet_api = { workspace = true } # Other 
anyhow.workspace = true bincode = { workspace = true } +bitvec = { workspace = true } +futures = { workspace = true } librocksdb-sys = { workspace = true } rayon = { workspace = true } rocksdb.workspace = true @@ -72,7 +74,7 @@ tracing-subscriber = { workspace = true, features = ["env-filter"] } tempfile = "3.10" lazy_static = { workspace = true } mp-transactions = { workspace = true } - +rstest = { workspace = true } [features] default = [] diff --git a/crates/madara/client/db/src/block_db.rs b/crates/madara/client/db/src/block_db.rs index 30d8ec31b..e0bc587f5 100644 --- a/crates/madara/client/db/src/block_db.rs +++ b/crates/madara/client/db/src/block_db.rs @@ -1,15 +1,13 @@ use crate::db_block_id::{DbBlockId, DbBlockIdResolvable}; +use crate::MadaraStorageError; use crate::{Column, DatabaseExt, MadaraBackend, WriteBatchWithTransaction}; -use crate::{MadaraStorageError, DB}; use anyhow::Context; -use blockifier::bouncer::BouncerWeights; use mp_block::header::{GasPrices, PendingHeader}; use mp_block::{ BlockId, BlockTag, MadaraBlock, MadaraBlockInfo, MadaraBlockInner, MadaraMaybePendingBlock, - MadaraMaybePendingBlockInfo, MadaraPendingBlock, MadaraPendingBlockInfo, VisitedSegments, + MadaraMaybePendingBlockInfo, MadaraPendingBlock, MadaraPendingBlockInfo, }; use mp_state_update::StateDiff; -use rocksdb::WriteOptions; use starknet_api::core::ChainId; use starknet_types_core::felt::Felt; use starknet_types_rpc::EmittedEvent; @@ -25,20 +23,10 @@ struct ChainInfo { const ROW_CHAIN_INFO: &[u8] = b"chain_info"; const ROW_PENDING_INFO: &[u8] = b"pending_info"; const ROW_PENDING_STATE_UPDATE: &[u8] = b"pending_state_update"; -const ROW_PENDING_SEGMENTS: &[u8] = b"pending_segments"; -const ROW_PENDING_BOUNCER_WEIGHTS: &[u8] = b"pending_bouncer_weights"; const ROW_PENDING_INNER: &[u8] = b"pending"; const ROW_SYNC_TIP: &[u8] = b"sync_tip"; const ROW_L1_LAST_CONFIRMED_BLOCK: &[u8] = b"l1_last"; -#[tracing::instrument(skip(db), fields(module = "BlockDB"))] -pub fn 
get_latest_block_n(db: &DB) -> Result> { - let col = db.get_column(Column::BlockStorageMeta); - let Some(res) = db.get_cf(&col, ROW_SYNC_TIP)? else { return Ok(None) }; - let res = bincode::deserialize(&res)?; - Ok(Some(res)) -} - #[derive(Debug, PartialEq, Eq)] pub struct TxIndex(pub u64); @@ -106,7 +94,7 @@ impl MadaraBackend { #[tracing::instrument(skip(self), fields(module = "BlockDB"))] fn get_block_info_from_block_n(&self, block_n: u64) -> Result> { let col = self.db.get_column(Column::BlockNToBlockInfo); - let res = self.db.get_cf(&col, bincode::serialize(&block_n)?)?; + let res = self.db.get_cf(&col, block_n.to_be_bytes())?; let Some(res) = res else { return Ok(None) }; let block = bincode::deserialize(&res)?; Ok(Some(block)) @@ -123,7 +111,8 @@ impl MadaraBackend { #[tracing::instrument(skip(self), fields(module = "BlockDB"))] pub fn get_latest_block_n(&self) -> Result> { - get_latest_block_n(&self.db) + Ok(self.head_status().latest_full_block_n()) + // get_latest_block_n(&self.db) } // Pending block quirk: We should act as if there is always a pending block in db, to match @@ -203,28 +192,6 @@ impl MadaraBackend { Ok(res) } - #[tracing::instrument(skip(self), fields(module = "BlockDB"))] - pub fn get_pending_block_segments(&self) -> Result> { - let col = self.db.get_column(Column::BlockStorageMeta); - let Some(res) = self.db.get_cf(&col, ROW_PENDING_SEGMENTS)? else { - // See pending block quirk - return Ok(None); - }; - let res = Some(bincode::deserialize(&res)?); - Ok(res) - } - - #[tracing::instrument(skip(self), fields(module = "BlockDB"))] - pub fn get_pending_block_bouncer_weights(&self) -> Result> { - let col = self.db.get_column(Column::BlockStorageMeta); - let Some(res) = self.db.get_cf(&col, ROW_PENDING_BOUNCER_WEIGHTS)? 
else { - // See pending block quirk - return Ok(None); - }; - let res = Some(bincode::deserialize(&res)?); - Ok(res) - } - #[tracing::instrument(skip(self), fields(module = "BlockDB"))] pub fn get_l1_last_confirmed_block(&self) -> Result> { let col = self.db.get_column(Column::BlockStorageMeta); @@ -236,27 +203,13 @@ impl MadaraBackend { // DB write #[tracing::instrument(skip(self), fields(module = "BlockDB"))] - pub(crate) fn block_db_store_pending( - &self, - block: &MadaraPendingBlock, - state_update: &StateDiff, - visited_segments: Option, - bouncer_weights: Option, - ) -> Result<()> { + pub(crate) fn block_db_store_pending(&self, block: &MadaraPendingBlock, state_update: &StateDiff) -> Result<()> { let mut tx = WriteBatchWithTransaction::default(); let col = self.db.get_column(Column::BlockStorageMeta); tx.put_cf(&col, ROW_PENDING_INFO, bincode::serialize(&block.info)?); tx.put_cf(&col, ROW_PENDING_INNER, bincode::serialize(&block.inner)?); tx.put_cf(&col, ROW_PENDING_STATE_UPDATE, bincode::serialize(&state_update)?); - if let Some(visited_segments) = visited_segments { - tx.put_cf(&col, ROW_PENDING_SEGMENTS, bincode::serialize(&visited_segments)?); - } - if let Some(bouncer_weights) = bouncer_weights { - tx.put_cf(&col, ROW_PENDING_BOUNCER_WEIGHTS, bincode::serialize(&bouncer_weights)?); - } - let mut writeopts = WriteOptions::new(); - writeopts.disable_wal(true); - self.db.write_opt(tx, &writeopts)?; + self.db.write_opt(tx, &self.writeopts_no_wal)?; Ok(()) } @@ -267,20 +220,14 @@ impl MadaraBackend { tx.delete_cf(&col, ROW_PENDING_INFO); tx.delete_cf(&col, ROW_PENDING_INNER); tx.delete_cf(&col, ROW_PENDING_STATE_UPDATE); - tx.delete_cf(&col, ROW_PENDING_SEGMENTS); - tx.delete_cf(&col, ROW_PENDING_BOUNCER_WEIGHTS); - let mut writeopts = WriteOptions::new(); - writeopts.disable_wal(true); - self.db.write_opt(tx, &writeopts)?; + self.db.write_opt(tx, &self.writeopts_no_wal)?; Ok(()) } #[tracing::instrument(skip(self), fields(module = "BlockDB"))] pub fn 
write_last_confirmed_block(&self, l1_last: u64) -> Result<()> { let col = self.db.get_column(Column::BlockStorageMeta); - let mut writeopts = WriteOptions::default(); // todo move that in db - writeopts.disable_wal(true); - self.db.put_cf_opt(&col, ROW_L1_LAST_CONFIRMED_BLOCK, bincode::serialize(&l1_last)?, &writeopts)?; + self.db.put_cf_opt(&col, ROW_L1_LAST_CONFIRMED_BLOCK, bincode::serialize(&l1_last)?, &self.writeopts_no_wal)?; Ok(()) } @@ -308,7 +255,7 @@ impl MadaraBackend { tx.put_cf(&tx_hash_to_block_n, bincode::serialize(hash)?, &block_n_encoded); } - tx.put_cf(&block_n_to_block, &block_n_encoded, bincode::serialize(&block.info)?); + tx.put_cf(&block_n_to_block, block.info.header.block_number.to_be_bytes(), bincode::serialize(&block.info)?); tx.put_cf(&block_hash_to_block_n, block_hash_encoded, &block_n_encoded); tx.put_cf(&block_n_to_block_inner, &block_n_encoded, bincode::serialize(&block.inner)?); tx.put_cf(&block_n_to_state_diff, &block_n_encoded, bincode::serialize(state_diff)?); @@ -349,9 +296,7 @@ impl MadaraBackend { tx.delete_cf(&meta, ROW_PENDING_INNER); tx.delete_cf(&meta, ROW_PENDING_STATE_UPDATE); - let mut writeopts = WriteOptions::new(); - writeopts.disable_wal(true); - self.db.write_opt(tx, &writeopts)?; + self.db.write_opt(tx, &self.writeopts_no_wal)?; Ok(()) } diff --git a/crates/madara/client/db/src/chain_head.rs b/crates/madara/client/db/src/chain_head.rs new file mode 100644 index 000000000..19a4fa381 --- /dev/null +++ b/crates/madara/client/db/src/chain_head.rs @@ -0,0 +1,76 @@ +use crate::{Column, MadaraBackend, MadaraStorageError}; +use crate::{DatabaseExt, DB}; +use std::sync::atomic::{AtomicU64, Ordering::SeqCst}; + +#[derive(serde::Serialize, serde::Deserialize, Debug, Default)] +#[serde(transparent)] +pub struct BlockNStatus(AtomicU64); + +impl BlockNStatus { + pub fn get(&self) -> Option { + self.0.load(SeqCst).checked_sub(1) + } + pub fn set(&self, block_n: Option) { + self.0.store(block_n.map(|block_n| block_n + 
1).unwrap_or(0), SeqCst) + } +} + +impl Clone for BlockNStatus { + fn clone(&self) -> Self { + Self(self.0.load(SeqCst).into()) + } +} + +/// Counter of the latest block currently in the database. +/// We have multiple counters because the sync pipeline is split in sub-pipelines. +#[derive(serde::Serialize, serde::Deserialize, Debug, Default)] +pub struct ChainHead { + pub headers: BlockNStatus, + pub state_diffs: BlockNStatus, + pub classes: BlockNStatus, + pub transactions: BlockNStatus, + pub events: BlockNStatus, + pub l1_head: BlockNStatus, + pub global_trie: BlockNStatus, +} + +impl ChainHead { + pub fn latest_full_block_n(&self) -> Option { + self.headers + .get() + .min(self.state_diffs.get()) + .min(self.classes.get()) + .min(self.transactions.get()) + .min(self.events.get()) + .min(self.global_trie.get()) + } + + pub fn next_full_block(&self) -> u64 { + self.latest_full_block_n().map(|n| n + 1).unwrap_or(0) + } + + pub(crate) fn load_from_db(db: &DB) -> Result { + let col = db.get_column(Column::BlockStorageMeta); + if let Some(res) = db.get_pinned_cf(&col, ROW_HEAD_STATUS)? 
{ + return Ok(bincode::deserialize(res.as_ref())?); + } + Ok(Default::default()) + } +} + +const ROW_HEAD_STATUS: &[u8] = b"head_status"; + +impl MadaraBackend { + pub fn head_status(&self) -> &ChainHead { + &self.head_status + } + pub fn load_head_status_from_db(&mut self) -> Result<(), MadaraStorageError> { + self.head_status = ChainHead::load_from_db(&self.db)?; + Ok(()) + } + pub fn save_head_status_to_db(&self) -> Result<(), MadaraStorageError> { + let col = self.db.get_column(Column::BlockStorageMeta); + self.db.put_cf_opt(&col, ROW_HEAD_STATUS, bincode::serialize(&self.head_status)?, &self.writeopts_no_wal)?; + Ok(()) + } +} diff --git a/crates/madara/client/db/src/class_db.rs b/crates/madara/client/db/src/class_db.rs index f3a3914ed..85eb4927d 100644 --- a/crates/madara/client/db/src/class_db.rs +++ b/crates/madara/client/db/src/class_db.rs @@ -1,14 +1,11 @@ -use std::sync::Arc; - -use mp_class::{ClassInfo, CompiledSierra, ConvertedClass, LegacyConvertedClass, SierraConvertedClass}; -use rayon::{iter::ParallelIterator, slice::ParallelSlice}; -use rocksdb::WriteOptions; -use starknet_types_core::felt::Felt; - use crate::{ db_block_id::{DbBlockId, DbBlockIdResolvable}, Column, DatabaseExt, MadaraBackend, MadaraStorageError, WriteBatchWithTransaction, DB_UPDATES_BATCH_SIZE, }; +use mp_class::{ClassInfo, CompiledSierra, ConvertedClass, LegacyConvertedClass, SierraConvertedClass}; +use rayon::{iter::ParallelIterator, slice::ParallelSlice}; +use starknet_types_core::felt::Felt; +use std::sync::Arc; const LAST_KEY: &[u8] = &[0xFF; 64]; @@ -28,7 +25,7 @@ impl MadaraBackend { nonpending_col: Column, ) -> Result, MadaraStorageError> { // todo: smallint here to avoid alloc - tracing::debug!("class db get encoded kv, key={key:#x}"); + tracing::trace!("class db get encoded kv, key={key:#x}"); let key_encoded = bincode::serialize(key)?; // Get from pending db, then normal db if not found. 
@@ -38,7 +35,7 @@ impl MadaraBackend { return Ok(Some(bincode::deserialize(&res)?)); // found in pending } } - tracing::debug!("class db get encoded kv, state is not pending"); + tracing::trace!("class db get encoded kv, state is not pending"); let col = self.db.get_column(nonpending_col); let Some(val) = self.db.get_pinned_cf(&col, &key_encoded)? else { return Ok(None) }; @@ -64,6 +61,7 @@ impl MadaraBackend { Column::ClassInfo, )? else { + tracing::debug!("no class info"); return Ok(None); }; @@ -75,6 +73,7 @@ impl MadaraBackend { _ => false, }; if !valid { + tracing::debug!("rejected {:?}", (requested_id, info.block_id)); return Ok(None); } tracing::debug!("class db get class info, state is valid"); @@ -97,7 +96,7 @@ impl MadaraBackend { ) -> Result, MadaraStorageError> { let Some(requested_id) = id.resolve_db_block_id(self)? else { return Ok(None) }; - tracing::debug!("sierra compiled {requested_id:?} {compiled_class_hash:#x}"); + tracing::trace!("sierra compiled {requested_id:?} {compiled_class_hash:#x}"); let Some(compiled) = self.class_db_get_encoded_kv::( requested_id.is_pending(), @@ -159,8 +158,10 @@ impl MadaraBackend { col_info: Column, col_compiled: Column, ) -> Result<(), MadaraStorageError> { - let mut writeopts = WriteOptions::new(); - writeopts.disable_wal(true); + tracing::trace!( + "Store class {block_id:?} {:?}", + converted_classes.iter().map(|c| c.class_hash()).collect::>() + ); converted_classes.par_chunks(DB_UPDATES_BATCH_SIZE).try_for_each_init( || self.db.get_column(col_info), @@ -182,7 +183,7 @@ impl MadaraBackend { ); } } - self.db.write_opt(batch, &writeopts)?; + self.db.write_opt(batch, &self.writeopts_no_wal)?; Ok::<_, MadaraStorageError>(()) }, )?; @@ -205,7 +206,7 @@ impl MadaraBackend { // TODO: find a way to avoid this allocation batch.put_cf(col, &key_bin, bincode::serialize(&value)?); } - self.db.write_opt(batch, &writeopts)?; + self.db.write_opt(batch, &self.writeopts_no_wal)?; Ok::<_, MadaraStorageError>(()) }, )?; @@ -215,7 
+216,7 @@ impl MadaraBackend { /// NB: This functions needs to run on the rayon thread pool #[tracing::instrument(skip(self, converted_classes), fields(module = "ClassDB"))] - pub(crate) fn class_db_store_block( + pub fn class_db_store_block( &self, block_number: u64, converted_classes: &[ConvertedClass], @@ -239,15 +240,17 @@ impl MadaraBackend { #[tracing::instrument(fields(module = "ClassDB"))] pub(crate) fn class_db_clear_pending(&self) -> Result<(), MadaraStorageError> { - let mut writeopts = WriteOptions::new(); - writeopts.disable_wal(true); - - self.db.delete_range_cf_opt(&self.db.get_column(Column::PendingClassInfo), &[] as _, LAST_KEY, &writeopts)?; + self.db.delete_range_cf_opt( + &self.db.get_column(Column::PendingClassInfo), + &[] as _, + LAST_KEY, + &self.writeopts_no_wal, + )?; self.db.delete_range_cf_opt( &self.db.get_column(Column::PendingClassCompiled), &[] as _, LAST_KEY, - &writeopts, + &self.writeopts_no_wal, )?; Ok(()) diff --git a/crates/madara/client/db/src/contract_db.rs b/crates/madara/client/db/src/contract_db.rs index 70666b463..d913c320b 100644 --- a/crates/madara/client/db/src/contract_db.rs +++ b/crates/madara/client/db/src/contract_db.rs @@ -1,7 +1,10 @@ #![doc = include_str!("../docs/flat_storage.md")] -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; +use mp_state_update::{ + ContractStorageDiffItem, DeployedContractItem, NonceUpdate, ReplacedClassItem, StateDiff, StorageEntry, +}; use rayon::{iter::ParallelIterator, slice::ParallelSlice}; use rocksdb::{BoundColumnFamily, IteratorMode, ReadOptions, WriteOptions}; use serde::Serialize; @@ -12,6 +15,46 @@ use crate::{ Column, DatabaseExt, MadaraBackend, MadaraStorageError, WriteBatchWithTransaction, DB, DB_UPDATES_BATCH_SIZE, }; +pub(crate) struct ContractDbBlockUpdate { + contract_class_updates: Vec<(Felt, Felt)>, + contract_nonces_updates: Vec<(Felt, Felt)>, + contract_kv_updates: Vec<((Felt, Felt), Felt)>, +} + +impl ContractDbBlockUpdate { + pub fn 
from_state_diff(state_diff: StateDiff) -> Self { + let nonces_from_updates = + state_diff.nonces.into_iter().map(|NonceUpdate { contract_address, nonce }| (contract_address, nonce)); + + // let nonce_map: HashMap = nonces_from_deployed.chain(nonces_from_updates).collect(); // set nonce to zero when contract deployed + let nonce_map: HashMap = nonces_from_updates.collect(); + + let contract_class_updates_replaced = state_diff + .replaced_classes + .into_iter() + .map(|ReplacedClassItem { contract_address, class_hash }| (contract_address, class_hash)); + + let contract_class_updates_deployed = state_diff + .deployed_contracts + .into_iter() + .map(|DeployedContractItem { address, class_hash }| (address, class_hash)); + + let contract_class_updates = + contract_class_updates_replaced.chain(contract_class_updates_deployed).collect::>(); + let contract_nonces_updates = nonce_map.into_iter().collect::>(); + + let contract_kv_updates = state_diff + .storage_diffs + .into_iter() + .flat_map(|ContractStorageDiffItem { address, storage_entries }| { + storage_entries.into_iter().map(move |StorageEntry { key, value }| ((address, key), value)) + }) + .collect::>(); + + Self { contract_class_updates, contract_nonces_updates, contract_kv_updates } + } +} + // NB: Columns cf needs prefix extractor of these length during creation pub(crate) const CONTRACT_STORAGE_PREFIX_EXTRACTOR: usize = 64; pub(crate) const CONTRACT_CLASS_HASH_PREFIX_EXTRACTOR: usize = 32; @@ -135,58 +178,102 @@ impl MadaraBackend { ) } - /// NB: This functions needs to run on the rayon thread pool - #[tracing::instrument( - skip(self, block_number, contract_class_updates, contract_nonces_updates, contract_kv_updates), - fields(module = "ContractDB") - )] - pub(crate) fn contract_db_store_block( + fn contract_db_store_chunk( + &self, + col: &Arc, + block_number: u32, + chunk: impl IntoIterator, Felt)>, + tx: &mut WriteBatchWithTransaction, + ) -> Result<(), MadaraStorageError> { + for (key, value) in chunk { + // 
TODO: find a way to avoid this allocation + let key = [key.as_ref(), &block_number.to_be_bytes() as &[u8]].concat(); + tx.put_cf(col, key, bincode::serialize(&value)?); + } + Ok(()) + } + + #[tracing::instrument(skip(self, block_number, value, tx), fields(module = "ContractDB"))] + pub(crate) fn contract_db_store_block_no_rayon( &self, block_number: u64, - contract_class_updates: &[(Felt, Felt)], - contract_nonces_updates: &[(Felt, Felt)], - contract_kv_updates: &[((Felt, Felt), Felt)], + value: ContractDbBlockUpdate, + tx: &mut WriteBatchWithTransaction, ) -> Result<(), MadaraStorageError> { let block_number = u32::try_from(block_number).map_err(|_| MadaraStorageError::InvalidBlockNumber)?; - let mut writeopts = WriteOptions::new(); - writeopts.disable_wal(true); + let contract_to_class_hash_col = self.db.get_column(Column::ContractToClassHashes); + let contract_to_nonces_col = self.db.get_column(Column::ContractToNonces); + let contract_storage_col = self.db.get_column(Column::ContractStorage); - fn write_chunk( - db: &DB, - writeopts: &WriteOptions, - col: &Arc, - block_number: u32, - chunk: impl IntoIterator, Felt)>, - ) -> Result<(), MadaraStorageError> { - let mut batch = WriteBatchWithTransaction::default(); - for (key, value) in chunk { - // TODO: find a way to avoid this allocation - let key = [key.as_ref(), &block_number.to_be_bytes() as &[u8]].concat(); - batch.put_cf(col, key, bincode::serialize(&value)?); - } - db.write_opt(batch, writeopts)?; - Ok(()) - } + self.contract_db_store_chunk( + &contract_to_class_hash_col, + block_number, + value.contract_class_updates.into_iter().map(|(k, v)| (k.to_bytes_be(), v)), + tx, + )?; + self.contract_db_store_chunk( + &contract_to_nonces_col, + block_number, + value.contract_nonces_updates.into_iter().map(|(k, v)| (k.to_bytes_be(), v)), + tx, + )?; + self.contract_db_store_chunk( + &contract_storage_col, + block_number, + value.contract_kv_updates.into_iter().map(|((k1, k2), v)| { + let mut key = [0u8; 64]; + 
key[..32].copy_from_slice(k1.to_bytes_be().as_ref()); + key[32..].copy_from_slice(k2.to_bytes_be().as_ref()); + (key, v) + }), + tx, + )?; + Ok(()) + } + + /// NB: This functions needs to run on the rayon thread pool + #[tracing::instrument(skip(self, block_number, value), fields(module = "ContractDB"))] + pub(crate) fn contract_db_store_block( + &self, + block_number: u64, + value: ContractDbBlockUpdate, + ) -> Result<(), MadaraStorageError> { + let block_number = u32::try_from(block_number).map_err(|_| MadaraStorageError::InvalidBlockNumber)?; - contract_class_updates.par_chunks(DB_UPDATES_BATCH_SIZE).try_for_each_init( + value.contract_class_updates.par_chunks(DB_UPDATES_BATCH_SIZE).try_for_each_init( || self.db.get_column(Column::ContractToClassHashes), |col, chunk| { - write_chunk(&self.db, &writeopts, col, block_number, chunk.iter().map(|(k, v)| (k.to_bytes_be(), *v))) + let mut batch = WriteBatchWithTransaction::default(); + self.contract_db_store_chunk( + col, + block_number, + chunk.iter().map(|(k, v)| (k.to_bytes_be(), *v)), + &mut batch, + )?; + self.db.write_opt(batch, &self.writeopts_no_wal)?; + Result::<(), MadaraStorageError>::Ok(()) }, )?; - contract_nonces_updates.par_chunks(DB_UPDATES_BATCH_SIZE).try_for_each_init( + value.contract_nonces_updates.par_chunks(DB_UPDATES_BATCH_SIZE).try_for_each_init( || self.db.get_column(Column::ContractToNonces), |col, chunk| { - write_chunk(&self.db, &writeopts, col, block_number, chunk.iter().map(|(k, v)| (k.to_bytes_be(), *v))) + let mut batch = WriteBatchWithTransaction::default(); + self.contract_db_store_chunk( + col, + block_number, + chunk.iter().map(|(k, v)| (k.to_bytes_be(), *v)), + &mut batch, + )?; + self.db.write_opt(batch, &self.writeopts_no_wal)?; + Result::<(), MadaraStorageError>::Ok(()) }, )?; - contract_kv_updates.par_chunks(DB_UPDATES_BATCH_SIZE).try_for_each_init( + value.contract_kv_updates.par_chunks(DB_UPDATES_BATCH_SIZE).try_for_each_init( || self.db.get_column(Column::ContractStorage), 
|col, chunk| { - write_chunk( - &self.db, - &writeopts, + let mut batch = WriteBatchWithTransaction::default(); + self.contract_db_store_chunk( col, block_number, chunk.iter().map(|((k1, k2), v)| { @@ -195,7 +282,10 @@ impl MadaraBackend { key[32..].copy_from_slice(k2.to_bytes_be().as_ref()); (key, *v) }), - ) + &mut batch, + )?; + self.db.write_opt(batch, &self.writeopts_no_wal)?; + Result::<(), MadaraStorageError>::Ok(()) }, )?; @@ -203,19 +293,8 @@ impl MadaraBackend { } /// NB: This functions needs to run on the rayon thread pool - #[tracing::instrument( - skip(self, contract_class_updates, contract_nonces_updates, contract_kv_updates), - fields(module = "ContractDB") - )] - pub(crate) fn contract_db_store_pending( - &self, - contract_class_updates: &[(Felt, Felt)], - contract_nonces_updates: &[(Felt, Felt)], - contract_kv_updates: &[((Felt, Felt), Felt)], - ) -> Result<(), MadaraStorageError> { - let mut writeopts = WriteOptions::new(); - writeopts.disable_wal(true); - + #[tracing::instrument(skip(self, value), fields(module = "ContractDB"))] + pub(crate) fn contract_db_store_pending(&self, value: ContractDbBlockUpdate) -> Result<(), MadaraStorageError> { // Note: pending has keys in bincode, not bytes fn write_chunk( @@ -233,17 +312,19 @@ impl MadaraBackend { Ok(()) } - contract_class_updates.par_chunks(DB_UPDATES_BATCH_SIZE).try_for_each_init( + value.contract_class_updates.par_chunks(DB_UPDATES_BATCH_SIZE).try_for_each_init( || self.db.get_column(Column::PendingContractToClassHashes), - |col, chunk| write_chunk(&self.db, &writeopts, col, chunk.iter().map(|(k, v)| (k, *v))), + |col, chunk| write_chunk(&self.db, &self.writeopts_no_wal, col, chunk.iter().map(|(k, v)| (k, *v))), )?; - contract_nonces_updates.par_chunks(DB_UPDATES_BATCH_SIZE).try_for_each_init( + value.contract_nonces_updates.par_chunks(DB_UPDATES_BATCH_SIZE).try_for_each_init( || self.db.get_column(Column::PendingContractToNonces), - |col, chunk| write_chunk(&self.db, &writeopts, col, 
chunk.iter().map(|(k, v)| (k, *v))), + |col, chunk| write_chunk(&self.db, &self.writeopts_no_wal, col, chunk.iter().map(|(k, v)| (k, *v))), )?; - contract_kv_updates.par_chunks(DB_UPDATES_BATCH_SIZE).try_for_each_init( + value.contract_kv_updates.par_chunks(DB_UPDATES_BATCH_SIZE).try_for_each_init( || self.db.get_column(Column::PendingContractStorage), - |col, chunk| write_chunk(&self.db, &writeopts, col, chunk.iter().map(|((k1, k2), v)| ((k1, k2), *v))), + |col, chunk| { + write_chunk(&self.db, &self.writeopts_no_wal, col, chunk.iter().map(|((k1, k2), v)| ((k1, k2), *v))) + }, )?; Ok(()) @@ -251,26 +332,23 @@ impl MadaraBackend { #[tracing::instrument(fields(module = "ContractDB"))] pub(crate) fn contract_db_clear_pending(&self) -> Result<(), MadaraStorageError> { - let mut writeopts = WriteOptions::new(); - writeopts.disable_wal(true); - self.db.delete_range_cf_opt( &self.db.get_column(Column::PendingContractToNonces), &[] as _, LAST_KEY, - &writeopts, + &self.writeopts_no_wal, )?; self.db.delete_range_cf_opt( &self.db.get_column(Column::PendingContractToClassHashes), &[] as _, LAST_KEY, - &writeopts, + &self.writeopts_no_wal, )?; self.db.delete_range_cf_opt( &self.db.get_column(Column::PendingContractStorage), &[] as _, LAST_KEY, - &writeopts, + &self.writeopts_no_wal, )?; Ok(()) diff --git a/crates/madara/client/db/src/devnet_db.rs b/crates/madara/client/db/src/devnet_db.rs index d4354aa38..c071f90ec 100644 --- a/crates/madara/client/db/src/devnet_db.rs +++ b/crates/madara/client/db/src/devnet_db.rs @@ -1,6 +1,5 @@ use crate::DatabaseExt; use crate::{Column, MadaraBackend, MadaraStorageError}; -use rocksdb::WriteOptions; use serde::{Deserialize, Serialize}; use starknet_types_core::felt::Felt; @@ -35,9 +34,7 @@ impl MadaraBackend { #[tracing::instrument(skip(self, devnet_keys), fields(module = "DevnetDB"))] pub fn set_devnet_predeployed_keys(&self, devnet_keys: DevnetPredeployedKeys) -> Result<()> { let nonce_column = self.db.get_column(Column::Devnet); - let 
mut writeopts = WriteOptions::default(); - writeopts.disable_wal(true); - self.db.put_cf_opt(&nonce_column, DEVNET_KEYS, bincode::serialize(&devnet_keys)?, &writeopts)?; + self.db.put_cf_opt(&nonce_column, DEVNET_KEYS, bincode::serialize(&devnet_keys)?, &self.writeopts_no_wal)?; Ok(()) } } diff --git a/crates/madara/client/db/src/error.rs b/crates/madara/client/db/src/error.rs index c0fcbaeef..13a7c2b34 100644 --- a/crates/madara/client/db/src/error.rs +++ b/crates/madara/client/db/src/error.rs @@ -27,6 +27,8 @@ pub enum MadaraStorageError { "Missing compiled class for class with hash {class_hash:#x} (compiled_class_hash={compiled_class_hash:#x}" )] MissingCompiledClass { class_hash: Felt, compiled_class_hash: Felt }, + #[error("Batch is empty")] + EmptyBatch, } pub type BonsaiStorageError = bonsai_trie::BonsaiStorageError; diff --git a/crates/madara/client/db/src/l1_db.rs b/crates/madara/client/db/src/l1_db.rs index eb7bf4fd6..100e7ee3c 100644 --- a/crates/madara/client/db/src/l1_db.rs +++ b/crates/madara/client/db/src/l1_db.rs @@ -1,9 +1,8 @@ -use rocksdb::{IteratorMode, WriteOptions}; -use serde::{Deserialize, Serialize}; -use starknet_api::core::Nonce; - use crate::error::DbError; use crate::{Column, DatabaseExt, MadaraBackend, MadaraStorageError}; +use rocksdb::IteratorMode; +use serde::{Deserialize, Serialize}; +use starknet_api::core::Nonce; type Result = std::result::Result; @@ -103,13 +102,11 @@ impl MadaraBackend { last_synced_event_block: LastSyncedEventBlock, ) -> Result<(), DbError> { let messaging_column = self.db.get_column(Column::L1Messaging); - let mut writeopts = WriteOptions::default(); // todo move that in db - writeopts.disable_wal(true); self.db.put_cf_opt( &messaging_column, LAST_SYNCED_L1_EVENT_BLOCK, bincode::serialize(&last_synced_event_block)?, - &writeopts, + &self.writeopts_no_wal, )?; Ok(()) } @@ -123,9 +120,12 @@ impl MadaraBackend { #[tracing::instrument(skip(self, nonce), fields(module = "L1DB"))] pub fn 
set_l1_messaging_nonce(&self, nonce: Nonce) -> Result<(), DbError> { let nonce_column = self.db.get_column(Column::L1MessagingNonce); - let mut writeopts = WriteOptions::default(); - writeopts.disable_wal(true); - self.db.put_cf_opt(&nonce_column, bincode::serialize(&nonce)?, /* empty value */ [], &writeopts)?; + self.db.put_cf_opt( + &nonce_column, + bincode::serialize(&nonce)?, + /* empty value */ [], + &self.writeopts_no_wal, + )?; Ok(()) } diff --git a/crates/madara/client/db/src/lib.rs b/crates/madara/client/db/src/lib.rs index 33bc43c4e..5f4fb42e4 100644 --- a/crates/madara/client/db/src/lib.rs +++ b/crates/madara/client/db/src/lib.rs @@ -1,9 +1,9 @@ //! Madara database use anyhow::Context; -use block_db::get_latest_block_n; use bonsai_db::{BonsaiDb, DatabaseKeyMapping}; use bonsai_trie::{BonsaiStorage, BonsaiStorageConfig}; +use chain_head::ChainHead; use db_metrics::DbMetrics; use mp_chain_config::ChainConfig; use mp_utils::service::{MadaraServiceId, PowerOfTwo, Service, ServiceId}; @@ -21,6 +21,7 @@ use std::sync::Arc; use std::{fmt, fs}; use tokio::sync::{mpsc, oneshot}; +mod chain_head; mod db_version; mod error; mod rocksdb_options; @@ -37,7 +38,9 @@ pub mod devnet_db; pub mod l1_db; pub mod mempool_db; pub mod storage_updates; +pub mod stream; pub mod tests; +mod update_global_trie; pub use bonsai_db::GlobalTrie; pub use bonsai_trie::{id::BasicId, MultiProof, ProofNode}; @@ -409,7 +412,6 @@ impl EventChannels { } } -/// Madara client database backend singleton. 
pub struct MadaraBackend { backup_handle: Option>, db: Arc, @@ -418,21 +420,28 @@ pub struct MadaraBackend { snapshots: Arc, trie_log_config: TrieLogConfig, sender_block_info: tokio::sync::broadcast::Sender, + head_status: ChainHead, sender_event: EventChannels, - write_opt_no_wal: WriteOptions, + /// WriteOptions with wal disabled + writeopts_no_wal: WriteOptions, #[cfg(any(test, feature = "testing"))] _temp_dir: Option, } impl fmt::Debug for MadaraBackend { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("MadaraBackend") - .field("backup_handle", &self.backup_handle) + let mut s = f.debug_struct("MadaraBackend"); + s.field("backup_handle", &self.backup_handle) .field("db", &self.db) .field("chain_config", &self.chain_config) .field("db_metrics", &self.db_metrics) - .field("sender_block_info", &self.sender_block_info) - .finish() + .field("sender_block_info", &self.sender_block_info); + + #[cfg(any(test, feature = "testing"))] + { + s.field("_temp_dir", &self._temp_dir); + } + s.finish() } } @@ -472,6 +481,10 @@ impl DatabaseService { ) .await?; + if let Some(block_n) = handle.head_status().latest_full_block_n() { + tracing::info!("📦 Database latest block: #{block_n}"); + } + Ok(Self { handle }) } @@ -511,25 +524,43 @@ impl MadaraBackend { &self.chain_config } - #[cfg(any(test, feature = "testing"))] - pub fn open_for_testing(chain_config: Arc) -> Arc { - let temp_dir = tempfile::TempDir::with_prefix("madara-test").unwrap(); - let db = open_rocksdb(temp_dir.as_ref()).unwrap(); - let snapshots = Arc::new(Snapshots::new(Arc::clone(&db), None, Some(0), 5)); - Arc::new(Self { - backup_handle: None, + fn new( + backup_handle: Option>, + db: Arc, + chain_config: Arc, + trie_log_config: TrieLogConfig, + ) -> anyhow::Result { + let snapshots = Arc::new(Snapshots::new( + Arc::clone(&db), + ChainHead::load_from_db(&db).context("Getting latest block_n from database")?.global_trie.get(), + Some(trie_log_config.max_kept_snapshots), + 
trie_log_config.snapshot_interval, + )); + Ok(Self { + writeopts_no_wal: make_write_opt_no_wal(), + db_metrics: DbMetrics::register().context("Registering db metrics")?, + backup_handle, db, chain_config, - db_metrics: DbMetrics::register().unwrap(), - snapshots, - trie_log_config: Default::default(), sender_block_info: tokio::sync::broadcast::channel(100).0, sender_event: EventChannels::new(100), - write_opt_no_wal: make_write_opt_no_wal(), - _temp_dir: Some(temp_dir), + trie_log_config: Default::default(), + head_status: ChainHead::default(), + snapshots, + #[cfg(any(test, feature = "testing"))] + _temp_dir: None, }) } + #[cfg(any(test, feature = "testing"))] + pub fn open_for_testing(chain_config: Arc) -> Arc { + let temp_dir = tempfile::TempDir::with_prefix("madara-test").unwrap(); + let db = open_rocksdb(temp_dir.as_ref()).unwrap(); + let mut backend = Self::new(None, db, chain_config, Default::default()).unwrap(); + backend._temp_dir = Some(temp_dir); + Arc::new(backend) + } + /// Open the db. 
pub async fn open( db_config_dir: PathBuf, @@ -568,30 +599,12 @@ impl MadaraBackend { }; let db = open_rocksdb(&db_path)?; - let current_block_n = get_latest_block_n(&db).context("Getting latest block_n from database")?; - let snapshots = Arc::new(Snapshots::new( - Arc::clone(&db), - current_block_n, - Some(trie_log_config.max_kept_snapshots), - trie_log_config.snapshot_interval, - )); - let backend = Arc::new(Self { - db_metrics: DbMetrics::register().context("Registering db metrics")?, - backup_handle, - db, - chain_config: Arc::clone(&chain_config), - snapshots, - trie_log_config, - sender_block_info: tokio::sync::broadcast::channel(100).0, - sender_event: EventChannels::new(100), - write_opt_no_wal: make_write_opt_no_wal(), - #[cfg(any(test, feature = "testing"))] - _temp_dir: None, - }); + let mut backend = Self::new(backup_handle, db, chain_config, trie_log_config)?; backend.check_configuration()?; + backend.load_head_status_from_db()?; backend.update_metrics(); - Ok(backend) + Ok(Arc::new(backend)) } pub fn flush(&self) -> anyhow::Result<()> { diff --git a/crates/madara/client/db/src/mempool_db.rs b/crates/madara/client/db/src/mempool_db.rs index 7aade92b2..9f6f786ce 100644 --- a/crates/madara/client/db/src/mempool_db.rs +++ b/crates/madara/client/db/src/mempool_db.rs @@ -101,7 +101,7 @@ impl MadaraBackend { // atomically. 
let col = self.db.get_column(Column::MempoolTransactions); - self.db.delete_cf_opt(&col, bincode::serialize(tx_hash)?, &self.write_opt_no_wal)?; + self.db.delete_cf_opt(&col, bincode::serialize(tx_hash)?, &self.writeopts_no_wal)?; tracing::debug!("remove_mempool_tx {:?}", tx_hash); Ok(()) } diff --git a/crates/madara/client/db/src/storage_updates.rs b/crates/madara/client/db/src/storage_updates.rs index 2476a5f03..4bbe1c845 100644 --- a/crates/madara/client/db/src/storage_updates.rs +++ b/crates/madara/client/db/src/storage_updates.rs @@ -1,15 +1,179 @@ +use crate::contract_db::ContractDbBlockUpdate; use crate::db_block_id::DbBlockId; +use crate::Column; +use crate::DatabaseExt; use crate::MadaraBackend; use crate::MadaraStorageError; -use blockifier::bouncer::BouncerWeights; -use mp_block::VisitedSegments; -use mp_block::{MadaraBlock, MadaraMaybePendingBlock, MadaraMaybePendingBlockInfo, MadaraPendingBlock}; -use mp_class::ConvertedClass; -use mp_state_update::{ - ContractStorageDiffItem, DeployedContractItem, NonceUpdate, ReplacedClassItem, StateDiff, StorageEntry, +use crate::WriteBatchWithTransaction; +use mp_block::FullBlock; +use mp_block::MadaraBlockInfo; +use mp_block::MadaraBlockInner; +use mp_block::TransactionWithReceipt; +use mp_block::{ + BlockHeaderWithSignatures, MadaraBlock, MadaraMaybePendingBlock, MadaraMaybePendingBlockInfo, MadaraPendingBlock, }; +use mp_class::ConvertedClass; +use mp_receipt::EventWithTransactionHash; +use mp_receipt::TransactionReceipt; +use mp_state_update::StateDiff; use starknet_types_core::felt::Felt; -use std::collections::HashMap; + +impl MadaraBackend { + pub fn store_full_block(&self, block: FullBlock) -> Result<(), MadaraStorageError> { + let block_n = block.header.block_number; + self.store_block_header(BlockHeaderWithSignatures { + header: block.header, + block_hash: block.block_hash, + consensus_signatures: vec![], + })?; + self.store_transactions(block_n, block.transactions)?; + self.store_state_diff(block_n, 
block.state_diff)?; + self.store_events(block_n, block.events)?; + Ok(()) + } + + pub fn store_block_header(&self, header: BlockHeaderWithSignatures) -> Result<(), MadaraStorageError> { + let mut tx = WriteBatchWithTransaction::default(); + let block_n = header.header.block_number; + + let block_hash_to_block_n = self.db.get_column(Column::BlockHashToBlockN); + let block_n_to_block = self.db.get_column(Column::BlockNToBlockInfo); + + let info = MadaraBlockInfo { header: header.header, block_hash: header.block_hash, tx_hashes: vec![] }; + + let block_n_encoded = bincode::serialize(&block_n)?; + tx.put_cf(&block_n_to_block, block_n.to_be_bytes(), bincode::serialize(&info)?); + tx.put_cf(&block_hash_to_block_n, &bincode::serialize(&header.block_hash)?, &block_n_encoded); + + self.db.write_opt(tx, &self.writeopts_no_wal)?; + Ok(()) + } + + pub fn store_transactions( + &self, + block_n: u64, + value: Vec, + ) -> Result<(), MadaraStorageError> { + let mut tx = WriteBatchWithTransaction::default(); + + let block_n_to_block = self.db.get_column(Column::BlockNToBlockInfo); + let block_n_to_block_inner = self.db.get_column(Column::BlockNToBlockInner); + + let block_n_encoded = bincode::serialize(&block_n)?; + + // update block info tx hashes (we should get rid of this field at some point IMO) + let mut block_info: MadaraBlockInfo = + bincode::deserialize(&self.db.get_cf(&block_n_to_block, block_n.to_be_bytes())?.unwrap_or_default())?; + block_info.tx_hashes = value.iter().map(|tx_with_receipt| tx_with_receipt.receipt.transaction_hash()).collect(); + tx.put_cf(&block_n_to_block, block_n.to_be_bytes(), bincode::serialize(&block_info)?); + + let (transactions, receipts) = value.into_iter().map(|t| (t.transaction, t.receipt)).unzip(); + let block_inner = MadaraBlockInner { transactions, receipts }; + tx.put_cf(&block_n_to_block_inner, &block_n_encoded, &bincode::serialize(&block_inner)?); + + // TODO: other columns + + self.db.write_opt(tx, &self.writeopts_no_wal)?; + Ok(()) + } 
+ + pub fn store_state_diff(&self, block_n: u64, value: StateDiff) -> Result<(), MadaraStorageError> { + let mut batch = WriteBatchWithTransaction::default(); + + let block_n_to_state_diff = self.db.get_column(Column::BlockNToStateDiff); + let block_n_encoded = bincode::serialize(&block_n)?; + batch.put_cf(&block_n_to_state_diff, &block_n_encoded, &bincode::serialize(&value)?); + self.db.write_opt(batch, &self.writeopts_no_wal)?; + + self.contract_db_store_block(block_n, ContractDbBlockUpdate::from_state_diff(value))?; + + Ok(()) + } + + pub fn store_events(&self, block_n: u64, value: Vec) -> Result<(), MadaraStorageError> { + let mut batch = WriteBatchWithTransaction::default(); + + let block_n_to_block_inner = self.db.get_column(Column::BlockNToBlockInner); + let block_n_encoded = bincode::serialize(&block_n)?; + + // update block transactions (TODO: we should separate receipts and events) + let mut inner: MadaraBlockInner = + bincode::deserialize(&self.db.get_cf(&block_n_to_block_inner, &block_n_encoded)?.unwrap_or_default())?; + + // just in case we stored them with receipt earlier, overwrite them + for receipt in inner.receipts.iter_mut() { + let events_mut = match receipt { + TransactionReceipt::Invoke(receipt) => &mut receipt.events, + TransactionReceipt::L1Handler(receipt) => &mut receipt.events, + TransactionReceipt::Declare(receipt) => &mut receipt.events, + TransactionReceipt::Deploy(receipt) => &mut receipt.events, + TransactionReceipt::DeployAccount(receipt) => &mut receipt.events, + }; + events_mut.clear() + } + + let mut inner_m = inner.receipts.iter_mut().peekable(); + for ev in value { + let receipt_mut = loop { + let Some(receipt) = inner_m.peek_mut() else { + return Err(MadaraStorageError::InconsistentStorage( + format!("No transaction for hash {:#x} in block_n {block_n}", ev.transaction_hash).into(), + )); + }; + + if receipt.transaction_hash() == ev.transaction_hash { + break receipt; + } + let _item = inner_m.next(); + }; + + let events_mut = 
match receipt_mut { + TransactionReceipt::Invoke(receipt) => &mut receipt.events, + TransactionReceipt::L1Handler(receipt) => &mut receipt.events, + TransactionReceipt::Declare(receipt) => &mut receipt.events, + TransactionReceipt::Deploy(receipt) => &mut receipt.events, + TransactionReceipt::DeployAccount(receipt) => &mut receipt.events, + }; + + events_mut.push(ev.event); + } + + batch.put_cf(&block_n_to_block_inner, &block_n_encoded, &bincode::serialize(&inner)?); + self.db.write_opt(batch, &self.writeopts_no_wal)?; + + Ok(()) + } + + /// Returns the new global state root. Multiple state diffs can be applied at once, only the latest state root will + /// be returned. + /// Errors if the batch is empty. + pub fn apply_state<'a>( + &self, + start_block_n: u64, + state_diffs: impl IntoIterator, + ) -> Result { + let mut state_root = None; + for (block_n, state_diff) in (start_block_n..).zip(state_diffs) { + tracing::debug!("applying state_diff block_n={block_n}"); + let (contract_trie_root, class_trie_root) = rayon::join( + || { + crate::update_global_trie::contracts::contract_trie_root( + self, + &state_diff.deployed_contracts, + &state_diff.replaced_classes, + &state_diff.nonces, + &state_diff.storage_diffs, + block_n, + ) + }, + || crate::update_global_trie::classes::class_trie_root(self, &state_diff.declared_classes, block_n), + ); + + state_root = Some(crate::update_global_trie::calculate_state_root(contract_trie_root?, class_trie_root?)); + } + state_root.ok_or(MadaraStorageError::EmptyBatch) + } +} impl MadaraBackend { /// NB: This functions needs to run on the rayon thread pool @@ -18,8 +182,6 @@ impl MadaraBackend { block: MadaraMaybePendingBlock, state_diff: StateDiff, converted_classes: Vec, - visited_segments: Option, - bouncer_weights: Option, ) -> Result<(), MadaraStorageError> { let block_n = block.info.block_n(); let state_diff_cpy = state_diff.clone(); @@ -28,50 +190,20 @@ impl MadaraBackend { self.clear_pending_block()?; let task_block_db = || 
match block.info { - MadaraMaybePendingBlockInfo::Pending(info) => self.block_db_store_pending( - &MadaraPendingBlock { info, inner: block.inner }, - &state_diff_cpy, - visited_segments, - bouncer_weights, - ), + MadaraMaybePendingBlockInfo::Pending(info) => { + self.block_db_store_pending(&MadaraPendingBlock { info, inner: block.inner }, &state_diff_cpy) + } MadaraMaybePendingBlockInfo::NotPending(info) => { self.block_db_store_block(&MadaraBlock { info, inner: block.inner }, &state_diff_cpy) } }; let task_contract_db = || { - let nonces_from_updates = - state_diff.nonces.into_iter().map(|NonceUpdate { contract_address, nonce }| (contract_address, nonce)); - - let nonce_map: HashMap = nonces_from_updates.collect(); - - let contract_class_updates_replaced = state_diff - .replaced_classes - .into_iter() - .map(|ReplacedClassItem { contract_address, class_hash }| (contract_address, class_hash)); - - let contract_class_updates_deployed = state_diff - .deployed_contracts - .into_iter() - .map(|DeployedContractItem { address, class_hash }| (address, class_hash)); - - let contract_class_updates = - contract_class_updates_replaced.chain(contract_class_updates_deployed).collect::>(); - let nonces_updates = nonce_map.into_iter().collect::>(); - - let storage_kv_updates = state_diff - .storage_diffs - .into_iter() - .flat_map(|ContractStorageDiffItem { address, storage_entries }| { - storage_entries.into_iter().map(move |StorageEntry { key, value }| ((address, key), value)) - }) - .collect::>(); + let update = ContractDbBlockUpdate::from_state_diff(state_diff); match block_n { - None => self.contract_db_store_pending(&contract_class_updates, &nonces_updates, &storage_kv_updates), - Some(block_n) => { - self.contract_db_store_block(block_n, &contract_class_updates, &nonces_updates, &storage_kv_updates) - } + None => self.contract_db_store_pending(update), + Some(block_n) => self.contract_db_store_block(block_n, update), } }; @@ -85,6 +217,16 @@ impl MadaraBackend { 
r1.and(r2).and(r3)?; self.snapshots.set_new_head(DbBlockId::from_block_n(block_n)); + + if let Some(block_n) = block_n { + self.head_status.headers.set(Some(block_n)); + self.head_status.state_diffs.set(Some(block_n)); + self.head_status.transactions.set(Some(block_n)); + self.head_status.classes.set(Some(block_n)); + self.head_status.events.set(Some(block_n)); + self.head_status.global_trie.set(Some(block_n)); + } + Ok(()) } diff --git a/crates/madara/client/db/src/stream.rs b/crates/madara/client/db/src/stream.rs new file mode 100644 index 000000000..bd2ab9947 --- /dev/null +++ b/crates/madara/client/db/src/stream.rs @@ -0,0 +1,822 @@ +use crate::{db_block_id::DbBlockId, MadaraBackend, MadaraStorageError}; +use crate::{Column, DatabaseExt}; +use futures::{stream, Stream}; +use mp_block::MadaraBlockInfo; +use std::iter; +use std::{ + collections::VecDeque, + num::NonZeroU64, + ops::{Bound, RangeBounds}, + sync::Arc, +}; +use tokio::sync::broadcast::{error::RecvError, Receiver}; + +/// Returns (inclusive start, optional limit). +/// When the start is unbounded, we start at 0. +fn resolve_range(range: impl RangeBounds) -> (u64, Option) { + let start = match range.start_bound() { + Bound::Included(start) => *start, + Bound::Excluded(start) => match start.checked_add(1) { + Some(start) => start, + None => { + // start is u64::max, excluded. Return an empty range. + return (u64::MAX, Some(0)); + } + }, + Bound::Unbounded => 0, + }; + let limit = match range.end_bound() { + Bound::Included(end) => Some(end.saturating_add(1).saturating_sub(start)), + Bound::Excluded(end) => Some(end.saturating_sub(start)), + Bound::Unbounded => None, + }; + + (start, limit) +} + +#[derive(Default, Debug, Clone, Copy, Eq, PartialEq)] +pub enum Direction { + #[default] + Forward, + Backward, +} +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct BlockStreamConfig { + pub direction: Direction, + /// Block number from which to start (inclusive). 
+ /// In the case of reverse iteration, if the block does not exist yet, iteration will start from the latest block in db. + pub start: u64, + pub step: NonZeroU64, + pub limit: Option, +} + +impl BlockStreamConfig { + pub fn backward(mut self) -> Self { + self.direction = Direction::Backward; + self + } + + pub fn forward(mut self) -> Self { + self.direction = Direction::Forward; + self + } + + pub fn with_block_range(mut self, range: impl RangeBounds) -> Self { + let (start, limit) = resolve_range(range); + self.start = start; + self.limit = limit; + self + } + + pub fn with_limit(mut self, limit: impl Into>) -> Self { + self.limit = limit.into(); + self + } + + pub fn with_start(mut self, start: impl Into) -> Self { + self.start = start.into(); + self + } +} + +impl Default for BlockStreamConfig { + fn default() -> Self { + Self { direction: Direction::Forward, start: 0, step: NonZeroU64::MIN, limit: None } + } +} + +impl MadaraBackend { + pub fn block_info_iterator( + self: &Arc, + iteration: BlockStreamConfig, + ) -> impl Iterator> { + // The important thing here is to avoid keeping iterators around in the iterator state, + // as an iterator instance will pin the memtables. + // To avoid that, we buffer a few blocks ahead to still benefit from the rocksdb iterators. + // Note, none of that has been benchmarked. 
+ const BUFFER_SIZE: usize = 32; + + struct State { + backend: Arc, + iteration: BlockStreamConfig, + buf: VecDeque, + next_block_n: Option, + total_got: u64, + } + + impl State { + fn next_item(&mut self) -> Result, MadaraStorageError> { + if self.buf.is_empty() { + if self.iteration.limit.is_some_and(|limit| self.total_got >= limit) { + return Ok(None); + } + + let Some(mut next_block_n) = self.next_block_n else { + return Ok(None); + }; + + let col = self.backend.db.get_column(Column::BlockNToBlockInfo); + let mut ite = self.backend.db.raw_iterator_cf(&col); + + match self.iteration.direction { + Direction::Forward => ite.seek(next_block_n.to_be_bytes()), + Direction::Backward => ite.seek_for_prev(next_block_n.to_be_bytes()), + } + for _ in 0..BUFFER_SIZE { + // End condition when moving forward is the latest full block in db. + if self.iteration.direction == Direction::Forward + && self.backend.head_status().next_full_block() <= next_block_n + { + break; + } + + let Some((k, v)) = ite.item() else { + ite.status()?; // bubble up error, or, we reached the end. + break; + }; + + let block_n = + u64::from_be_bytes(k.try_into().map_err(|_| MadaraStorageError::InvalidBlockNumber)?); + + if self.iteration.direction == Direction::Backward { + // If we asked for a block that does not yet exist, we start from the highest block found instead. 
+ next_block_n = u64::min(next_block_n, block_n); + } + + let val = bincode::deserialize(v)?; + + self.buf.push_back(val); + self.total_got += 1; + + // update next_block_n + match self.iteration.direction { + Direction::Forward => { + self.next_block_n = next_block_n.checked_add(self.iteration.step.get()); + } + Direction::Backward => { + self.next_block_n = next_block_n.checked_sub(self.iteration.step.get()); + } + } + let Some(next) = self.next_block_n else { break }; + next_block_n = next; + + if self.iteration.limit.is_some_and(|limit| self.total_got >= limit) { + break; + } + + if self.iteration.step.get() > 1 { + // seek here instead of next/prev + match self.iteration.direction { + Direction::Forward => ite.seek(next_block_n.to_be_bytes()), + Direction::Backward => ite.seek_for_prev(next_block_n.to_be_bytes()), + } + } else { + match self.iteration.direction { + Direction::Forward => ite.next(), + Direction::Backward => ite.prev(), + } + } + } + } + + Ok(self.buf.pop_front()) + } + } + + let mut state = State { + backend: Arc::clone(self), + buf: VecDeque::with_capacity(BUFFER_SIZE), + next_block_n: Some(iteration.start), + total_got: 0, + iteration, + }; + iter::from_fn(move || state.next_item().transpose()) + } + + /// This function will follow the tip of the chain when asked for Forward iteration, hence it is a Stream and not an Iterator. + pub fn block_info_stream( + self: &Arc, + iteration: BlockStreamConfig, + ) -> impl Stream> { + // So, this is a somewhat funny problem: by the time we return the blocks until the current latest_block in db, + // the database may actually have new blocks now! + // Remember that we're returning a stream here, which means that the time between polls varies with the caller - and, + // in the use cases we're interested in (websocket/p2p) the time between polls varies depending on the speed of the + // connection with the client/peer that's calling the endpoint. + // So! 
it may very well be the case that once we caught up with the latest block_n in db as we saw at the beginning + of the call, and once we sent all the blocks that have been added within the time we sent all of those, we might + still not have caught up with the latest block in db - because new blocks could have come by then. + This implementation solves this problem by checking the latest block in db in a loop and only once it really looks + like we caught up with the db, we subscribe to the new blocks channel. But hold on a minute, this subscribe is + done after getting the latest block number! There's a split second where it could have been possible to miss a + block. Because of this rare case, there are two supplementary things to note: we *also* get the latest block_n + *after* subscribing, so that we can check that we did not miss anything during subscription - and just in case, + we also handle the case when the subscription returns a block that's further into the future than the one we + would expect. + All in all, this implementation tries its *very best* not to subscribe to the channel when it does not have to. + In addition, because rust does not have `yield` syntax (yet? I'm losing hope..) - this is implemented as a + funky looking state machine. yay! + + // TODO: use db iterators to fill a VecDeque buffer (we don't want to hold a db iterator across an await point!) + // => reuse block_info_iterator logic + // TODO: what should we do about reorgs?! i would assume we go back and re-return the new blocks..? + + struct State { + iteration: BlockStreamConfig, + backend: Arc, + /// `None` here means we reached the end of iteration. + next_to_return: Option, + num_blocks_returned: u64, + /// This is `+ 1` because we want to handle returning genesis. If the chain is empty (does not even have a genesis + /// block), this field will be 0.
+ latest_plus_one: Option, + subscription: Option>, + } + + impl State { + /// Get the `latest_plus_one` variable in `self`, populating it if it is empty. + fn get_latest_plus_one(&mut self) -> Result { + let latest_plus_one = match self.latest_plus_one { + Some(n) => n, + None => { + self.backend.get_latest_block_n()?.map(|n| n.saturating_add(1)).unwrap_or(/* genesis */ 0) + } + }; + self.latest_plus_one = Some(latest_plus_one); + Ok(latest_plus_one) + } + + async fn next_forward(&mut self) -> Result, MadaraStorageError> { + 'retry: loop { + let Some(next_to_return) = self.next_to_return else { return Ok(None) }; + + // If we have a subscription, return blocks from it. + if let Some(subscription) = &mut self.subscription { + match subscription.recv().await { + // return this block + Ok(info) if info.header.block_number == next_to_return => { + self.next_to_return = next_to_return.checked_add(self.iteration.step.get()); + return Ok(Some(info)); + } + // skip this block + Ok(info) if info.header.block_number < next_to_return => continue 'retry, + // the channel returned a block number that we didn't expect. Treat that as if it lagged..? + Ok(_info) => self.subscription = None, + // If it lagged (buffer full), continue using db and we'll eventually resubscribe again once caught up :) + Err(RecvError::Lagged(_n_skipped_messages)) => self.subscription = None, + Err(RecvError::Closed) => return Ok(None), + } + } + + // Or else, return blocks from the db. 
+ + if self.latest_plus_one.is_some_and(|latest_plus_one| latest_plus_one <= next_to_return) { + // new blocks may have arrived, get latest_block_n again + self.latest_plus_one = None + } + + let latest_plus_one = self.get_latest_plus_one()?; + + if latest_plus_one <= next_to_return { + // caught up with the db :) + self.subscription = Some(self.backend.subscribe_block_info()); + // get latest_block_n again after subscribing, because it could have changed during subscribing + self.latest_plus_one = None; + self.get_latest_plus_one()?; + continue 'retry; + } + + let block_info = &self.backend.get_block_info(&DbBlockId::Number(next_to_return))?.ok_or( + MadaraStorageError::InconsistentStorage("latest_block_n points to a non existent block".into()), + )?; + let block_info = block_info + .as_nonpending() + .ok_or(MadaraStorageError::InconsistentStorage("Closed block should not be pending".into()))?; + + self.next_to_return = next_to_return.checked_add(self.iteration.step.get()); + return Ok(Some(block_info.clone())); + } + } + + // Implement backward mode in another function. + async fn next_backward(&mut self) -> Result, MadaraStorageError> { + // This makes sure we're starting from a block that actually exists. It bounds the `next_to_return` variable. + if self.latest_plus_one.is_none() { + let Some(next_to_return) = self.next_to_return else { return Ok(None) }; + let latest_block = self.get_latest_plus_one()?.checked_sub(1); + // If there are no blocks in db, this will set `next_to_return` to None. 
+ self.next_to_return = latest_block.map(|latest_block| u64::min(latest_block, next_to_return)) + } + + let Some(next_to_return) = self.next_to_return else { return Ok(None) }; + + let block_info = &self.backend.get_block_info(&DbBlockId::Number(next_to_return))?.ok_or( + MadaraStorageError::InconsistentStorage("latest_block_n points to a non existent block".into()), + )?; + let block_info = block_info + .as_nonpending() + .ok_or(MadaraStorageError::InconsistentStorage("Closed block should not be pending".into()))?; + + // The None here will stop the iteration once we passed genesis. + self.next_to_return = next_to_return.checked_sub(self.iteration.step.get()); + Ok(Some(block_info.clone())) + } + + async fn try_next(&mut self) -> Result, MadaraStorageError> { + if self.iteration.limit.is_some_and(|limit| self.num_blocks_returned >= limit) { + return Ok(None); + } + + let ret = match self.iteration.direction { + Direction::Forward => self.next_forward().await?, + Direction::Backward => self.next_backward().await?, + }; + + if ret.is_some() { + self.num_blocks_returned = self.num_blocks_returned.saturating_add(1); + } + + Ok(ret) + } + } + + stream::unfold( + State { + next_to_return: Some(iteration.start), + iteration, + num_blocks_returned: 0, + latest_plus_one: None, + backend: Arc::clone(self), + subscription: None, + }, + |mut s| async { s.try_next().await.transpose().map(|el| (el, s)) }, + ) + } +} + +#[cfg(test)] +mod tests { + //! To test: + //! - [x] Simple iteration, everything in db. + //! - [x] Simple iteration, db is empty. + //! - [x] Simple iteration, everything in db. Start from a specific block. + //! - [x] Simple iteration, everything in db. Start from a block that doesn't exist yet. + //! - [x] More complex cases where blocks are added during iteration. + //! - [x] Reverse iteration. + //! - [x] Reverse iteration, db is empty. + //! - [x] Reverse iteration: start from a specific block. + //! - [x] Reverse: Start from a block that doesn't exist yet.
+ //! - [x] Step iteration, forward. + //! - [x] Step iteration, backward. + //! - [x] Limit field. + //! - [x] Limit field wait on channel. + //! - [x] Limit field reverse iteration. + + use super::*; + use mp_block::{Header, MadaraMaybePendingBlock, MadaraMaybePendingBlockInfo}; + use mp_chain_config::ChainConfig; + use starknet_types_core::felt::Felt; + use std::time::Duration; + use stream::{StreamExt, TryStreamExt}; + use tokio::{pin, time::timeout}; + + fn block_info(block_number: u64) -> MadaraBlockInfo { + MadaraBlockInfo { + header: Header { block_number, ..Default::default() }, + block_hash: Felt::from(block_number), + tx_hashes: Default::default(), + } + } + + fn store_block(backend: &MadaraBackend, block_number: u64) { + backend + .store_block( + MadaraMaybePendingBlock { + inner: Default::default(), + info: MadaraMaybePendingBlockInfo::NotPending(block_info(block_number)), + }, + Default::default(), + Default::default(), + ) + .unwrap(); + } + + #[rstest::fixture] + fn empty_chain() -> Arc { + MadaraBackend::open_for_testing(ChainConfig::madara_test().into()) + } + + #[rstest::fixture] + fn test_chain() -> Arc { + let backend = MadaraBackend::open_for_testing(ChainConfig::madara_test().into()); + for block_number in 0..5 { + store_block(&backend, block_number) + } + backend + } + + #[rstest::rstest] + #[tokio::test] + async fn test_simple(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig::default()); + pin!(stream); + + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(0))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(1))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(2))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(3))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(4))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + + 
store_block(&test_chain, 5); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(5))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_empty_chain(empty_chain: Arc) { + let stream = empty_chain.block_info_stream(BlockStreamConfig::default()); + pin!(stream); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + + store_block(&empty_chain, 0); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(0))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_start_from_block(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig { start: 3, ..Default::default() }); + pin!(stream); + + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(3))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(4))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + + store_block(&test_chain, 5); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(5))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_start_from_not_yet_created(empty_chain: Arc) { + let stream = empty_chain.block_info_stream(BlockStreamConfig { start: 3, ..Default::default() }); + pin!(stream); + + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&empty_chain, 0); + store_block(&empty_chain, 1); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&empty_chain, 2); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&empty_chain, 3); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(3))); + assert!(timeout(Duration::from_millis(50), 
stream.next()).await.is_err()); + store_block(&empty_chain, 4); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(4))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_concurrent(empty_chain: Arc) { + let stream = empty_chain.block_info_stream(BlockStreamConfig::default()); + pin!(stream); + + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&empty_chain, 0); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(0))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&empty_chain, 1); + store_block(&empty_chain, 2); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(1))); + store_block(&empty_chain, 3); + store_block(&empty_chain, 4); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(2))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(3))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(4))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&empty_chain, 5); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(5))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_backward(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig { + direction: Direction::Backward, + start: 3, + ..Default::default() + }); + pin!(stream); + + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(3))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(2))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(1))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(0))); + assert_eq!(stream.try_next().await.unwrap(), None); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_backward_empty(empty_chain: Arc) { + let stream = 
empty_chain.block_info_stream(BlockStreamConfig { + direction: Direction::Backward, + start: 0, + ..Default::default() + }); + pin!(stream); + + assert_eq!(stream.try_next().await.unwrap(), None); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_backward_start_from_not_yet_created(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig { + direction: Direction::Backward, + start: 10, + ..Default::default() + }); + pin!(stream); + + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(4))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(3))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(2))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(1))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(0))); + assert_eq!(stream.try_next().await.unwrap(), None); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_step(test_chain: Arc) { + let stream = + test_chain.block_info_stream(BlockStreamConfig { step: 2.try_into().unwrap(), ..Default::default() }); + pin!(stream); + + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(0))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(2))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(4))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + + store_block(&test_chain, 5); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + + store_block(&test_chain, 6); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(6))); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_step_backward(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig { + direction: Direction::Backward, + step: 2.try_into().unwrap(), + start: 4, + ..Default::default() + }); + pin!(stream); + + assert_eq!(stream.try_next().await.unwrap(), 
Some(block_info(4))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(2))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(0))); + assert_eq!(stream.try_next().await.unwrap(), None); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_limit(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig { limit: Some(3), ..Default::default() }); + pin!(stream); + + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(0))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(1))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(2))); + assert_eq!(stream.try_next().await.unwrap(), None); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_limit2(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig { limit: Some(3), start: 4, ..Default::default() }); + pin!(stream); + + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(4))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&test_chain, 5); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(5))); + assert!(timeout(Duration::from_millis(50), stream.next()).await.is_err()); + store_block(&test_chain, 6); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(6))); + assert_eq!(stream.try_next().await.unwrap(), None); + } + + #[rstest::rstest] + #[tokio::test] + async fn test_limit_backward(test_chain: Arc) { + let stream = test_chain.block_info_stream(BlockStreamConfig { + direction: Direction::Backward, + limit: Some(3), + start: 5, + ..Default::default() + }); + pin!(stream); + + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(4))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(3))); + assert_eq!(stream.try_next().await.unwrap(), Some(block_info(2))); + assert_eq!(stream.try_next().await.unwrap(), None); + } + + #[test] + #[allow(clippy::reversed_empty_ranges)] + fn test_resolve_range() { + 
assert_eq!(resolve_range(0..), (0, None)); + assert_eq!(resolve_range(..), (0, None)); + assert_eq!(resolve_range(0..5), (0, Some(5))); + assert_eq!(resolve_range(..5), (0, Some(5))); + assert_eq!(resolve_range(0..=5), (0, Some(6))); + assert_eq!(resolve_range(..=5), (0, Some(6))); + assert_eq!(resolve_range(10..5), (10, Some(0))); + assert_eq!(resolve_range(10..=5), (10, Some(0))); + assert_eq!(resolve_range(10..10), (10, Some(0))); + assert_eq!(resolve_range(10..=10), (10, Some(1))); + assert_eq!(resolve_range(10..11), (10, Some(1))); + assert_eq!(resolve_range(10..=11), (10, Some(2))); + assert_eq!(resolve_range(10..9), (10, Some(0))); + assert_eq!(resolve_range(10..=9), (10, Some(0))); + assert_eq!(resolve_range(10..), (10, None)); + assert_eq!(resolve_range(10..15), (10, Some(5))); + assert_eq!(resolve_range(10..=15), (10, Some(6))); + } + + #[rstest::rstest] + fn test_iterator_simple(test_chain: Arc) { + let mut ite = test_chain.block_info_iterator(BlockStreamConfig::default()); + + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(0))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(1))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(2))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(3))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(4))); + assert_eq!(ite.next().transpose().unwrap(), None); + assert_eq!(ite.next().transpose().unwrap(), None); + } + + #[rstest::rstest] + fn test_iterator_empty_chain(empty_chain: Arc) { + let mut ite = empty_chain.block_info_iterator(BlockStreamConfig::default()); + assert_eq!(ite.next().transpose().unwrap(), None); + } + + #[rstest::rstest] + fn test_iterator_start_from_block(test_chain: Arc) { + let mut ite = test_chain.block_info_iterator(BlockStreamConfig { start: 3, ..Default::default() }); + + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(3))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(4))); + 
assert_eq!(ite.next().transpose().unwrap(), None); + assert_eq!(ite.next().transpose().unwrap(), None); + } + + #[rstest::rstest] + fn test_iterator_start_from_not_yet_created(empty_chain: Arc) { + let mut ite = empty_chain.block_info_iterator(BlockStreamConfig { start: 3, ..Default::default() }); + assert_eq!(ite.next().transpose().unwrap(), None); + + store_block(&empty_chain, 0); + store_block(&empty_chain, 1); + let mut ite = empty_chain.block_info_iterator(BlockStreamConfig { start: 3, ..Default::default() }); + assert_eq!(ite.next().transpose().unwrap(), None); + store_block(&empty_chain, 2); + store_block(&empty_chain, 3); + let mut ite = empty_chain.block_info_iterator(BlockStreamConfig { start: 3, ..Default::default() }); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(3))); + assert_eq!(ite.next().transpose().unwrap(), None); + store_block(&empty_chain, 4); + let mut ite = empty_chain.block_info_iterator(BlockStreamConfig { start: 3, ..Default::default() }); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(3))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(4))); + assert_eq!(ite.next().transpose().unwrap(), None); + } + + #[rstest::rstest] + fn test_iterator_backward(test_chain: Arc) { + let mut ite = test_chain.block_info_iterator(BlockStreamConfig { + direction: Direction::Backward, + start: 3, + ..Default::default() + }); + + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(3))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(2))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(1))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(0))); + assert_eq!(ite.next().transpose().unwrap(), None); + } + + #[rstest::rstest] + fn test_iterator_backward_empty(empty_chain: Arc) { + let mut ite = empty_chain.block_info_iterator(BlockStreamConfig { + direction: Direction::Backward, + start: 0, + ..Default::default() + }); + + assert_eq!(ite.next().transpose().unwrap(), 
None); + } + + #[rstest::rstest] + fn test_iterator_backward_start_from_not_yet_created(test_chain: Arc) { + let mut ite = test_chain.block_info_iterator(BlockStreamConfig { + direction: Direction::Backward, + start: 10, + ..Default::default() + }); + + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(4))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(3))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(2))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(1))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(0))); + assert_eq!(ite.next().transpose().unwrap(), None); + } + + #[rstest::rstest] + fn test_iterator_step(test_chain: Arc) { + let mut ite = + test_chain.block_info_iterator(BlockStreamConfig { step: 2.try_into().unwrap(), ..Default::default() }); + + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(0))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(2))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(4))); + assert_eq!(ite.next().transpose().unwrap(), None); + + store_block(&test_chain, 5); + let mut ite = + test_chain.block_info_iterator(BlockStreamConfig { step: 2.try_into().unwrap(), ..Default::default() }); + + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(0))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(2))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(4))); + assert_eq!(ite.next().transpose().unwrap(), None); + + store_block(&test_chain, 6); + let mut ite = + test_chain.block_info_iterator(BlockStreamConfig { step: 2.try_into().unwrap(), ..Default::default() }); + + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(0))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(2))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(4))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(6))); + assert_eq!(ite.next().transpose().unwrap(), None); + } + 
+ #[rstest::rstest] + fn test_iterator_step_backward(test_chain: Arc) { + let mut ite = test_chain.block_info_iterator(BlockStreamConfig { + direction: Direction::Backward, + step: 2.try_into().unwrap(), + start: 4, + ..Default::default() + }); + + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(4))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(2))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(0))); + assert_eq!(ite.next().transpose().unwrap(), None); + } + + #[rstest::rstest] + fn test_iterator_limit(test_chain: Arc) { + let mut ite = test_chain.block_info_iterator(BlockStreamConfig { limit: Some(3), ..Default::default() }); + + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(0))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(1))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(2))); + assert_eq!(ite.next().transpose().unwrap(), None); + } + + #[rstest::rstest] + fn test_iterator_limit_backward(test_chain: Arc) { + let mut ite = test_chain.block_info_iterator(BlockStreamConfig { + direction: Direction::Backward, + limit: Some(3), + start: 5, + ..Default::default() + }); + + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(4))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(3))); + assert_eq!(ite.next().transpose().unwrap(), Some(block_info(2))); + assert_eq!(ite.next().transpose().unwrap(), None); + } +} diff --git a/crates/madara/client/db/src/tests/test_block.rs b/crates/madara/client/db/src/tests/test_block.rs index 2d7ed0c88..a9e043c06 100644 --- a/crates/madara/client/db/src/tests/test_block.rs +++ b/crates/madara/client/db/src/tests/test_block.rs @@ -24,8 +24,8 @@ mod block_tests { let block_hash = block.info.block_hash().unwrap(); let state_diff = finalized_state_diff_zero(); - backend.store_block(block.clone(), state_diff.clone(), vec![], None, None).unwrap(); - backend.store_block(pending_block_one(), pending_state_diff_one(), vec![], 
None, None).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![]).unwrap(); + backend.store_block(pending_block_one(), pending_state_diff_one(), vec![]).unwrap(); assert_eq!(backend.resolve_block_id(&BlockId::Hash(block_hash)).unwrap().unwrap(), DbBlockId::Number(0)); assert_eq!(backend.resolve_block_id(&BlockId::Number(0)).unwrap().unwrap(), DbBlockId::Number(0)); @@ -52,7 +52,7 @@ mod block_tests { let block = finalized_block_zero(Header::default()); let state_diff = finalized_state_diff_zero(); - backend.store_block(block.clone(), state_diff.clone(), vec![], None, None).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![]).unwrap(); assert_eq!(backend.get_block_hash(&BLOCK_ID_0).unwrap().unwrap(), block.info.block_hash().unwrap()); assert_eq!(BLOCK_ID_0.resolve_db_block_id(backend).unwrap().unwrap(), BLOCK_ID_0); @@ -75,7 +75,7 @@ mod block_tests { let block = pending_block_one(); let state_diff = pending_state_diff_one(); - backend.store_block(block.clone(), state_diff.clone(), vec![], None, None).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![]).unwrap(); assert!(backend.get_block_hash(&BLOCK_ID_PENDING).unwrap().is_none()); assert_eq!(backend.get_block_info(&BLOCK_ID_PENDING).unwrap().unwrap(), block.info); @@ -91,10 +91,8 @@ mod block_tests { let db = temp_db().await; let backend = db.backend(); - backend - .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None, None) - .unwrap(); - backend.store_block(pending_block_one(), pending_state_diff_one(), vec![], None, None).unwrap(); + backend.store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![]).unwrap(); + backend.store_block(pending_block_one(), pending_state_diff_one(), vec![]).unwrap(); backend.clear_pending_block().unwrap(); assert!(backend.get_block(&BLOCK_ID_PENDING).unwrap().unwrap().inner.transactions.is_empty()); @@ -104,11 +102,11 @@ mod block_tests { "fake 
pending block parent hash must match with latest block in db" ); - backend.store_block(finalized_block_one(), finalized_state_diff_one(), vec![], None, None).unwrap(); + backend.store_block(finalized_block_one(), finalized_state_diff_one(), vec![]).unwrap(); let block_pending = pending_block_two(); let state_diff = pending_state_diff_two(); - backend.store_block(block_pending.clone(), state_diff.clone(), vec![], None, None).unwrap(); + backend.store_block(block_pending.clone(), state_diff.clone(), vec![]).unwrap(); assert!(backend.get_block_hash(&BLOCK_ID_PENDING).unwrap().is_none()); assert_eq!(backend.get_block_info(&BLOCK_ID_PENDING).unwrap().unwrap(), block_pending.info); @@ -122,12 +120,10 @@ mod block_tests { let db = temp_db().await; let backend = db.backend(); - backend - .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None, None) - .unwrap(); + backend.store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![]).unwrap(); let latest_block = finalized_block_one(); - backend.store_block(latest_block.clone(), finalized_state_diff_one(), vec![], None, None).unwrap(); + backend.store_block(latest_block.clone(), finalized_state_diff_one(), vec![]).unwrap(); assert_eq!(backend.get_latest_block_n().unwrap().unwrap(), 1); } @@ -152,7 +148,7 @@ mod block_tests { let block = finalized_block_zero(Header::default()); let state_diff = finalized_state_diff_zero(); - backend.store_block(block.clone(), state_diff.clone(), vec![], None, None).unwrap(); + backend.store_block(block.clone(), state_diff.clone(), vec![]).unwrap(); let tx_hash_1 = block.info.tx_hashes()[1]; assert_eq!(backend.find_tx_hash_block_info(&tx_hash_1).unwrap().unwrap(), (block.info.clone(), TxIndex(1))); @@ -164,12 +160,10 @@ mod block_tests { let db = temp_db().await; let backend = db.backend(); - backend - .store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![], None, None) - .unwrap(); + 
backend.store_block(finalized_block_zero(Header::default()), finalized_state_diff_zero(), vec![]).unwrap(); let block_pending = pending_block_one(); - backend.store_block(block_pending.clone(), pending_state_diff_one(), vec![], None, None).unwrap(); + backend.store_block(block_pending.clone(), pending_state_diff_one(), vec![]).unwrap(); let tx_hash_1 = block_pending.info.tx_hashes()[1]; assert_eq!( diff --git a/crates/madara/client/db/src/update_global_trie/classes.rs b/crates/madara/client/db/src/update_global_trie/classes.rs new file mode 100644 index 000000000..46e096e72 --- /dev/null +++ b/crates/madara/client/db/src/update_global_trie/classes.rs @@ -0,0 +1,93 @@ +use crate::MadaraBackend; +use crate::{bonsai_identifier, MadaraStorageError}; +use bitvec::order::Msb0; +use bitvec::vec::BitVec; +use bitvec::view::AsBits; +use bonsai_trie::id::BasicId; +use mp_state_update::DeclaredClassItem; +use rayon::prelude::*; +use starknet_types_core::felt::Felt; +use starknet_types_core::hash::{Poseidon, StarkHash}; + +// "CONTRACT_CLASS_LEAF_V0" +const CONTRACT_CLASS_HASH_VERSION: Felt = Felt::from_hex_unchecked("0x434f4e54524143545f434c4153535f4c4541465f5630"); + +pub fn class_trie_root( + backend: &MadaraBackend, + declared_classes: &[DeclaredClassItem], + block_number: u64, +) -> Result { + let mut class_trie = backend.class_trie(); + + let updates: Vec<_> = declared_classes + .into_par_iter() + .map(|DeclaredClassItem { class_hash, compiled_class_hash }| { + let hash = Poseidon::hash(&CONTRACT_CLASS_HASH_VERSION, compiled_class_hash); + (*class_hash, hash) + }) + .collect(); + + tracing::trace!("class_trie inserting"); + for (key, value) in updates { + let bytes = key.to_bytes_be(); + let bv: BitVec = bytes.as_bits()[5..].to_owned(); + class_trie.insert(bonsai_identifier::CLASS, &bv, &value)?; + } + + tracing::trace!("class_trie committing"); + class_trie.commit(BasicId::new(block_number))?; + + let root_hash = class_trie.root_hash(bonsai_identifier::CLASS)?; + + 
tracing::trace!("class_trie committed"); + + Ok(root_hash) +} + +// #[cfg(test)] +// mod tests { +// use super::*; +// use crate::verify_apply::verify_apply_tests::setup_test_backend; +// use rstest::*; +// use std::sync::Arc; +// #[test] +// fn test_contract_class_hash_version() { +// assert_eq!(CONTRACT_CLASS_HASH_VERSION, Felt::from_bytes_be_slice(b"CONTRACT_CLASS_LEAF_V0")); +// } + +// #[rstest] +// fn test_class_trie_root(setup_test_backend: Arc) { +// let backend = setup_test_backend; +// // Create sample DeclaredClassItems with predefined class and compiled class hashes +// let declared_classes = vec![ +// DeclaredClassItem { +// class_hash: Felt::from_hex_unchecked( +// "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", +// ), +// compiled_class_hash: Felt::from_hex_unchecked( +// "0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321", +// ), +// }, +// DeclaredClassItem { +// class_hash: Felt::from_hex_unchecked( +// "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890", +// ), +// compiled_class_hash: Felt::from_hex_unchecked( +// "0x1234567890abcdeffedcba09876543211234567890abcdeffedcba0987654321", +// ), +// }, +// ]; + +// // Set the block number for the test +// let block_number = 1; + +// // Call the class_trie_root function with the test data +// let result = class_trie_root(&backend, &declared_classes, block_number).unwrap(); + +// // Assert that the resulting root hash matches the expected value +// assert_eq!( +// result, +// Felt::from_hex_unchecked("0x9e521cb5e73189fe985db9dfd50b1dcdefc95ca4e1ebf23b0a4408a81bb610") +// ); +// } +// } diff --git a/crates/madara/client/db/src/update_global_trie/contracts.rs b/crates/madara/client/db/src/update_global_trie/contracts.rs new file mode 100644 index 000000000..3dec21bba --- /dev/null +++ b/crates/madara/client/db/src/update_global_trie/contracts.rs @@ -0,0 +1,212 @@ +use crate::MadaraBackend; +use crate::{bonsai_identifier, MadaraStorageError}; +use 
bitvec::order::Msb0; +use bitvec::vec::BitVec; +use bitvec::view::AsBits; +use bonsai_trie::id::BasicId; +use mp_block::BlockId; +use mp_state_update::{ContractStorageDiffItem, DeployedContractItem, NonceUpdate, ReplacedClassItem, StorageEntry}; +use rayon::prelude::*; +use starknet_types_core::felt::Felt; +use starknet_types_core::hash::{Pedersen, StarkHash}; +use std::collections::HashMap; + +#[derive(Debug, Default)] +struct ContractLeaf { + pub class_hash: Option, + pub storage_root: Option, + pub nonce: Option, +} + +/// Calculates the contract trie root +/// +/// # Arguments +/// +/// * `csd` - Commitment state diff for the current block. +/// * `block_number` - The current block number. +/// +/// # Returns +/// +/// The contract root. +pub fn contract_trie_root( + backend: &MadaraBackend, + deployed_contracts: &[DeployedContractItem], + replaced_classes: &[ReplacedClassItem], + nonces: &[NonceUpdate], + storage_diffs: &[ContractStorageDiffItem], + block_number: u64, +) -> Result { + let mut contract_leafs: HashMap = HashMap::new(); + + let mut contract_storage_trie = backend.contract_storage_trie(); + + tracing::trace!("contract_storage_trie inserting"); + + // First we insert the contract storage changes + for ContractStorageDiffItem { address, storage_entries } in storage_diffs { + for StorageEntry { key, value } in storage_entries { + let bytes = key.to_bytes_be(); + let bv: BitVec = bytes.as_bits()[5..].to_owned(); + contract_storage_trie.insert(&address.to_bytes_be(), &bv, value)?; + } + // insert the contract address in the contract_leafs to put the storage root later + contract_leafs.insert(*address, Default::default()); + } + + tracing::trace!("contract_storage_trie commit"); + + // Then we commit them + contract_storage_trie.commit(BasicId::new(block_number))?; + + for NonceUpdate { contract_address, nonce } in nonces { + contract_leafs.entry(*contract_address).or_default().nonce = Some(*nonce); + } + + for DeployedContractItem { address, class_hash 
} in deployed_contracts { + contract_leafs.entry(*address).or_default().class_hash = Some(*class_hash); + } + + for ReplacedClassItem { contract_address, class_hash } in replaced_classes { + contract_leafs.entry(*contract_address).or_default().class_hash = Some(*class_hash); + } + + let mut contract_trie = backend.contract_trie(); + + let leaf_hashes: Vec<_> = contract_leafs + .into_par_iter() + .map(|(contract_address, mut leaf)| { + let storage_root = contract_storage_trie.root_hash(&contract_address.to_bytes_be())?; + leaf.storage_root = Some(storage_root); + let leaf_hash = contract_state_leaf_hash(backend, &contract_address, &leaf, block_number)?; + let bytes = contract_address.to_bytes_be(); + let bv: BitVec = bytes.as_bits()[5..].to_owned(); + Ok((bv, leaf_hash)) + }) + .collect::>()?; + + for (k, v) in leaf_hashes { + contract_trie.insert(bonsai_identifier::CONTRACT, &k, &v)?; + } + + tracing::trace!("contract_trie committing"); + + contract_trie.commit(BasicId::new(block_number))?; + let root_hash = contract_trie.root_hash(bonsai_identifier::CONTRACT)?; + + tracing::trace!("contract_trie committed"); + + Ok(root_hash) +} + +/// Computes the contract state leaf hash +/// +/// # Arguments +/// +/// * `csd` - Commitment state diff for the current block. +/// * `contract_address` - The contract address. +/// * `storage_root` - The storage root of the contract. +/// +/// # Returns +/// +/// The contract state leaf hash. 
+fn contract_state_leaf_hash( + backend: &MadaraBackend, + contract_address: &Felt, + contract_leaf: &ContractLeaf, + block_number: u64, +) -> Result { + let nonce = contract_leaf.nonce.unwrap_or( + backend.get_contract_nonce_at(&BlockId::Number(block_number), contract_address)?.unwrap_or(Felt::ZERO), + ); + + let class_hash = contract_leaf.class_hash.unwrap_or( + backend.get_contract_class_hash_at(&BlockId::Number(block_number), contract_address)?.unwrap_or(Felt::ZERO), // .ok_or(MadaraStorageError::InconsistentStorage("Class hash not found".into()))? + ); + + let storage_root = contract_leaf + .storage_root + .ok_or(MadaraStorageError::InconsistentStorage("Storage root need to be set".into()))?; + + tracing::trace!("contract is {contract_address:#x} block_n={block_number} nonce={nonce:#x} class_hash={class_hash:#x} storage_root={storage_root:#x}"); + + // computes the contract state leaf hash + Ok(Pedersen::hash(&Pedersen::hash(&Pedersen::hash(&class_hash, &storage_root), &nonce), &Felt::ZERO)) +} + +// #[cfg(test)] +// mod contract_trie_root_tests { +// use super::*; +// use crate::verify_apply::verify_apply_tests::setup_test_backend; +// use rstest::*; +// use std::sync::Arc; + +// #[rstest] +// fn test_contract_trie_root_success(setup_test_backend: Arc) { +// let backend = setup_test_backend; + +// // Create dummy data +// let deployed_contracts = vec![DeployedContractItem { +// address: Felt::from_hex_unchecked("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), +// class_hash: Felt::from_hex_unchecked("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"), +// }]; + +// let replaced_classes = vec![ReplacedClassItem { +// contract_address: Felt::from_hex_unchecked( +// "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890", +// ), +// class_hash: Felt::from_hex_unchecked("0x1234567890abcdeffedcba09876543211234567890abcdeffedcba0987654321"), +// }]; + +// let nonces = vec![NonceUpdate { +// contract_address: 
Felt::from_hex_unchecked( +// "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", +// ), +// nonce: Felt::from_hex_unchecked("0x0000000000000000000000000000000000000000000000000000000000000001"), +// }]; + +// let storage_diffs = vec![ContractStorageDiffItem { +// address: Felt::from_hex_unchecked("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), +// storage_entries: vec![StorageEntry { +// key: Felt::from_hex_unchecked("0x0000000000000000000000000000000000000000000000000000000000000001"), +// value: Felt::from_hex_unchecked("0x0000000000000000000000000000000000000000000000000000000000000002"), +// }], +// }]; + +// let block_number = 1; + +// // Call the function and print the result +// let result = +// contract_trie_root(&backend, &deployed_contracts, &replaced_classes, &nonces, &storage_diffs, block_number) +// .unwrap(); + +// assert_eq!( +// result, +// Felt::from_hex_unchecked("0x59b89ceac43986727fb4a57bd9f74690b5b3b0e976e7af0b10213c3d4392ef2") +// ); +// } + +// #[rstest] +// fn test_contract_state_leaf_hash_success(setup_test_backend: Arc) { +// let backend = setup_test_backend; + +// // Create dummy data +// let contract_address = +// Felt::from_hex_unchecked("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"); +// let contract_leaf = ContractLeaf { +// class_hash: Some(Felt::from_hex_unchecked( +// "0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321", +// )), +// storage_root: Some(Felt::from_hex_unchecked( +// "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890", +// )), +// nonce: Some(Felt::from_hex_unchecked("0x0000000000000000000000000000000000000000000000000000000000000001")), +// }; + +// // Call the function and print the result +// let result = contract_state_leaf_hash(&backend, &contract_address, &contract_leaf).unwrap(); +// assert_eq!( +// result, +// Felt::from_hex_unchecked("0x6bbd8d4b5692148f83c38e19091f64381b5239e2a73f53b59be3ec3efb41143") +// ); +// 
} +// } diff --git a/crates/madara/client/db/src/update_global_trie/mod.rs b/crates/madara/client/db/src/update_global_trie/mod.rs new file mode 100644 index 000000000..cbce4aedd --- /dev/null +++ b/crates/madara/client/db/src/update_global_trie/mod.rs @@ -0,0 +1,19 @@ +use starknet_types_core::{ + felt::Felt, + hash::{Poseidon, StarkHash}, +}; + +pub mod classes; +pub mod contracts; + +/// "STARKNET_STATE_V0" +const STARKNET_STATE_PREFIX: Felt = Felt::from_hex_unchecked("0x535441524b4e45545f53544154455f5630"); + +pub fn calculate_state_root(contracts_trie_root: Felt, classes_trie_root: Felt) -> Felt { + tracing::trace!("global state root calc {contracts_trie_root:#x} {classes_trie_root:#x}"); + if classes_trie_root == Felt::ZERO { + contracts_trie_root + } else { + Poseidon::hash_array(&[STARKNET_STATE_PREFIX, contracts_trie_root, classes_trie_root]) + } +} diff --git a/crates/madara/client/devnet/Cargo.toml b/crates/madara/client/devnet/Cargo.toml index 038dfcd50..3a7718129 100644 --- a/crates/madara/client/devnet/Cargo.toml +++ b/crates/madara/client/devnet/Cargo.toml @@ -33,7 +33,6 @@ m-cairo-test-contracts.workspace = true [dependencies] # Madara -mc-block-import.workspace = true mc-db.workspace = true mp-block.workspace = true mp-chain-config.workspace = true diff --git a/crates/madara/client/devnet/src/classes.rs b/crates/madara/client/devnet/src/classes.rs index e1b27b415..09d243ebe 100644 --- a/crates/madara/client/devnet/src/classes.rs +++ b/crates/madara/client/devnet/src/classes.rs @@ -1,6 +1,7 @@ use anyhow::Context; -use mc_block_import::{DeclaredClass, LegacyDeclaredClass, SierraDeclaredClass}; -use mp_class::{CompressedLegacyContractClass, FlattenedSierraClass}; +use mp_class::{ + ClassInfo, ClassInfoWithHash, CompressedLegacyContractClass, FlattenedSierraClass, LegacyClassInfo, SierraClassInfo, +}; use mp_state_update::DeclaredClassItem; use starknet_core::types::contract::{legacy::LegacyContractClass, SierraClass}; use 
starknet_types_core::felt::Felt; @@ -92,20 +93,21 @@ impl InitiallyDeclaredClasses { .collect() } - /// Load the classes into `DeclaredClass`es. - pub fn into_loaded_classes(self) -> Vec { + pub fn into_class_infos(self) -> Vec { self.0 .into_values() .map(|class| match class { - InitiallyDeclaredClass::Sierra(c) => DeclaredClass::Sierra(SierraDeclaredClass { + InitiallyDeclaredClass::Sierra(c) => ClassInfoWithHash { class_hash: c.class_hash, - contract_class: c.contract_class, - compiled_class_hash: c.compiled_class_hash, - }), - InitiallyDeclaredClass::Legacy(c) => DeclaredClass::Legacy(LegacyDeclaredClass { + class_info: ClassInfo::Sierra(SierraClassInfo { + contract_class: c.contract_class.into(), + compiled_class_hash: c.compiled_class_hash, + }), + }, + InitiallyDeclaredClass::Legacy(c) => ClassInfoWithHash { class_hash: c.class_hash, - contract_class: c.contract_class, - }), + class_info: ClassInfo::Legacy(LegacyClassInfo { contract_class: c.contract_class.into() }), + }, }) .collect() } diff --git a/crates/madara/client/devnet/src/lib.rs b/crates/madara/client/devnet/src/lib.rs index 867125e85..b8567d5c5 100644 --- a/crates/madara/client/devnet/src/lib.rs +++ b/crates/madara/client/devnet/src/lib.rs @@ -1,8 +1,13 @@ use anyhow::Context; use blockifier::abi::abi_utils::get_storage_var_address; -use mc_block_import::{UnverifiedFullBlock, UnverifiedHeader}; -use mp_block::header::{BlockTimestamp, GasPrices}; +use mc_db::MadaraBackend; +use mp_block::{ + commitments::CommitmentComputationContext, + header::{GasPrices, PendingHeader}, + PendingFullBlock, +}; use mp_chain_config::ChainConfig; +use mp_class::ClassInfoWithHash; use mp_convert::ToFelt; use mp_state_update::{ContractStorageDiffItem, StateDiff, StorageEntry}; use starknet_api::{core::ContractAddress, state::StorageKey}; @@ -11,7 +16,7 @@ use starknet_types_core::{ felt::Felt, hash::{Poseidon, StarkHash}, }; -use std::collections::HashMap; +use std::{collections::HashMap, iter, time::SystemTime}; mod 
balances; mod classes; @@ -147,35 +152,67 @@ impl ChainGenesisDescription { } #[tracing::instrument(skip(self, chain_config), fields(module = "ChainGenesisDescription"))] - pub fn build(mut self, chain_config: &ChainConfig) -> anyhow::Result { + pub fn into_block( + mut self, + chain_config: &ChainConfig, + ) -> anyhow::Result<(PendingFullBlock, Vec)> { self.initial_balances.to_storage_diffs(chain_config, &mut self.initial_storage); - Ok(UnverifiedFullBlock { - header: UnverifiedHeader { - parent_block_hash: Some(Felt::ZERO), - sequencer_address: chain_config.sequencer_address.to_felt(), - block_timestamp: BlockTimestamp::now(), - protocol_version: chain_config.latest_protocol_version, - l1_gas_price: GasPrices { - eth_l1_gas_price: 5, - strk_l1_gas_price: 5, - eth_l1_data_gas_price: 5, - strk_l1_data_gas_price: 5, + Ok(( + PendingFullBlock { + header: PendingHeader { + parent_block_hash: Felt::ZERO, + sequencer_address: chain_config.sequencer_address.to_felt(), + block_timestamp: mp_block::header::BlockTimestamp( + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("Current time is before unix epoch!") + .as_secs(), + ), + protocol_version: chain_config.latest_protocol_version, + l1_gas_price: GasPrices { + eth_l1_gas_price: 5, + strk_l1_gas_price: 5, + eth_l1_data_gas_price: 5, + strk_l1_data_gas_price: 5, + }, + l1_da_mode: mp_block::header::L1DataAvailabilityMode::Blob, + }, + state_diff: StateDiff { + storage_diffs: self.initial_storage.as_state_diff(), + deprecated_declared_classes: self.declared_classes.as_legacy_state_diff(), + declared_classes: self.declared_classes.as_state_diff(), + deployed_contracts: self.deployed_contracts.as_state_diff(), + replaced_classes: vec![], + nonces: vec![], }, - l1_da_mode: mp_block::header::L1DataAvailabilityMode::Blob, + transactions: vec![], + events: vec![], }, - state_diff: StateDiff { - storage_diffs: self.initial_storage.as_state_diff(), - deprecated_declared_classes: 
self.declared_classes.as_legacy_state_diff(), - declared_classes: self.declared_classes.as_state_diff(), - deployed_contracts: self.deployed_contracts.as_state_diff(), - replaced_classes: vec![], - nonces: vec![], + self.declared_classes.into_class_infos(), + )) + } + + pub fn build_and_store(self, backend: &MadaraBackend) -> anyhow::Result<()> { + let (block, classes) = self.into_block(backend.chain_config()).unwrap(); + + let block_number = 0; + let new_global_state_root = backend.apply_state(block_number, iter::once(&block.state_diff))?; + + let block = block.close_block( + &CommitmentComputationContext { + protocol_version: backend.chain_config().latest_protocol_version, + chain_id: backend.chain_config().chain_id.to_felt(), }, - declared_classes: self.declared_classes.into_loaded_classes(), - unverified_block_number: Some(0), - ..Default::default() - }) + block_number, + new_global_state_root, + true, + ); + let classes: Vec<_> = classes.into_iter().map(|class| class.convert()).collect::>()?; + + backend.store_full_block(block)?; + backend.class_db_store_block(block_number, &classes)?; + Ok(()) } } @@ -183,7 +220,6 @@ impl ChainGenesisDescription { mod tests { use super::*; use assert_matches::assert_matches; - use mc_block_import::{BlockImporter, BlockValidationContext}; use mc_block_production::metrics::BlockProductionMetrics; use mc_block_production::BlockProductionTask; use mc_db::MadaraBackend; @@ -193,7 +229,6 @@ mod tests { use mp_block::header::L1DataAvailabilityMode; use mp_block::{BlockId, BlockTag}; use mp_class::{ClassInfo, FlattenedSierraClass}; - use mp_receipt::{Event, ExecutionResult, FeePayment, InvokeTransactionReceipt, PriceUnit, TransactionReceipt}; use mp_transactions::compute_hash::calculate_contract_address; use mp_transactions::BroadcastedTransactionExt; @@ -305,23 +340,8 @@ mod tests { let mut g = ChainGenesisDescription::base_config().unwrap(); let contracts = g.add_devnet_contracts(10).unwrap(); - let chain_config = 
Arc::new(ChainConfig::madara_devnet()); - let block = g.build(&chain_config).unwrap(); - let backend = MadaraBackend::open_for_testing(Arc::clone(&chain_config)); - let importer = Arc::new(BlockImporter::new(Arc::clone(&backend), None).unwrap()); - - tracing::debug!("{:?}", block.state_diff); - let runtime = tokio::runtime::Runtime::new().unwrap(); - - runtime - .block_on( - importer.add_block( - block, - BlockValidationContext::new(chain_config.chain_id.clone()).trust_class_hashes(true), - ), - ) - .unwrap(); - + let backend = MadaraBackend::open_for_testing(Arc::new(ChainConfig::madara_devnet())); + g.build_and_store(&backend).unwrap(); tracing::debug!("block imported {:?}", backend.get_block_info(&BlockId::Tag(BlockTag::Latest))); let mut l1_data_provider = MockL1DataProvider::new(); @@ -336,15 +356,13 @@ mod tests { let mempool = Arc::new(Mempool::new(Arc::clone(&backend), Arc::clone(&l1_data_provider), mempool_limits)); let metrics = BlockProductionMetrics::register(); - let block_production = runtime - .block_on(BlockProductionTask::new( - Arc::clone(&backend), - Arc::clone(&importer), - Arc::clone(&mempool), - Arc::new(metrics), - Arc::clone(&l1_data_provider), - )) - .unwrap(); + let block_production = BlockProductionTask::new( + Arc::clone(&backend), + Arc::clone(&mempool), + Arc::new(metrics), + Arc::clone(&l1_data_provider), + ) + .unwrap(); DevnetForTesting { backend, contracts, block_production, mempool } } @@ -386,11 +404,8 @@ mod tests { assert_eq!(res.class_hash, calculated_class_hash); - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - chain.block_production.set_current_pending_tick(1); - chain.block_production.on_pending_time_tick().await.unwrap(); - }); + chain.block_production.set_current_pending_tick(1); + chain.block_production.on_pending_time_tick().unwrap(); let block = chain.backend.get_block(&BlockId::Tag(BlockTag::Pending)).unwrap().unwrap(); @@ -458,11 +473,8 @@ mod tests { .unwrap(); tracing::debug!("tx hash: 
{:#x}", transfer_txn.transaction_hash); - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - chain.block_production.set_current_pending_tick(chain.backend.chain_config().n_pending_ticks_per_block()); - chain.block_production.on_pending_time_tick().await.unwrap(); - }); + chain.block_production.set_current_pending_tick(chain.backend.chain_config().n_pending_ticks_per_block()); + chain.block_production.on_pending_time_tick().unwrap(); // ===================================================================================== @@ -493,11 +505,8 @@ mod tests { let res = chain.sign_and_add_deploy_account_tx(deploy_account_txn, &account).unwrap(); - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - chain.block_production.set_current_pending_tick(chain.backend.chain_config().n_pending_ticks_per_block()); - chain.block_production.on_pending_time_tick().await.unwrap(); - }); + chain.block_production.set_current_pending_tick(chain.backend.chain_config().n_pending_ticks_per_block()); + chain.block_production.on_pending_time_tick().unwrap(); assert_eq!(res.contract_address, account.address); @@ -557,11 +566,8 @@ mod tests { tracing::info!("tx hash: {:#x}", result.transaction_hash); - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - chain.block_production.set_current_pending_tick(1); - chain.block_production.on_pending_time_tick().await.unwrap(); - }); + chain.block_production.set_current_pending_tick(1); + chain.block_production.on_pending_time_tick().unwrap(); let block = chain.backend.get_block(&BlockId::Tag(BlockTag::Pending)).unwrap().unwrap(); @@ -766,11 +772,8 @@ mod tests { .unwrap(); std::thread::sleep(max_age); // max age reached - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - chain.block_production.set_current_pending_tick(1); - chain.block_production.on_pending_time_tick().await.unwrap(); - }); + chain.block_production.set_current_pending_tick(1); + 
chain.block_production.on_pending_time_tick().unwrap(); let block = chain.backend.get_block(&BlockId::Tag(BlockTag::Pending)).unwrap().unwrap(); diff --git a/crates/madara/client/eth/src/state_update.rs b/crates/madara/client/eth/src/state_update.rs index 2164f9bf4..60f30283a 100644 --- a/crates/madara/client/eth/src/state_update.rs +++ b/crates/madara/client/eth/src/state_update.rs @@ -20,8 +20,11 @@ pub struct L1StateUpdate { pub block_hash: Felt, } +pub type L1HeadReceiver = tokio::sync::watch::Receiver>; +pub type L1HeadSender = tokio::sync::watch::Sender>; + /// Get the last Starknet state update verified on the L1 -pub async fn get_initial_state(client: &EthereumClient) -> anyhow::Result { +async fn get_initial_state(client: &EthereumClient) -> anyhow::Result { let block_number = client.get_last_verified_block_number().await?; let block_hash = client.get_last_verified_block_hash().await?; let global_root = client.get_last_state_root().await?; @@ -29,9 +32,40 @@ pub async fn get_initial_state(client: &EthereumClient) -> anyhow::Result, + backend: Arc, + l1_head_sender: L1HeadSender, +) -> anyhow::Result<()> { + let event_filter = eth_client.l1_core_contract.event_filter::(); + // Listen to LogStateUpdate (0x77552641) update and send changes continuously + let mut event_stream = event_filter.watch().await.context(ERR_ARCHIVE)?.into_stream(); + + // // This does not seem to play well with anvil + // #[cfg(not(test))] + // { + let state_update = get_initial_state(ð_client).await.context("Getting initial ethereum state")?; + update_l1(&backend, &state_update, ð_client.l1_block_metrics)?; + l1_head_sender.send_modify(|s| *s = Some(state_update.clone())); + // } + + tracing::info!("🚀 Subscribed to L1 state verification"); + + while let Some(event_result) = event_stream.next().await { + let log = event_result.context("listening for events")?; + let state_update = convert_log_state_update(log.0.clone()).context("formatting event into an L1StateUpdate")?; + 
update_l1(&backend, &state_update, ð_client.l1_block_metrics)?; + l1_head_sender.send_modify(|s| *s = Some(state_update.clone())); + } + + Ok(()) +} + pub fn update_l1( backend: &MadaraBackend, - state_update: L1StateUpdate, + state_update: &L1StateUpdate, block_metrics: &L1BlockMetrics, ) -> anyhow::Result<()> { tracing::info!( @@ -53,35 +87,13 @@ pub async fn state_update_worker( backend: Arc, eth_client: Arc, mut ctx: ServiceContext, + l1_head_sender: L1HeadSender, ) -> anyhow::Result<()> { // Clear L1 confirmed block at startup backend.clear_last_confirmed_block().context("Clearing l1 last confirmed block number")?; tracing::debug!("update_l1: cleared confirmed block number"); - tracing::info!("🚀 Subscribed to L1 state verification"); - // This does not seem to play well with anvil - #[cfg(not(test))] - { - let initial_state = get_initial_state(ð_client).await.context("Getting initial ethereum state")?; - update_l1(&backend, initial_state, ð_client.l1_block_metrics)?; - } - - // Listen to LogStateUpdate (0x77552641) update and send changes continuously - let event_filter = eth_client.l1_core_contract.event_filter::(); - - let mut event_stream = match ctx.run_until_cancelled(event_filter.watch()).await { - Some(res) => res.context(ERR_ARCHIVE)?.into_stream(), - None => return anyhow::Ok(()), - }; - - while let Some(Some(event_result)) = ctx.run_until_cancelled(event_stream.next()).await { - let log = event_result.context("listening for events")?; - let format_event: L1StateUpdate = - convert_log_state_update(log.0.clone()).context("formatting event into an L1StateUpdate")?; - update_l1(&backend, format_event, ð_client.l1_block_metrics)?; - } - - anyhow::Ok(()) + ctx.run_until_cancelled(listen_and_update_state(eth_client, backend, l1_head_sender)).await.unwrap_or(Ok(())) } #[cfg(test)] @@ -163,14 +175,12 @@ mod eth_client_event_subscription_test { let eth_client = EthereumClient { provider: Arc::new(provider), l1_core_contract: core_contract.clone(), 
l1_block_metrics }; + let (snd, _recv) = tokio::sync::watch::channel(None); + // Start listening for state updates let listen_handle = { let db = Arc::clone(&db); - tokio::spawn(async move { - state_update_worker(Arc::clone(db.backend()), Arc::new(eth_client), ServiceContext::new_for_testing()) - .await - .unwrap() - }) + tokio::spawn(listen_and_update_state(Arc::new(eth_client), db.backend().clone(), snd)) }; let _ = contract.fireEvent().send().await.expect("Failed to fire event"); diff --git a/crates/madara/client/eth/src/sync.rs b/crates/madara/client/eth/src/sync.rs index 2a607fcde..228fd34df 100644 --- a/crates/madara/client/eth/src/sync.rs +++ b/crates/madara/client/eth/src/sync.rs @@ -1,15 +1,14 @@ use crate::client::EthereumClient; use crate::l1_gas_price::gas_price_worker; use crate::l1_messaging::sync; -use crate::state_update::state_update_worker; +use crate::state_update::{state_update_worker, L1HeadSender}; +use mc_db::MadaraBackend; use mc_mempool::{GasPriceProvider, Mempool}; use mp_utils::service::ServiceContext; use starknet_api::core::ChainId; use std::sync::Arc; use std::time::Duration; -use mc_db::MadaraBackend; - #[allow(clippy::too_many_arguments)] pub async fn l1_sync_worker( backend: Arc, @@ -19,11 +18,12 @@ pub async fn l1_sync_worker( gas_price_sync_disabled: bool, gas_price_poll_ms: Duration, mempool: Arc, + l1_head_sender: L1HeadSender, ctx: ServiceContext, ) -> anyhow::Result<()> { let mut join_set = tokio::task::JoinSet::new(); - join_set.spawn(state_update_worker(Arc::clone(&backend), Arc::clone(ð_client), ctx.clone())); + join_set.spawn(state_update_worker(Arc::clone(&backend), Arc::clone(ð_client), ctx.clone(), l1_head_sender)); join_set.spawn(sync(Arc::clone(&backend), Arc::clone(ð_client), chain_id, mempool, ctx.clone())); if !gas_price_sync_disabled { diff --git a/crates/madara/client/gateway/client/src/methods.rs b/crates/madara/client/gateway/client/src/methods.rs index 632475ee8..c52293ef1 100644 --- 
a/crates/madara/client/gateway/client/src/methods.rs +++ b/crates/madara/client/gateway/client/src/methods.rs @@ -4,7 +4,9 @@ use mp_block::{BlockId, BlockTag}; use mp_class::{ContractClass, FlattenedSierraClass}; use mp_gateway::error::{SequencerError, StarknetError}; use mp_gateway::{ - block::{ProviderBlock, ProviderBlockPending, ProviderBlockPendingMaybe, ProviderBlockSignature}, + block::{ + ProviderBlock, ProviderBlockHeader, ProviderBlockPending, ProviderBlockPendingMaybe, ProviderBlockSignature, + }, state_update::{ ProviderStateUpdate, ProviderStateUpdatePending, ProviderStateUpdatePendingMaybe, ProviderStateUpdateWithBlock, ProviderStateUpdateWithBlockPending, ProviderStateUpdateWithBlockPendingMaybe, @@ -36,6 +38,16 @@ impl GatewayProvider { } } + pub async fn get_header(&self, block_id: BlockId) -> Result { + let request = RequestBuilder::new(&self.client, self.feeder_gateway_url.clone(), self.headers.clone()) + .add_uri_segment("get_block") + .expect("Failed to add URI segment. 
This should not fail in prod.") + .with_block_id(&block_id) + .add_param("headerOnly", "true"); + + request.send_get::().await + } + pub async fn get_state_update(&self, block_id: BlockId) -> Result { let request = RequestBuilder::new(&self.client, self.feeder_gateway_url.clone(), self.headers.clone()) .add_uri_segment("get_state_update") diff --git a/crates/madara/client/gateway/client/src/request_builder.rs b/crates/madara/client/gateway/client/src/request_builder.rs index ca7d56e26..91b817bfb 100644 --- a/crates/madara/client/gateway/client/src/request_builder.rs +++ b/crates/madara/client/gateway/client/src/request_builder.rs @@ -1,5 +1,4 @@ -use std::{borrow::Cow, collections::HashMap}; - +use super::builder::PausedClient; use bytes::Buf; use http::Method; use http_body_util::BodyExt; @@ -11,11 +10,10 @@ use mp_gateway::error::{SequencerError, StarknetError}; use serde::de::DeserializeOwned; use serde::Serialize; use starknet_types_core::felt::Felt; +use std::{borrow::Cow, collections::HashMap}; use tower::Service; use url::Url; -use super::builder::PausedClient; - #[derive(Debug)] pub struct RequestBuilder<'a> { client: &'a PausedClient, @@ -40,18 +38,18 @@ impl<'a> RequestBuilder<'a> { self } - pub fn add_param(mut self, name: Cow<'static, str>, value: &str) -> Self { - self.params.insert(name, value.to_string()); + pub fn add_param(mut self, name: impl Into>, value: impl Into>) -> Self { + self.params.insert(name.into(), value.into().to_string()); self } pub fn with_block_id(mut self, block_id: &BlockId) -> Self { match block_id { BlockId::Hash(hash) => { - self = self.add_param(Cow::from("blockHash"), &format!("0x{hash:x}")); + self = self.add_param(Cow::from("blockHash"), format!("0x{hash:x}")); } BlockId::Number(number) => { - self = self.add_param(Cow::from("blockNumber"), &number.to_string()); + self = self.add_param(Cow::from("blockNumber"), number.to_string()); } BlockId::Tag(tag) => { let tag = match tag { @@ -65,7 +63,7 @@ impl<'a> 
RequestBuilder<'a> { } pub fn with_class_hash(mut self, class_hash: Felt) -> Self { - self = self.add_param(Cow::from("classHash"), &format!("0x{class_hash:x}")); + self = self.add_param(Cow::from("classHash"), format!("0x{class_hash:x}")); self } diff --git a/crates/madara/client/mempool/Cargo.toml b/crates/madara/client/mempool/Cargo.toml index 6e937af8c..e3c4eaba5 100644 --- a/crates/madara/client/mempool/Cargo.toml +++ b/crates/madara/client/mempool/Cargo.toml @@ -39,7 +39,6 @@ testing = ["blockifier/testing", "mc-db/testing", "mockall"] # Madara mc-analytics.workspace = true -mc-block-import.workspace = true mc-db.workspace = true mc-exec.workspace = true mp-block.workspace = true diff --git a/crates/madara/client/mempool/src/lib.rs b/crates/madara/client/mempool/src/lib.rs index 2178cb5b0..3e54d50f0 100644 --- a/crates/madara/client/mempool/src/lib.rs +++ b/crates/madara/client/mempool/src/lib.rs @@ -1646,8 +1646,6 @@ mod test { ..Default::default() }, vec![], - None, - None, ) .expect("Failed to store block"); @@ -1687,8 +1685,6 @@ mod test { ..Default::default() }, vec![], - None, - None, ) .expect("Failed to store block"); diff --git a/crates/madara/client/p2p/Cargo.toml b/crates/madara/client/p2p/Cargo.toml new file mode 100644 index 000000000..1ef03c943 --- /dev/null +++ b/crates/madara/client/p2p/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "mc-p2p" +authors.workspace = true +homepage.workspace = true +edition.workspace = true +repository.workspace = true +version.workspace = true +license.workspace = true + +[dependencies] +anyhow.workspace = true +async-trait.workspace = true +base64.workspace = true +bytes.workspace = true +futures-bounded.workspace = true +futures.workspace = true +libp2p.workspace = true +prost.workspace = true +serde.workspace = true +serde_json.workspace = true +thiserror.workspace = true +tokio-stream.workspace = true +tokio-util.workspace = true +tokio.workspace = true +tracing.workspace = true +unsigned-varint.workspace = 
true + +mc-db.workspace = true +mc-rpc.workspace = true +mp-block.workspace = true +mp-chain-config.workspace = true +mp-class.workspace = true +mp-convert.workspace = true +mp-receipt.workspace = true +mp-state-update.workspace = true +mp-transactions.workspace = true +mp-utils.workspace = true +mp-proto.workspace = true +p2p_stream.workspace = true + +starknet-core.workspace = true +starknet-types-core.workspace = true + +[lints] +workspace = true + +[build-dependencies] +prost-build.workspace = true diff --git a/crates/madara/client/p2p/src/behaviour.rs b/crates/madara/client/p2p/src/behaviour.rs new file mode 100644 index 000000000..e4ae0354a --- /dev/null +++ b/crates/madara/client/p2p/src/behaviour.rs @@ -0,0 +1,91 @@ +use libp2p::{ + autonat, dcutr, + gossipsub::{self, MessageAuthenticity}, + identify, + identity::Keypair, + kad::{self, store::MemoryStore}, + ping, + relay::{self}, + swarm::NetworkBehaviour, + StreamProtocol, +}; +use mp_chain_config::ChainConfig; +use std::time::Duration; + +use crate::sync_codec::codecs; + +pub type Event = ::ToSwarm; + +#[derive(NetworkBehaviour)] +pub struct MadaraP2pBehaviour { + /// Ping protocol. + pub ping: ping::Behaviour, + /// Kademlia is used for node discovery only. + pub kad: kad::Behaviour, + /// Identify as starknet node. + pub identify: identify::Behaviour, + + /// Automatically make NAT configuration. + pub autonat: autonat::Behaviour, + /// DCUTR: Direct Connection Upgrade using Relay: this allows nodes behind a NAT to receive incoming connections through a relay node. + pub dcutr: dcutr::Behaviour, + /// If we're behind a NAT, we want to have a relay client to advertise a public address. It'll then be upgraded using DCUTR to a direct connection. + pub relay: relay::client::Behaviour, + + /// Pubsub. 
+ pub gossipsub: gossipsub::Behaviour, + + // Single Req - Multiple Responses Streams + pub headers_sync: p2p_stream::Behaviour, + pub classes_sync: p2p_stream::Behaviour, + pub state_diffs_sync: p2p_stream::Behaviour, + pub transactions_sync: p2p_stream::Behaviour, + pub events_sync: p2p_stream::Behaviour, +} + +impl MadaraP2pBehaviour { + // The return error type can't be anyhow::Error unfortunately because the SwarmBuilder won't let us + pub fn new( + chain_config: &ChainConfig, + identity: &Keypair, + relay_behaviour: libp2p::relay::client::Behaviour, + ) -> Result> { + let pubkey = identity.public(); + let local_peer_id = pubkey.to_peer_id(); + + let p2p_stream_config = p2p_stream::Config::default(); + Ok(Self { + identify: identify::Behaviour::new( + identify::Config::new(identify::PROTOCOL_NAME.to_string(), pubkey) + .with_agent_version(format!("madara/{}", env!("CARGO_PKG_VERSION"))), + ), + ping: Default::default(), + kad: { + let protocol = StreamProtocol::try_from_owned(format!("/starknet/kad/{}/1.0.0", chain_config.chain_id)) + .expect("Invalid kad stream protocol"); + let mut cfg = kad::Config::new(protocol); + const PROVIDER_PUBLICATION_INTERVAL: Duration = Duration::from_secs(600); + cfg.set_record_ttl(Some(Duration::from_secs(0))); + cfg.set_provider_record_ttl(Some(PROVIDER_PUBLICATION_INTERVAL * 3)); + cfg.set_provider_publication_interval(Some(PROVIDER_PUBLICATION_INTERVAL)); + cfg.set_periodic_bootstrap_interval(Some(Duration::from_millis(500))); + cfg.set_query_timeout(Duration::from_secs(5 * 60)); + kad::Behaviour::with_config(local_peer_id, MemoryStore::new(local_peer_id), cfg) + }, + autonat: autonat::Behaviour::new(local_peer_id, autonat::Config::default()), + dcutr: dcutr::Behaviour::new(local_peer_id), + relay: relay_behaviour, + gossipsub: { + let privacy = MessageAuthenticity::Signed(identity.clone()); + gossipsub::Behaviour::new(privacy, gossipsub::Config::default()) + .map_err(|err| anyhow::anyhow!("Error making gossipsub config: 
{err}"))? + }, + + headers_sync: p2p_stream::Behaviour::with_codec(codecs::headers(), p2p_stream_config), + classes_sync: p2p_stream::Behaviour::with_codec(codecs::classes(), p2p_stream_config), + state_diffs_sync: p2p_stream::Behaviour::with_codec(codecs::state_diffs(), p2p_stream_config), + transactions_sync: p2p_stream::Behaviour::with_codec(codecs::transactions(), p2p_stream_config), + events_sync: p2p_stream::Behaviour::with_codec(codecs::events(), p2p_stream_config), + }) + } +} diff --git a/crates/madara/client/p2p/src/commands.rs b/crates/madara/client/p2p/src/commands.rs new file mode 100644 index 000000000..47501c127 --- /dev/null +++ b/crates/madara/client/p2p/src/commands.rs @@ -0,0 +1,198 @@ +use std::collections::{HashMap, HashSet}; + +use crate::{ + handlers_impl::{self}, + sync_handlers, MadaraP2p, +}; +use futures::{channel::mpsc, stream, SinkExt, Stream, StreamExt}; +use libp2p::PeerId; +use mc_db::stream::BlockStreamConfig; +use mp_block::{BlockHeaderWithSignatures, TransactionWithReceipt}; +use mp_class::ClassInfoWithHash; +use mp_proto::model; +use mp_receipt::EventWithTransactionHash; +use mp_state_update::{DeclaredClassCompiledClass, StateDiff}; +use starknet_core::types::Felt; + +#[derive(Debug, Clone)] +pub struct P2pCommands { + pub(crate) inner: mpsc::Sender, + pub(crate) peer_id: PeerId, +} + +impl P2pCommands { + pub async fn get_random_peers(&mut self) -> HashSet { + let (callback, recv) = mpsc::unbounded(); + let _res = self.inner.send(Command::GetRandomPeers { callback }).await; + recv.collect().await + } + + pub fn peer_id(&self) -> PeerId { + self.peer_id + } + + pub async fn make_headers_stream( + &mut self, + peer: PeerId, + config: BlockStreamConfig, + ) -> impl Stream> + 'static { + let req = model::BlockHeadersRequest { iteration: Some(config.into()) }; + let (callback, recv) = mpsc::channel(3); + let _res = self.inner.send(Command::SyncHeaders { peer, req, callback }).await; + + stream::unfold(recv, |mut recv| async move { + 
let res = handlers_impl::read_headers_stream(recv.by_ref()).await; + if matches!(res, Err(sync_handlers::Error::EndOfStream)) { + return None; + } + Some((res, recv)) + }) + } + + /// Note: The events in the transaction receipt will not be filled in. Use [`Self::make_events_stream`] to get them. + pub async fn make_transactions_stream<'a>( + &mut self, + peer: PeerId, + config: BlockStreamConfig, + transactions_count: impl IntoIterator + 'a, + ) -> impl Stream, sync_handlers::Error>> + 'a { + let req = model::TransactionsRequest { iteration: Some(config.into()) }; + let (callback, recv) = mpsc::channel(3); + let _res = self.inner.send(Command::SyncTransactions { peer, req, callback }).await; + + stream::unfold((recv, transactions_count.into_iter()), |(mut recv, mut transactions_count)| async move { + let res = handlers_impl::read_transactions_stream(recv.by_ref(), transactions_count.next()?).await; + if matches!(res, Err(sync_handlers::Error::EndOfStream)) { + return None; + } + Some((res, (recv, transactions_count))) + }) + } + + /// Note: The declared_contracts field of the state diff will be empty. Its content will be instead in the replaced class field. 
+ pub async fn make_state_diffs_stream<'a>( + &mut self, + peer: PeerId, + config: BlockStreamConfig, + state_diffs_length: impl IntoIterator + 'a, + ) -> impl Stream> + 'a { + let req = model::StateDiffsRequest { iteration: Some(config.into()) }; + let (callback, recv) = mpsc::channel(3); + let _res = self.inner.send(Command::SyncStateDiffs { peer, req, callback }).await; + + stream::unfold((recv, state_diffs_length.into_iter()), |(mut recv, mut state_diffs_length)| async move { + let res = handlers_impl::read_state_diffs_stream(recv.by_ref(), state_diffs_length.next()?).await; + if matches!(res, Err(sync_handlers::Error::EndOfStream)) { + return None; + } + Some((res, (recv, state_diffs_length))) + }) + } + + pub async fn make_events_stream<'a>( + &mut self, + peer: PeerId, + config: BlockStreamConfig, + events_count: impl IntoIterator + 'a, + ) -> impl Stream, sync_handlers::Error>> + 'a { + let req = model::EventsRequest { iteration: Some(config.into()) }; + let (callback, recv) = mpsc::channel(3); + let _res = self.inner.send(Command::SyncEvents { peer, req, callback }).await; + + stream::unfold((recv, events_count.into_iter()), |(mut recv, mut events_count)| async move { + let res = handlers_impl::read_events_stream(recv.by_ref(), events_count.next()?).await; + if matches!(res, Err(sync_handlers::Error::EndOfStream)) { + return None; + } + Some((res, (recv, events_count))) + }) + } + + /// Note: you need to get the `declared_classes` from the `StateDiff`s beforehand. 
+ pub async fn make_classes_stream<'a>( + &mut self, + peer: PeerId, + config: BlockStreamConfig, + declared_classes: impl IntoIterator> + 'a, + ) -> impl Stream, sync_handlers::Error>> + 'a { + let req = model::ClassesRequest { iteration: Some(config.into()) }; + let (callback, recv) = mpsc::channel(3); + let _res = self.inner.send(Command::SyncClasses { peer, req, callback }).await; + + stream::unfold((recv, declared_classes.into_iter()), |(mut recv, mut declared_classes)| async move { + let res = handlers_impl::read_classes_stream(recv.by_ref(), declared_classes.next()?).await; + if matches!(res, Err(sync_handlers::Error::EndOfStream)) { + return None; + } + Some((res, (recv, declared_classes))) + }) + } +} + +#[derive(Debug)] +pub(crate) enum Command { + GetRandomPeers { + /// Channel is unbounded because we do not want the receiver to be able to block the p2p task. + /// This is not an issue for the sync commands as their respective handlers are spawned as new tasks - thus handling + /// backpressure. 
+ callback: mpsc::UnboundedSender, + }, + SyncHeaders { + peer: PeerId, + req: model::BlockHeadersRequest, + callback: mpsc::Sender, + }, + SyncClasses { + peer: PeerId, + req: model::ClassesRequest, + callback: mpsc::Sender, + }, + SyncStateDiffs { + peer: PeerId, + req: model::StateDiffsRequest, + callback: mpsc::Sender, + }, + SyncTransactions { + peer: PeerId, + req: model::TransactionsRequest, + callback: mpsc::Sender, + }, + SyncEvents { + peer: PeerId, + req: model::EventsRequest, + callback: mpsc::Sender, + }, +} + +impl MadaraP2p { + pub(crate) fn handle_command(&mut self, command: Command) { + tracing::trace!("Handle command: {command:?}"); + match command { + Command::GetRandomPeers { callback } => { + let query_id = self.swarm.behaviour_mut().kad.get_closest_peers(PeerId::random()); + tracing::debug!("Started get random peers query: {query_id}"); + self.pending_get_closest_peers.insert(query_id, callback); + } + Command::SyncHeaders { peer, req, callback } => { + let request_id = self.swarm.behaviour_mut().headers_sync.send_request(&peer, req); + self.headers_sync_handler.add_outbound(request_id, callback); + } + Command::SyncClasses { peer, req, callback } => { + let request_id = self.swarm.behaviour_mut().classes_sync.send_request(&peer, req); + self.classes_sync_handler.add_outbound(request_id, callback); + } + Command::SyncStateDiffs { peer, req, callback } => { + let request_id = self.swarm.behaviour_mut().state_diffs_sync.send_request(&peer, req); + self.state_diffs_sync_handler.add_outbound(request_id, callback); + } + Command::SyncTransactions { peer, req, callback } => { + let request_id = self.swarm.behaviour_mut().transactions_sync.send_request(&peer, req); + self.transactions_sync_handler.add_outbound(request_id, callback); + } + Command::SyncEvents { peer, req, callback } => { + let request_id = self.swarm.behaviour_mut().events_sync.send_request(&peer, req); + self.events_sync_handler.add_outbound(request_id, callback); + } + } + } +} diff 
--git a/crates/madara/client/p2p/src/events.rs b/crates/madara/client/p2p/src/events.rs new file mode 100644 index 000000000..01b611979 --- /dev/null +++ b/crates/madara/client/p2p/src/events.rs @@ -0,0 +1,108 @@ +//! Handle incoming p2p events +use crate::{ + behaviour::{self}, + MadaraP2p, +}; +use futures::channel::mpsc; +use libp2p::{kad::QueryResult, swarm::SwarmEvent}; +use std::collections::hash_map; + +impl MadaraP2p { + pub fn handle_event(&mut self, event: SwarmEvent) -> anyhow::Result<()> { + tracing::trace!("event: {event:?}"); + match event { + SwarmEvent::NewListenAddr { address, .. } => { + let listen_address = address.with_p2p(*self.swarm.local_peer_id()).expect("Making multiaddr"); + tracing::info!("📡 Peer-to-peer listening on address {listen_address:?}"); + } + + // Pending get closest peer queries. + SwarmEvent::Behaviour(behaviour::Event::Kad(libp2p::kad::Event::OutboundQueryProgressed { + id, + result, + stats: _stats, + step, + })) => { + tracing::trace!("KAD OutboundQueryProgressed: {id:?} {result:?} {step:?}"); + if let hash_map::Entry::Occupied(mut entry) = self.pending_get_closest_peers.entry(id) { + let QueryResult::GetClosestPeers(res) = result else { + anyhow::bail!("pending_get_closest_peers entry {id} has the wrong result type: {result:?}") + }; + + match res { + Ok(res) => { + let send_all = || { + for el in res.peers { + tracing::trace!("KAD SEND {id:?}"); + entry.get_mut().unbounded_send(el.peer_id)?; + } + Ok::<_, mpsc::TrySendError<_>>(()) + }; + + if let Err(err) = send_all() { + tracing::trace!("Channel closed for kad query {id}: {err:#}"); + entry.remove(); + return Ok(()); + } + } + Err(err) => tracing::trace!("Failed get_closest_peer request: {err:#}"), + } + + if step.last { + // query is finished + entry.remove(); + } + } + } + + SwarmEvent::Behaviour(behaviour::Event::Identify(libp2p::identify::Event::Received { + peer_id, + info, + connection_id: _, + })) => { + // TODO: we may want to tell the local node about the 
info.observed_addr - but we probably need to check that address first + // maybe we do want to trust the address if it comes from the relay..? + // https://github.com/libp2p/rust-libp2p/blob/master/protocols/identify/CHANGELOG.md#0430 + // https://github.com/search?q=repo%3Alibp2p%2Frust-libp2p%20add_external_address&type=code + self.swarm.add_external_address(info.observed_addr.clone()); // removing this will mean that the node won't switch to kad server mode and will stay client + tracing::debug!("add external address: {:?}", info.observed_addr); + + // check that we're supposed to be in the same network - we check that they have at least the kademlia protocol for our chain + let local_kad_protocols = self.swarm.behaviour().kad.protocol_names(); + // local_kad_protocols=[/starknet/kad/SN_SEPOLIA/1.0.0] + tracing::debug!("identify: {info:?} {local_kad_protocols:?}"); + + if !info.protocols.iter().any(|p| local_kad_protocols.contains(p)) { + // TODO: should we be more restrictive about this? + tracing::debug!( + "Got an Identify response from a peer ({peer_id}) that is not running any of our protocols" + ); + return Ok(()); + } + + // Make kademlia aware of the identity of the peer we connected to. 
+ for addr in info.listen_addrs { + self.swarm.behaviour_mut().kad.add_address(&peer_id, addr); + } + } + + SwarmEvent::Behaviour(behaviour::Event::HeadersSync(event)) => { + self.headers_sync_handler.handle_event(event); + } + SwarmEvent::Behaviour(behaviour::Event::ClassesSync(event)) => { + self.classes_sync_handler.handle_event(event); + } + SwarmEvent::Behaviour(behaviour::Event::StateDiffsSync(event)) => { + self.state_diffs_sync_handler.handle_event(event); + } + SwarmEvent::Behaviour(behaviour::Event::TransactionsSync(event)) => { + self.transactions_sync_handler.handle_event(event); + } + SwarmEvent::Behaviour(behaviour::Event::EventsSync(event)) => { + self.events_sync_handler.handle_event(event); + } + _event => {} + } + Ok(()) + } +} diff --git a/crates/madara/client/p2p/src/handlers_impl/classes.rs b/crates/madara/client/p2p/src/handlers_impl/classes.rs new file mode 100644 index 000000000..c13266887 --- /dev/null +++ b/crates/madara/client/p2p/src/handlers_impl/classes.rs @@ -0,0 +1,115 @@ +use crate::{ + handlers_impl::{ + block_stream_config, + error::{OptionExt, ResultExt}, + }, + sync_handlers::{self, ReqContext}, + MadaraP2pContext, +}; +use futures::{channel::mpsc::Sender, SinkExt, Stream, StreamExt}; +use mc_db::db_block_id::DbBlockId; +use mp_class::{ClassInfo, ClassInfoWithHash}; +use mp_proto::model; +use mp_state_update::DeclaredClassCompiledClass; +use starknet_core::types::Felt; +use std::collections::{hash_map, HashMap}; +use tokio::pin; + +pub async fn classes_sync( + ctx: ReqContext, + req: model::ClassesRequest, + mut out: Sender, +) -> Result<(), sync_handlers::Error> { + let iterator_config = block_stream_config(&ctx.app_ctx.backend, req.iteration.unwrap_or_default())?; + let ite = ctx.app_ctx.backend.block_info_iterator(iterator_config.clone()); + + tracing::debug!("serving classes sync! 
{iterator_config:?}"); + + for res in ite { + let header = res.or_internal_server_error("Error while reading from block stream")?; + + let state_diff = ctx + .app_ctx + .backend + .get_block_state_diff(&DbBlockId::Number(header.header.block_number)) + .or_internal_server_error("Getting block state diff")? + .ok_or_internal_server_error("No state diff for block")?; + + for class_hash in state_diff + .deprecated_declared_classes + .into_iter() + .chain(state_diff.declared_classes.into_iter().map(|entry| entry.class_hash)) + { + let Some(class_info) = ctx + .app_ctx + .backend + .get_class_info(&DbBlockId::Number(header.header.block_number), &class_hash) + .or_internal_server_error("Getting class info")? + else { + continue; // it is possible that we have the state diff but not the class yet for that block. + }; + + let class = model::Class { domain: 0, class_hash: Some(class_hash.into()), class: Some(class_info.into()) }; + + out.send(model::ClassesResponse { + class_message: Some(model::classes_response::ClassMessage::Class(class)), + }) + .await? + } + } + + // Add the Fin message + let _res = out + .send(model::ClassesResponse { class_message: Some(model::classes_response::ClassMessage::Fin(model::Fin {})) }) + .await; + + Ok(()) +} + +/// Note: you need to get the `declared_classes` field from the `StateDiff` beforehand. +pub async fn read_classes_stream( + res: impl Stream, + declared_classes: &HashMap, +) -> Result, sync_handlers::Error> { + pin!(res); + + let mut out: HashMap = HashMap::with_capacity(declared_classes.len()); + while out.len() < declared_classes.len() { + let handle_fin = || { + if out.is_empty() { + sync_handlers::Error::EndOfStream + } else { + sync_handlers::Error::bad_request(format!( + "Expected {} messages in stream, got {}", + declared_classes.len(), + out.len() + )) + } + }; + + let Some(res) = res.next().await else { return Err(handle_fin()) }; + let val = match res.class_message.ok_or_bad_request("No message")? 
{ + model::classes_response::ClassMessage::Class(message) => message, + model::classes_response::ClassMessage::Fin(_) => return Err(handle_fin()), + }; + + let class_hash = val.class_hash.ok_or_bad_request("Missing class_hash field")?.into(); + let hash_map::Entry::Vacant(out_entry) = out.entry(class_hash) else { + return Err(sync_handlers::Error::bad_request("Duplicate class_hash")); + }; + + // Get the expected compiled_class_hash. + let Some(compiled_class_hash) = declared_classes.get(&class_hash) else { + return Err(sync_handlers::Error::bad_request("Unexpected class_hash: not in declared classes")); + }; + + let class_info = val + .class + .ok_or_bad_request("Missing class field")? + .parse_model(*compiled_class_hash) + .or_bad_request("Converting class info")?; + out_entry.insert(class_info); + } + + Ok(out.into_iter().map(|(class_hash, class_info)| ClassInfoWithHash { class_info, class_hash }).collect()) +} diff --git a/crates/madara/client/p2p/src/handlers_impl/error.rs b/crates/madara/client/p2p/src/handlers_impl/error.rs new file mode 100644 index 000000000..8e6ff846d --- /dev/null +++ b/crates/madara/client/p2p/src/handlers_impl/error.rs @@ -0,0 +1,106 @@ +#![allow(unused)] + +use crate::sync_handlers; +use std::fmt; + +#[macro_export] +macro_rules! bail_internal_server_error { + ($msg:literal $(,)?) => { + return ::core::result::Result::Err($crate::sync_handlers::Error::Internal(anyhow::anyhow!($msg))) + }; + ($err:expr $(,)?) => { + return ::core::result::Result::Err($crate::sync_handlers::Error::Internal(anyhow::anyhow!($err))) + }; + ($fmt:expr, $($arg:tt)*) => { + return ::core::result::Result::Err($crate::sync_handlers::Error::Internal(anyhow::anyhow!($fmt, $($arg)*))) + }; +} + +#[macro_export] +macro_rules! bail_bad_request { + ($msg:literal $(,)?) => { + return ::core::result::Result::Err($crate::sync_handlers::Error::BadRequest(format!($msg))) + }; + ($err:expr $(,)?) 
=> { + return ::core::result::Result::Err($crate::sync_handlers::Error::BadRequest(format!($err))) + }; + ($fmt:expr, $($arg:tt)*) => { + return ::core::result::Result::Err($crate::sync_handlers::Error::BadRequest(format!($fmt, $($arg)*))) + }; +} + +pub trait ResultExt { + fn or_internal_server_error(self, context: C) -> Result; + fn or_else_internal_server_error C>( + self, + context_fn: F, + ) -> Result; + fn or_bad_request(self, context: C) -> Result; + fn or_else_bad_request C>(self, context_fn: F) -> Result; +} + +impl> ResultExt for Result { + fn or_internal_server_error(self, context: C) -> Result { + self.map_err(|err| sync_handlers::Error::Internal(anyhow::anyhow!("{}: {:#}", context, E::into(err)))) + } + fn or_else_internal_server_error C>( + self, + context_fn: F, + ) -> Result { + self.map_err(|err| sync_handlers::Error::Internal(anyhow::anyhow!("{}: {:#}", context_fn(), E::into(err)))) + } + + fn or_bad_request(self, context: C) -> Result { + self.map_err(|err| sync_handlers::Error::BadRequest(format!("{}: {:#}", context, E::into(err)).into())) + } + fn or_else_bad_request C>(self, context_fn: F) -> Result { + self.map_err(|err| sync_handlers::Error::BadRequest(format!("{}: {:#}", context_fn(), E::into(err)).into())) + } +} + +pub trait OptionExt { + fn ok_or_internal_server_error( + self, + context: C, + ) -> Result; + fn ok_or_else_internal_server_error C>( + self, + context_fn: F, + ) -> Result; + fn ok_or_bad_request( + self, + context: C, + ) -> Result; + fn ok_or_else_bad_request C>( + self, + context_fn: F, + ) -> Result; +} + +impl OptionExt for Option { + fn ok_or_internal_server_error( + self, + context: C, + ) -> Result { + self.ok_or_else(|| sync_handlers::Error::Internal(anyhow::anyhow!("{}", context))) + } + fn ok_or_else_internal_server_error C>( + self, + context_fn: F, + ) -> Result { + self.ok_or_else(|| sync_handlers::Error::Internal(anyhow::anyhow!("{}", context_fn()))) + } + + fn ok_or_bad_request( + self, + context: C, + ) -> 
Result { + self.ok_or_else(|| sync_handlers::Error::BadRequest(format!("{}", context).into())) + } + fn ok_or_else_bad_request C>( + self, + context_fn: F, + ) -> Result { + self.ok_or_else(|| sync_handlers::Error::BadRequest(format!("{}", context_fn()).into())) + } +} diff --git a/crates/madara/client/p2p/src/handlers_impl/events.rs b/crates/madara/client/p2p/src/handlers_impl/events.rs new file mode 100644 index 000000000..9611e06e1 --- /dev/null +++ b/crates/madara/client/p2p/src/handlers_impl/events.rs @@ -0,0 +1,81 @@ +use crate::{ + handlers_impl::{ + block_stream_config, + error::{OptionExt, ResultExt}, + }, + sync_handlers::{self, ReqContext}, + MadaraP2pContext, +}; +use futures::{channel::mpsc::Sender, SinkExt, Stream, StreamExt}; +use mc_db::db_block_id::DbBlockId; +use mp_proto::model; +use mp_receipt::EventWithTransactionHash; +use tokio::pin; + +pub async fn events_sync( + ctx: ReqContext, + req: model::EventsRequest, + mut out: Sender, +) -> Result<(), sync_handlers::Error> { + let iterator_config = block_stream_config(&ctx.app_ctx.backend, req.iteration.unwrap_or_default())?; + let ite = ctx.app_ctx.backend.block_info_iterator(iterator_config.clone()); + + tracing::debug!("serving events sync! {iterator_config:?}"); + + for res in ite { + let header = res.or_internal_server_error("Error while reading from block stream")?; + + let Some(block_inner) = ctx + .app_ctx + .backend + .get_block_inner(&DbBlockId::Number(header.header.block_number)) + .or_internal_server_error("Getting block body")? + else { + continue; // it is possible that we have the header but not the events for this block yet. 
+ }; + + let events = block_inner.receipts.iter().zip(&header.tx_hashes).flat_map(|(receipt, &transaction_hash)| { + receipt.events().iter().cloned().map(move |event| EventWithTransactionHash { transaction_hash, event }) + }); + for event in events { + out.send(model::EventsResponse { + event_message: Some(model::events_response::EventMessage::Event(event.into())), + }) + .await?; + } + } + + // Add the Fin message + out.send(model::EventsResponse { event_message: Some(model::events_response::EventMessage::Fin(model::Fin {})) }) + .await?; + + Ok(()) +} + +pub async fn read_events_stream( + res: impl Stream, + events_count: usize, +) -> Result, sync_handlers::Error> { + pin!(res); + + let mut vec = Vec::with_capacity(events_count); + for i in 0..events_count { + let handle_fin = || { + if i == 0 { + sync_handlers::Error::EndOfStream + } else { + sync_handlers::Error::bad_request(format!("Expected {} messages in stream, got {}", events_count, i)) + } + }; + + let Some(res) = res.next().await else { return Err(handle_fin()) }; + let val = match res.event_message.ok_or_bad_request("No message")? 
{ + model::events_response::EventMessage::Event(message) => message, + model::events_response::EventMessage::Fin(_) => return Err(handle_fin()), + }; + let res = EventWithTransactionHash::try_from(val).or_bad_request("Converting transaction with receipt")?; + vec.push(res); + } + + Ok(vec) +} diff --git a/crates/madara/client/p2p/src/handlers_impl/headers.rs b/crates/madara/client/p2p/src/handlers_impl/headers.rs new file mode 100644 index 000000000..34177b33c --- /dev/null +++ b/crates/madara/client/p2p/src/handlers_impl/headers.rs @@ -0,0 +1,60 @@ +use super::{ + block_stream_config, + error::{OptionExt, ResultExt}, +}; +use crate::{ + sync_handlers::{self, ReqContext}, + MadaraP2pContext, +}; +use futures::{channel::mpsc::Sender, SinkExt, Stream, StreamExt}; +use mp_block::{BlockHeaderWithSignatures, ConsensusSignature}; +use mp_proto::model; +use starknet_core::types::Felt; +use tokio::pin; + +pub async fn headers_sync( + ctx: ReqContext, + req: model::BlockHeadersRequest, + mut out: Sender, +) -> Result<(), sync_handlers::Error> { + let iterator_config = block_stream_config(&ctx.app_ctx.backend, req.iteration.unwrap_or_default())?; + let ite = ctx.app_ctx.backend.block_info_iterator(iterator_config.clone()); + + tracing::debug!("serving headers sync! 
{iterator_config:?}"); + + for res in ite { + let header = res.or_internal_server_error("Error while reading from block stream")?; + let header = BlockHeaderWithSignatures { + header: header.header, + block_hash: header.block_hash, + consensus_signatures: vec![ConsensusSignature { r: Felt::ONE, s: Felt::ONE }], + }; + out.send(model::BlockHeadersResponse { + header_message: Some(model::block_headers_response::HeaderMessage::Header(header.into())), + }) + .await?; + } + + // Add the Fin message + out.send(model::BlockHeadersResponse { + header_message: Some(model::block_headers_response::HeaderMessage::Fin(model::Fin {})), + }) + .await?; + + Ok(()) +} + +pub async fn read_headers_stream( + res: impl Stream, +) -> Result { + pin!(res); + + let Some(res) = res.next().await else { return Err(sync_handlers::Error::EndOfStream) }; + let header = match res.header_message.ok_or_bad_request("No message")? { + model::block_headers_response::HeaderMessage::Header(message) => message, + model::block_headers_response::HeaderMessage::Fin(_) => { + return Err(sync_handlers::Error::EndOfStream); + } + }; + BlockHeaderWithSignatures::try_from(header).or_bad_request("Converting header") +} diff --git a/crates/madara/client/p2p/src/handlers_impl/mod.rs b/crates/madara/client/p2p/src/handlers_impl/mod.rs new file mode 100644 index 000000000..114b0af70 --- /dev/null +++ b/crates/madara/client/p2p/src/handlers_impl/mod.rs @@ -0,0 +1,50 @@ +use crate::{model, sync_handlers}; +use error::{OptionExt, ResultExt}; +use mc_db::{ + stream::{BlockStreamConfig, Direction}, + MadaraBackend, +}; +use mp_block::BlockId; +use std::num::NonZeroU64; + +mod classes; +mod error; +mod events; +mod headers; +mod state_diffs; +mod transactions; + +pub use classes::*; +pub use events::*; +pub use headers::*; +pub use state_diffs::*; +pub use transactions::*; + +pub fn block_stream_config( + db: &MadaraBackend, + value: model::Iteration, +) -> Result { + let direction = match value.direction() { + 
model::iteration::Direction::Forward => Direction::Forward, + model::iteration::Direction::Backward => Direction::Backward, + }; + + let start = match (value.start, &direction) { + (Some(model::iteration::Start::BlockNumber(n)), _) => n, + (Some(model::iteration::Start::Header(hash)), _) => db + .get_block_n(&BlockId::Hash(hash.into())) + .or_internal_server_error("Getting block_n from hash")? + .ok_or_bad_request("Block not found")?, + (None, Direction::Forward) => 0, + (None, Direction::Backward) => { + db.get_latest_block_n().or_internal_server_error("Getting latest block_n")?.unwrap_or(0) + } + }; + Ok(BlockStreamConfig { + direction, + start, + // in protobuf fields default to 0 - we should not return any error in these cases. + step: value.step.try_into().unwrap_or(NonZeroU64::MIN), + limit: if value.limit == 0 { None } else { Some(value.limit) }, + }) +} diff --git a/crates/madara/client/p2p/src/handlers_impl/state_diffs.rs b/crates/madara/client/p2p/src/handlers_impl/state_diffs.rs new file mode 100644 index 000000000..f3d94d1d8 --- /dev/null +++ b/crates/madara/client/p2p/src/handlers_impl/state_diffs.rs @@ -0,0 +1,219 @@ +use super::error::ResultExt; +use crate::{ + handlers_impl::{block_stream_config, error::OptionExt}, + sync_handlers::{self, ReqContext}, + MadaraP2pContext, +}; +use futures::{channel::mpsc::Sender, SinkExt, Stream, StreamExt}; +use mc_db::db_block_id::DbBlockId; +use mp_proto::model; +use mp_state_update::{ + ContractStorageDiffItem, DeclaredClassItem, NonceUpdate, ReplacedClassItem, StateDiff, StorageEntry, +}; +use starknet_core::types::Felt; +use std::collections::{BTreeMap, HashMap}; +use tokio::pin; + +fn contract_state_diffs(state_diff: &StateDiff) -> HashMap { + let mut res: HashMap = Default::default(); + + for nonce_update in &state_diff.nonces { + let entry = res.entry(nonce_update.contract_address).or_default(); + entry.nonce = Some(nonce_update.nonce.into()); + } + + for deployed_contract in &state_diff.deployed_contracts { 
+ let entry = res.entry(deployed_contract.address).or_default(); + entry.class_hash = Some(deployed_contract.class_hash.into()); + } + + for replaced_class in &state_diff.replaced_classes { + let entry = res.entry(replaced_class.contract_address).or_default(); + entry.class_hash = Some(replaced_class.class_hash.into()); + } + + for storage_diff in &state_diff.storage_diffs { + let entry = res.entry(storage_diff.address).or_default(); + entry.values = storage_diff + .storage_entries + .iter() + .map(|el| model::ContractStoredValue { key: Some(el.key.into()), value: Some(el.value.into()) }) + .collect(); + } + + res +} + +pub async fn state_diffs_sync( + ctx: ReqContext, + req: model::StateDiffsRequest, + mut out: Sender, +) -> Result<(), sync_handlers::Error> { + let iterator_config = block_stream_config(&ctx.app_ctx.backend, req.iteration.unwrap_or_default())?; + let ite = ctx.app_ctx.backend.block_info_iterator(iterator_config.clone()); + + tracing::debug!("serving state diffs sync! {iterator_config:?}"); + + for res in ite { + let header = res.or_internal_server_error("Error while reading from block stream")?; + + let Some(state_diff) = ctx + .app_ctx + .backend + .get_block_state_diff(&DbBlockId::Number(header.header.block_number)) + .or_internal_server_error("Getting block state diff")? + else { + continue; // it is possible that we have the header but not the state diff for this block yet. + }; + + // Legacy declared classes + for &class_hash in &state_diff.deprecated_declared_classes { + let el = model::DeclaredClass { class_hash: Some(class_hash.into()), compiled_class_hash: None }; + + out.send(model::StateDiffsResponse { + state_diff_message: Some(model::state_diffs_response::StateDiffMessage::DeclaredClass(el)), + }) + .await? 
+ } + + // Declared classes + for declared_class in &state_diff.declared_classes { + let el = model::DeclaredClass { + class_hash: Some(declared_class.class_hash.into()), + compiled_class_hash: Some(declared_class.compiled_class_hash.into()), + }; + + out.send(model::StateDiffsResponse { + state_diff_message: Some(model::state_diffs_response::StateDiffMessage::DeclaredClass(el)), + }) + .await? + } + + // Contract updates (nonces, storage, deployed/replaced) + for (contract_address, mut el) in contract_state_diffs(&state_diff) { + el.address = Some(contract_address.into()); + out.send(model::StateDiffsResponse { + state_diff_message: Some(model::state_diffs_response::StateDiffMessage::ContractDiff(el)), + }) + .await? + } + } + + // Add the Fin message + out.send(model::StateDiffsResponse { + state_diff_message: Some(model::state_diffs_response::StateDiffMessage::Fin(model::Fin {})), + }) + .await?; + + Ok(()) +} + +/// Note: The declared_contracts field of the state diff will be empty. Its content will be instead in the replaced_classes field. +pub async fn read_state_diffs_stream( + res: impl Stream, + state_diff_length: usize, +) -> Result { + pin!(res); + + let mut storage_diffs: BTreeMap> = BTreeMap::new(); + let mut all_declared_classes: BTreeMap> = BTreeMap::new(); // Sierra and legacy. + let mut deployed_or_replaced_contracts: BTreeMap = BTreeMap::new(); + let mut nonces: BTreeMap = BTreeMap::new(); + + let mut current_len: usize = 0; + while current_len < state_diff_length { + let old_len = current_len; + + let handle_fin = || { + if current_len == 0 { + sync_handlers::Error::EndOfStream + } else { + sync_handlers::Error::bad_request(format!( + "Expected {} messages in stream, got {}", + state_diff_length, current_len + )) + } + }; + let Some(res) = res.next().await else { return Err(handle_fin()) }; + match res.state_diff_message.ok_or_bad_request("No message")? 
{ + model::state_diffs_response::StateDiffMessage::ContractDiff(message) => { + let contract = message.address.ok_or_bad_request("Missing field address in contract diff")?.into(); + if let Some(nonce) = message.nonce.map(Into::into) { + if nonces.insert(contract, nonce).is_some() { + return Err(sync_handlers::Error::bad_request("Duplicate nonce")); + } + current_len += 1; + } + if let Some(nonce) = message.class_hash.map(Into::into) { + if deployed_or_replaced_contracts.insert(contract, nonce).is_some() { + return Err(sync_handlers::Error::bad_request( + "Duplicate deployed contract or replaced contract class", + )); + } + current_len += 1; + } + // ignore message.domain for now. + if !message.values.is_empty() { + let entry = storage_diffs.entry(contract).or_default(); + for kv in message.values { + let key = kv.key.ok_or_bad_request("No storage key for storage diff")?.into(); + if entry.insert(key, kv.value.unwrap_or_default().into()).is_some() { + return Err(sync_handlers::Error::bad_request("Duplicate contract storage value diff")); + } + current_len += 1; + } + } + } + model::state_diffs_response::StateDiffMessage::DeclaredClass(message) => { + let class_hash = message.class_hash.ok_or_bad_request("No class hash for declared class")?.into(); + if all_declared_classes.insert(class_hash, message.compiled_class_hash.map(Into::into)).is_some() { + return Err(sync_handlers::Error::bad_request("Duplicate contract storage value diff")); + } + current_len += 1; + } + model::state_diffs_response::StateDiffMessage::Fin(_) => return Err(handle_fin()), + }; + + // Just in case contract_diff message is actually empty, we want to force the peer to actually make progress. + // Otherwise they could just keep the stream hanging by sending empty messages. 
+ if old_len == current_len { + return Err(sync_handlers::Error::bad_request("Empty state diff message")); + } + } + + let mut deprecated_declared_classes = vec![]; + let mut declared_classes = vec![]; + + for (class_hash, compiled_class_hash) in all_declared_classes { + if let Some(compiled_class_hash) = compiled_class_hash { + declared_classes.push(DeclaredClassItem { class_hash, compiled_class_hash }); + } else { + deprecated_declared_classes.push(class_hash); + } + } + + let state_diff = StateDiff { + storage_diffs: storage_diffs + .into_iter() + .map(|(address, kv)| ContractStorageDiffItem { + address, + storage_entries: kv.into_iter().map(|(key, value)| StorageEntry { key, value }).collect(), + }) + .collect(), + deprecated_declared_classes, + declared_classes, + deployed_contracts: Default::default(), // TODO: i really want to drop the support of that field. + replaced_classes: deployed_or_replaced_contracts + .into_iter() + .map(|(contract_address, class_hash)| ReplacedClassItem { contract_address, class_hash }) + .collect(), + nonces: nonces.into_iter().map(|(contract_address, nonce)| NonceUpdate { contract_address, nonce }).collect(), + }; + + if state_diff.len() != state_diff_length { + // this shouldn't happen since we always check for duplicates when inserting, but just in case. + return Err(sync_handlers::Error::bad_request("State diff length mismatch")); + } + + Ok(state_diff) +} diff --git a/crates/madara/client/p2p/src/handlers_impl/transactions.rs b/crates/madara/client/p2p/src/handlers_impl/transactions.rs new file mode 100644 index 000000000..80a024cef --- /dev/null +++ b/crates/madara/client/p2p/src/handlers_impl/transactions.rs @@ -0,0 +1,92 @@ +//! TODO: range check contract addresses? 
+ +use crate::{ + handlers_impl::{ + block_stream_config, + error::{OptionExt, ResultExt}, + }, + sync_handlers::{self, ReqContext}, + MadaraP2pContext, +}; +use futures::{channel::mpsc::Sender, SinkExt, Stream, StreamExt}; +use mc_db::db_block_id::DbBlockId; +use mp_block::TransactionWithReceipt; +use mp_proto::model; +use tokio::pin; + +/// Reply to a transactions sync request. +pub async fn transactions_sync( + ctx: ReqContext, + req: model::TransactionsRequest, + mut out: Sender, +) -> Result<(), sync_handlers::Error> { + let iterator_config = block_stream_config(&ctx.app_ctx.backend, req.iteration.unwrap_or_default())?; + let ite = ctx.app_ctx.backend.block_info_iterator(iterator_config.clone()); + + tracing::debug!("serving transactions sync! {iterator_config:?}"); + + for res in ite { + let header = res.or_internal_server_error("Error while reading from block stream")?; + + let Some(block_inner) = ctx + .app_ctx + .backend + .get_block_inner(&DbBlockId::Number(header.header.block_number)) + .or_internal_server_error("Getting block transactions")? + else { + continue; // it is possible that we have the header but not the transactions for this block yet. + }; + + for (transaction, receipt) in block_inner.transactions.into_iter().zip(block_inner.receipts) { + let el = TransactionWithReceipt { transaction, receipt }; + + out.send(model::TransactionsResponse { + transaction_message: Some(model::transactions_response::TransactionMessage::TransactionWithReceipt( + el.into(), + )), + }) + .await? + } + } + + // Add the Fin message + out.send(model::TransactionsResponse { + transaction_message: Some(model::transactions_response::TransactionMessage::Fin(model::Fin {})), + }) + .await?; + + Ok(()) +} + +/// Used by [`crate::commands::P2pCommands::make_transactions_stream`] to send a transactions stream request. +/// Note that the events in the transaction receipt will not be filled in, as they need to be fetched using the events stream request.
+pub async fn read_transactions_stream( + res: impl Stream, + transactions_count: usize, +) -> Result, sync_handlers::Error> { + pin!(res); + + let mut vec = Vec::with_capacity(transactions_count); + for i in 0..transactions_count { + let handle_fin = || { + if i == 0 { + sync_handlers::Error::EndOfStream + } else { + sync_handlers::Error::bad_request(format!( + "Expected {} messages in stream, got {}", + transactions_count, i + )) + } + }; + + let Some(res) = res.next().await else { return Err(handle_fin()) }; + let val = match res.transaction_message.ok_or_bad_request("No message")? { + model::transactions_response::TransactionMessage::TransactionWithReceipt(message) => message, + model::transactions_response::TransactionMessage::Fin(_) => return Err(handle_fin()), + }; + let res = TransactionWithReceipt::try_from(val).or_bad_request("Converting transaction with receipt")?; + vec.push(res); + } + + Ok(vec) +} diff --git a/crates/madara/client/p2p/src/identity.rs b/crates/madara/client/p2p/src/identity.rs new file mode 100644 index 000000000..f64ec7d86 --- /dev/null +++ b/crates/madara/client/p2p/src/identity.rs @@ -0,0 +1,65 @@ +use anyhow::{ensure, Context}; +use base64::prelude::*; +use libp2p::{identity::Keypair, PeerId}; +use std::{fs, path::Path}; + +/// This happens to be the pathfinder-compatible identity.json format. +#[derive(serde::Serialize, serde::Deserialize)] +struct P2pIdentity { + pub private_key: String, + pub peer_id: String, +} + +pub fn load_identity(identity_file: Option<&Path>, save_identity: bool) -> anyhow::Result { + let Some(identity_file) = identity_file else { + // no stable identity, generate a new one at every startup. + return Ok(Keypair::generate_ed25519()); + }; + + if save_identity + && !fs::exists(identity_file).with_context(|| { + format!("Checking if peer-to-peer identity file at path '{}' exists", identity_file.display()) + })? + { + // make & save an peer-to-peer identity file. 
+ let keypair = Keypair::generate_ed25519(); + + let private_key = keypair + .to_protobuf_encoding() + .context("Converting newly-created peer-to-peer identity file to protobuf format")?; + + let mut content = serde_json::to_string_pretty(&P2pIdentity { + private_key: BASE64_STANDARD.encode(private_key), + peer_id: PeerId::from_public_key(&keypair.public()).to_base58(), + }) + .context("Converting peer-to-peer identity file to json")?; + + content.push('\n'); + + fs::write(identity_file, content) + .with_context(|| format!("Saving peer-to-peer identity to file at path '{}'", identity_file.display()))?; + + return Ok(keypair); + } + + let load_identity_file = || { + let data = fs::read(identity_file).context("Reading file")?; + let content: P2pIdentity = serde_json::from_slice(&data).context("Parsing json file")?; + + let sk = BASE64_STANDARD.decode(&content.private_key).context("Parsing private_key as base64")?; + let keypair = Keypair::from_protobuf_encoding(&sk).context("Parsing private_key protobuf encoding")?; + + let peer_id = PeerId::from_public_key(&keypair.public()).to_base58(); + ensure!( + peer_id == content.peer_id, + "PeerID derived from secret key does not match with the PeerID in the file. 
Expected: {peer_id}, Got: {}", + content.peer_id + ); + + Ok(keypair) + }; + + load_identity_file().with_context(|| { + format!("Reading and parsing peer-to-peer identity file at path '{}'", identity_file.display()) + }) +} diff --git a/crates/madara/client/p2p/src/lib.rs b/crates/madara/client/p2p/src/lib.rs new file mode 100644 index 000000000..541a5fa33 --- /dev/null +++ b/crates/madara/client/p2p/src/lib.rs @@ -0,0 +1,197 @@ +use anyhow::Context; +use behaviour::MadaraP2pBehaviour; +use futures::{channel::mpsc, FutureExt}; +use libp2p::{futures::StreamExt, gossipsub::IdentTopic, identity::Keypair, multiaddr::Protocol, Multiaddr, Swarm}; +use mc_db::MadaraBackend; +use mp_proto::model; +use std::{collections::HashMap, path::PathBuf, sync::Arc, time::Duration}; +use sync_handlers::DynSyncHandler; + +mod behaviour; +mod commands; +mod events; +mod handlers_impl; +mod identity; +mod sync_codec; +mod sync_handlers; + +pub use commands::*; +pub use libp2p::PeerId; +pub use sync_handlers::Error as SyncHandlerError; + +pub struct P2pConfig { + /// None to get an OS-assigned port. + pub port: Option, + pub bootstrap_nodes: Vec, + pub status_interval: Duration, + /// Peer-to-peer identity.json file. By default, we generate a new one everytime. 
+ pub identity_file: Option, + pub save_identity: bool, +} + +#[derive(Clone)] +struct MadaraP2pContext { + backend: Arc, +} + +pub struct MadaraP2pBuilder { + commands: P2pCommands, + commands_receiver: mpsc::Receiver, + config: P2pConfig, + db: Arc, + keypair: Keypair, +} + +impl MadaraP2pBuilder { + pub fn new(config: P2pConfig, db: Arc) -> anyhow::Result { + let (commands, commands_receiver) = mpsc::channel(100); + + // we do not need to provide a stable identity except for bootstrap nodes + let keypair = identity::load_identity(config.identity_file.as_deref(), config.save_identity)?; + + Ok(Self { + commands: P2pCommands { inner: commands, peer_id: keypair.public().to_peer_id() }, + commands_receiver, + config, + db, + keypair, + }) + } + + pub fn commands(&self) -> P2pCommands { + self.commands.clone() + } + + pub fn build(self) -> anyhow::Result { + let MadaraP2pBuilder { commands: _, commands_receiver, config, db, keypair } = self; + + let swarm = libp2p::SwarmBuilder::with_existing_identity(keypair) + .with_tokio() + .with_tcp( + Default::default(), + // support tls and noise + (libp2p::tls::Config::new, libp2p::noise::Config::new), + // multiplexing protocol (yamux) + libp2p::yamux::Config::default, + ) + .context("Configuring libp2p tcp transport")? + .with_relay_client(libp2p::noise::Config::new, libp2p::yamux::Config::default) + .context("Configuring relay transport")? + .with_behaviour(|identity, relay_client| MadaraP2pBehaviour::new(db.chain_config(), identity, relay_client)) + .context("Configuring libp2p behaviour")? 
+ .build(); + + let app_ctx = MadaraP2pContext { backend: Arc::clone(&db) }; + + Ok(MadaraP2p { + config, + db, + // add_transaction_provider, + swarm, + commands_receiver: Some(commands_receiver), + pending_get_closest_peers: Default::default(), + headers_sync_handler: DynSyncHandler::new("headers", app_ctx.clone(), |ctx, req, out| { + handlers_impl::headers_sync(ctx, req, out).boxed() + }), + classes_sync_handler: DynSyncHandler::new("classes", app_ctx.clone(), |ctx, req, out| { + handlers_impl::classes_sync(ctx, req, out).boxed() + }), + state_diffs_sync_handler: DynSyncHandler::new("state_diffs", app_ctx.clone(), |ctx, req, out| { + handlers_impl::state_diffs_sync(ctx, req, out).boxed() + }), + transactions_sync_handler: DynSyncHandler::new("transactions", app_ctx.clone(), |ctx, req, out| { + handlers_impl::transactions_sync(ctx, req, out).boxed() + }), + events_sync_handler: DynSyncHandler::new("events", app_ctx.clone(), |ctx, req, out| { + handlers_impl::events_sync(ctx, req, out).boxed() + }), + }) + } +} + +pub struct MadaraP2p { + config: P2pConfig, + #[allow(unused)] + db: Arc, + // #[allow(unused)] + // add_transaction_provider: Arc, + commands_receiver: Option>, + swarm: Swarm, + + pending_get_closest_peers: HashMap>, + + headers_sync_handler: DynSyncHandler, + classes_sync_handler: DynSyncHandler, + state_diffs_sync_handler: DynSyncHandler, + transactions_sync_handler: + DynSyncHandler, + events_sync_handler: DynSyncHandler, +} + +impl MadaraP2p { + pub fn dial_bootstrap_nodes(&mut self) { + for node in &self.config.bootstrap_nodes { + if let Err(err) = self.swarm.dial(node.clone()) { + tracing::debug!("Could not dial bootstrap node {node}: {err:#}"); + } + } + } + + /// Main loop of the p2p service. 
+ pub async fn run(&mut self, mut ctx: mp_utils::service::ServiceContext) -> anyhow::Result<()> { + let multi_addr = "/ip4/0.0.0.0".parse::()?.with(Protocol::Tcp(self.config.port.unwrap_or(0))); + self.swarm.listen_on(multi_addr).context("Binding port")?; + + self.dial_bootstrap_nodes(); + + let block_propagation_topic = &format!("blocks/{}", self.db.chain_config().chain_id.as_hex()); + self.swarm.behaviour_mut().gossipsub.subscribe(&IdentTopic::new(block_propagation_topic))?; + tracing::debug!("Gossipsub subscribed to {block_propagation_topic}"); + + let mut status_interval = tokio::time::interval(self.config.status_interval); + let mut commands_recv = self.commands_receiver.take().context("Service already started")?; + + loop { + tokio::select! { + // Stop condition + _ = ctx.cancelled() => break, + + // Show node status regularly + _ = status_interval.tick() => { + let network_info = self.swarm.network_info(); + let connections_info = network_info.connection_counters(); + + let peers = network_info.num_peers(); + let connections_in = connections_info.num_established_incoming(); + let connections_out = connections_info.num_established_outgoing(); + let pending_connections = connections_info.num_pending(); + let dht = self.swarm.behaviour_mut().kad + .kbuckets() + // Cannot .into_iter() a KBucketRef, hence the inner collect followed by flat_map + .map(|kbucket_ref| { + kbucket_ref + .iter() + .map(|entry_ref| *entry_ref.node.key.preimage()) + .collect::>() + }) + .flat_map(|peers_in_bucket| peers_in_bucket.into_iter()) + .collect::>(); + tracing::info!("P2P {peers} peers IN: {connections_in} OUT: {connections_out} Pending: {pending_connections}"); + tracing::trace!("DHT {dht:?}"); + } + + // Make progress on the swarm and handle the events it yields + event = self.swarm.next() => match event { + Some(event) => self.handle_event(event).context("Handling p2p event")?, + None => break, + }, + + // Handle incoming service commands + Some(command) = 
commands_recv.next() => { + self.handle_command(command); + } + } + } + Ok(()) + } +} diff --git a/crates/madara/client/p2p/src/sync_codec.rs b/crates/madara/client/p2p/src/sync_codec.rs new file mode 100644 index 000000000..94d1eb800 --- /dev/null +++ b/crates/madara/client/p2p/src/sync_codec.rs @@ -0,0 +1,156 @@ +//! Part of this file is inspired by the wonderful pathfinder implementation + +use async_trait::async_trait; +use futures::io::{AsyncReadExt, AsyncWriteExt}; +use libp2p::futures::{AsyncRead, AsyncWrite}; +use std::{io, marker::PhantomData}; + +pub mod protocols { + //! This only handles 1 protocol version for now. In the future this file would need + //! to be rewritten so that it handles returning responses for older protocol versions. + + macro_rules! define_protocols { + { $( struct $type_name:ident = $name:literal ; )* } => { + $( + #[derive(Debug, Clone, Copy, Default)] + pub struct $type_name; + impl AsRef for $type_name { + fn as_ref(&self) -> &str { + $name + } + } + )* + } + } + + define_protocols! 
{ + struct Headers = "/starknet/headers/0.1.0-rc.0"; + struct StateDiffs = "/starknet/state_diffs/0.1.0-rc.0"; + struct Classes = "/starknet/classes/0.1.0-rc.0"; + struct Transactions = "/starknet/transactions/0.1.0-rc.0"; + struct Events = "/starknet/events/0.1.0-rc.0"; + } +} + +pub mod codecs { + #![allow(clippy::identity_op)] // allow 1 * MiB + #![allow(non_upper_case_globals)] // allow MiB name + use super::*; + + const MiB: u64 = 1024 * 1024; + + pub type Headers = + SyncCodec; + pub fn headers() -> Headers { + SyncCodec::new(SyncCodecConfig { req_size_limit_bytes: 1 * MiB, res_size_limit_bytes: 1 * MiB }) + } + pub type StateDiffs = + SyncCodec; + pub fn state_diffs() -> StateDiffs { + SyncCodec::new(SyncCodecConfig { req_size_limit_bytes: 1 * MiB, res_size_limit_bytes: 1 * MiB }) + } + pub type Classes = SyncCodec; + pub fn classes() -> Classes { + SyncCodec::new(SyncCodecConfig { req_size_limit_bytes: 1 * MiB, res_size_limit_bytes: 4 * MiB }) + } + pub type Transactions = + SyncCodec; + pub fn transactions() -> Transactions { + SyncCodec::new(SyncCodecConfig { req_size_limit_bytes: 1 * MiB, res_size_limit_bytes: 1 * MiB }) + } + pub type Events = SyncCodec; + pub fn events() -> Events { + SyncCodec::new(SyncCodecConfig { req_size_limit_bytes: 1 * MiB, res_size_limit_bytes: 1 * MiB }) + } +} + +#[derive(Debug, Clone)] +pub struct SyncCodecConfig { + pub req_size_limit_bytes: u64, + pub res_size_limit_bytes: u64, +} + +#[derive(Debug, Clone)] +pub struct SyncCodec { + config: SyncCodecConfig, + /// buffer reuse + buf: Vec, + _boo: PhantomData<(Protocol, Req, Res)>, +} + +impl SyncCodec { + pub fn new(config: SyncCodecConfig) -> Self { + Self { buf: Vec::new(), config, _boo: PhantomData } + } +} + +#[async_trait] +impl< + Protocol: AsRef + Send + Clone, + Req: prost::Message + Default + Send, + Res: prost::Message + Default + Send, + > p2p_stream::Codec for SyncCodec +{ + type Protocol = Protocol; + type Request = Req; + type Response = Res; + + async fn 
read_request(&mut self, _protocol: &Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + self.buf.clear(); + io.take(self.config.req_size_limit_bytes).read_to_end(&mut self.buf).await?; + Ok(Req::decode(self.buf.as_ref())?) + } + + async fn read_response(&mut self, _protocol: &Protocol, mut io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + // Response is prepended with the message length + // We do not directly use [`prost::Message::decode_length_delimited`] because we want to reject the message before reading it + // if it's too long + + // unsigned_varint's error type implements Into and not From io::Error, so we have to map the error by hand + let encoded_len = unsigned_varint::aio::read_usize(&mut io).await.map_err(Into::::into)?; + if encoded_len > self.config.res_size_limit_bytes as _ { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + format!( + "Response has length {} which exceeds the spec-defined limit of {}", + encoded_len, self.config.res_size_limit_bytes + ), + )); + } + + self.buf.clear(); + self.buf.reserve(encoded_len); + io.take(encoded_len as _).read_to_end(&mut self.buf).await?; + if self.buf.len() != encoded_len { + return Err(io::ErrorKind::UnexpectedEof.into()); + } + + Ok(Res::decode(self.buf.as_ref())?) 
+ } + + async fn write_request(&mut self, _protocol: &Protocol, io: &mut T, req: Req) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + self.buf.clear(); + req.encode(&mut self.buf)?; + io.write_all(&self.buf).await + } + + async fn write_response(&mut self, _protocol: &Protocol, io: &mut T, res: Res) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + // we don't have to use unsigned_varint::aio::write_usize here we can just use prost's length delimited messages impl + + self.buf.clear(); + res.encode_length_delimited(&mut self.buf)?; + io.write_all(&self.buf).await + } +} diff --git a/crates/madara/client/p2p/src/sync_handlers.rs b/crates/madara/client/p2p/src/sync_handlers.rs new file mode 100644 index 000000000..220cdade9 --- /dev/null +++ b/crates/madara/client/p2p/src/sync_handlers.rs @@ -0,0 +1,168 @@ +use futures::channel::mpsc; +use futures::{channel::mpsc::Sender, future::BoxFuture, pin_mut, Future}; +use futures::{SinkExt, StreamExt}; +use p2p_stream::{InboundRequestId, OutboundRequestId}; +use std::borrow::Cow; +use std::{collections::HashMap, fmt, marker::PhantomData}; +use tokio::task::{AbortHandle, JoinSet}; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + /// Error is internal and will be reported with error level. + #[error("Internal server error: {0:#}")] + Internal(#[from] anyhow::Error), + /// Error is the peer's fault, will only be reported with debug level. + #[error("Bad request: {0}")] + BadRequest(Cow<'static, str>), + + /// Sender closed. Do nothing. 
+ #[error("End of stream.")] + EndOfStream, +} + +impl Error { + pub fn bad_request(s: impl Into>) -> Self { + Self::BadRequest(s.into()) + } +} + +impl From for Error { + fn from(_: futures::channel::mpsc::SendError) -> Self { + Self::EndOfStream + } +} + +pub struct ReqContext { + pub app_ctx: AppCtx, +} + +pub type DynSyncHandler = StreamHandler< + AppCtx, + Req, + Res, + fn(ReqContext, Req, Sender) -> BoxFuture<'static, Result<(), Error>>, + BoxFuture<'static, Result<(), Error>>, +>; + +pub struct StreamHandler { + debug_name: &'static str, + app_ctx: AppCtx, + handler: F, + join_set: JoinSet<()>, + current_inbound: HashMap, + pending_outbounds_channels: HashMap>, + current_outbound: HashMap, + _boo: PhantomData<(Req, Res, Fut)>, +} + +impl StreamHandler +where + F: Fn(ReqContext, Req, Sender) -> Fut, + Fut: Future> + Send + 'static, +{ + pub fn new(debug_name: &'static str, app_ctx: AppCtx, handler: F) -> Self { + Self { + debug_name, + handler, + app_ctx, + join_set: Default::default(), + current_inbound: Default::default(), + pending_outbounds_channels: Default::default(), + current_outbound: Default::default(), + _boo: PhantomData, + } + } + + pub fn handle_event(&mut self, ev: p2p_stream::Event) { + match ev { + /* === OTHER PEER => US === */ + p2p_stream::Event::InboundRequest { request_id, request, peer, channel } => { + tracing::debug!("New inbounds request in stream {} [peer_id {}]", self.debug_name, peer); + let ctx = ReqContext { app_ctx: self.app_ctx.clone() }; + // Spawn the task that responds to the request. 
+ + let fut = (self.handler)(ctx, request, channel); + + let debug_name = self.debug_name; + let abort_handle = self.join_set.spawn(async move { + let fut = fut; + pin_mut!(fut); + + if let Err(err) = fut.await { + match err { + Error::Internal(err) => { + tracing::error!(target: "p2p_errors", "Internal Server Error in stream {} [peer_id {peer}]: {err:#}", debug_name); + } + Error::BadRequest(err) => { + tracing::debug!(target: "p2p_errors", "Bad Request in stream {} [peer_id {peer}]: {err:#}", debug_name); + } + Error::EndOfStream => { /* sender closed, do nothing */ } + } + } + }); + + self.current_inbound.insert(request_id, abort_handle); + } + p2p_stream::Event::InboundFailure { peer, request_id, error } => { + tracing::debug!("Inbounds failure in stream {} [peer_id {}]: {:#}", self.debug_name, peer, error); + if let Some(v) = self.current_inbound.remove(&request_id) { + v.abort(); + } + } + p2p_stream::Event::OutboundResponseStreamClosed { peer, request_id } => { + tracing::debug!("End of stream {} [peer_id {}]", self.debug_name, peer); + if let Some(v) = self.current_inbound.remove(&request_id) { + v.abort(); // abort if not yet aborted + } + } + /* === US => OTHER PEER === */ + p2p_stream::Event::OutboundRequestSentAwaitingResponses { peer, request_id, mut channel } => { + if let Some(mut snd) = self.pending_outbounds_channels.remove(&request_id) { + let debug_name = self.debug_name; + let abort_handle = self.join_set.spawn(async move { + loop { + let Some(el) = channel.next().await else { + break; // channel closed + }; + let res = match el { + Ok(res) => res, + Err(err) => { + tracing::debug!(target: "p2p_errors", "I/O error in stream {} [peer_id {peer}]: {err:#}", debug_name); + break; + } + }; + if snd.send(res).await.is_err() { + break; // channel closed + } + } + }); + self.current_outbound.insert(request_id, abort_handle); + } + } + p2p_stream::Event::OutboundFailure { peer, request_id, error } => { + tracing::debug!("Outbounds failure in stream {} 
[peer_id {}]: {:#}", self.debug_name, peer, error); + self.pending_outbounds_channels.remove(&request_id); + if let Some(v) = self.current_outbound.remove(&request_id) { + v.abort(); + } + } + p2p_stream::Event::InboundResponseStreamClosed { peer, request_id } => { + tracing::debug!("End of outbound stream {} [peer_id {}]", self.debug_name, peer); + self.pending_outbounds_channels.remove(&request_id); + if let Some(v) = self.current_outbound.remove(&request_id) { + v.abort(); + } + } + } + } + + pub fn add_outbound(&mut self, id: OutboundRequestId, stream: mpsc::Sender) { + self.pending_outbounds_channels.insert(id, stream); + } +} + +impl fmt::Debug for StreamHandler { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "StreamHandler[{}] <{} inbounds tasks>", self.debug_name, self.current_inbound.len()) + } +} diff --git a/crates/madara/client/p2p_stream/Cargo.toml b/crates/madara/client/p2p_stream/Cargo.toml new file mode 100644 index 000000000..1c7a10b2f --- /dev/null +++ b/crates/madara/client/p2p_stream/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "p2p_stream" +description = "Extension of libp2p-request-response that allows for streaming responses to a single request." 
+authors = [ + "Parity Technologies ", + "Equilibrium Labs ", +] +version = { workspace = true } +edition = { workspace = true } +license = { workspace = true } + +[dependencies] +async-trait = { workspace = true } +futures = { workspace = true } +futures-bounded = { workspace = true } +libp2p = { workspace = true, features = ["identify", "noise", "tcp", "tokio"] } +tracing = { workspace = true } + +[dev-dependencies] +anyhow = { workspace = true } +libp2p = { workspace = true, features = [ + "identify", + "noise", + "tcp", + "tokio", + "yamux", +] } +libp2p-plaintext = { workspace = true } +libp2p-swarm-test = { workspace = true } +rstest = { workspace = true } +tokio = { workspace = true, features = ["macros", "time"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/crates/madara/client/p2p_stream/README.md b/crates/madara/client/p2p_stream/README.md new file mode 100644 index 000000000..8f19b754a --- /dev/null +++ b/crates/madara/client/p2p_stream/README.md @@ -0,0 +1,41 @@ +# pathfinder's `p2p_stream` crate + +This crate is a copy of [pathfinder's `p2p_stream` crate](https://github.com/eqlabs/pathfinder/blob/main/crates/p2p_stream) which itself is a derivative +of the [`libp2p request/response`](https://docs.rs/libp2p-request-response/latest/libp2p_request_response/) crate with a few changes to allow streaming responses. + +Original readme follows. + +------------------- + +# Introduction + +This crate is a derivative of Parity Technologies' [`libp2p request/response`](https://docs.rs/libp2p-request-response/latest/libp2p_request_response/) crate, which provides a generic **"single request - stream of responses"** protocol, similar to [gRPC's server streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#server-streaming-rpc).
+ +# Feature comparison with request/response + +| | p2p-stream | libp2p-request-response | +| ----------- | ----------- | ----------- | +| libp2p compatibility | [≥ libp2p-v0.53.2](https://github.com/libp2p/rust-libp2p/releases/tag/libp2p-v0.53.2) | ✔ | +| sending request opens new libp2p stream | ✔ | ✔ | +| sending request | `Behavior::send_request` | `Behavior::send_request` | +| receiving request | `InboundRequest` event | `Message::Request` in `Message` event | +| sending response(s) | into a channel obtained from `InboundRequest` event | call `Behaviour` method after receiving `Message::Request` event | +| receiving response(s) | from a channel obtained from `OutboundRequestSentAwaitingResponses` event | `Message::Response` in `Message` event | +| number of responses per request | ≥ 0 | 1 | +| user defined R & W protocol codec | ✔ | ✔ | +| response codec should delimit messages | ✔ | n/a | +| partial protocol support
(ie. only upstream or downstream) | * | ✔ | +| out of the box cbor and json codecs | * | ✔ | + +
+ +*): [`pathfinder`](https://github.com/eqlabs/pathfinder) uses this crate with its own [Starknet](https://www.starknet.io/) specific [protocol](https://github.com/starknet-io/starknet-p2p-specs) and decided to drop unnecessary features + +# Acknowledgements + +Thanks to the [rust-libp2p contributors](https://github.com/libp2p/rust-libp2p/graphs/contributors) and [Parity Technologies](https://www.parity.io/) for making [`rust-libp2p`](https://github.com/libp2p/rust-libp2p) possible. + +# FAQ + +1. Q: I'd like to see the scope of changes compared to the original crate.
+ A: Please diff with [`libp2p-v0.53.2`](https://github.com/libp2p/rust-libp2p/tree/libp2p-v0.53.2) diff --git a/crates/madara/client/p2p_stream/src/codec.rs b/crates/madara/client/p2p_stream/src/codec.rs new file mode 100644 index 000000000..c6387d65f --- /dev/null +++ b/crates/madara/client/p2p_stream/src/codec.rs @@ -0,0 +1,65 @@ +// Equilibrium Labs: This work is an extension of libp2p's request-response +// protocol, hence the original copyright notice is included below. +// +// +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use std::io; + +use async_trait::async_trait; +use futures::prelude::*; + +/// A `Codec` defines the request and response types +/// for a request/streaming-response [`Behaviour`](crate::Behaviour) protocol or +/// protocol family and how they are encoded / decoded on an I/O stream. 
+#[async_trait] +pub trait Codec { + /// The type of protocol(s) or protocol versions being negotiated. + type Protocol: AsRef + Send + Clone; + /// The type of inbound and outbound requests. + type Request: Send; + /// The type of inbound and outbound responses. + type Response: Send; + + /// Reads a request from the given I/O stream according to the + /// negotiated protocol. + async fn read_request(&mut self, protocol: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send; + + /// Reads a response from the given I/O stream according to the + /// negotiated protocol. + async fn read_response(&mut self, protocol: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send; + + /// Writes a request to the given I/O stream according to the + /// negotiated protocol. + async fn write_request(&mut self, protocol: &Self::Protocol, io: &mut T, req: Self::Request) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send; + + /// Writes a response to the given I/O stream according to the + /// negotiated protocol. + async fn write_response(&mut self, protocol: &Self::Protocol, io: &mut T, res: Self::Response) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send; +} diff --git a/crates/madara/client/p2p_stream/src/handler.rs b/crates/madara/client/p2p_stream/src/handler.rs new file mode 100644 index 000000000..b282e95e2 --- /dev/null +++ b/crates/madara/client/p2p_stream/src/handler.rs @@ -0,0 +1,469 @@ +// Equilibrium Labs: This work is an extension of libp2p's request-response +// protocol, hence the original copyright notice is included below. +// +// +// Copyright 2020 Parity Technologies (UK) Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +pub(crate) mod protocol; + +use std::collections::VecDeque; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::Duration; +use std::{fmt, io}; + +use futures::channel::mpsc; +use futures::prelude::*; +use libp2p::swarm::handler::{ + ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError, FullyNegotiatedInbound, + FullyNegotiatedOutbound, ListenUpgradeError, StreamUpgradeError, +}; +use libp2p::swarm::SubstreamProtocol; + +use crate::codec::Codec; +use crate::handler::protocol::Protocol; +use crate::{InboundRequestId, OutboundRequestId, EMPTY_QUEUE_SHRINK_THRESHOLD}; + +/// A connection handler for a request/streaming-response +/// [`Behaviour`](super::Behaviour) protocol. +pub struct Handler +where + TCodec: Codec, +{ + /// The supported inbound protocols. 
+ inbound_protocols: Vec, + /// The request/streaming-response message codec. + codec: TCodec, + /// Queue of events to emit in `poll()`. + pending_events: VecDeque>, + /// Outbound upgrades waiting to be emitted as an + /// `OutboundSubstreamRequest`. + pending_outbound: VecDeque>, + + requested_outbound: VecDeque>, + /// A channel for receiving inbound requests. + inbound_receiver: mpsc::Receiver<(InboundRequestId, TCodec::Request, mpsc::Sender)>, + /// The [`mpsc::Sender`] for the above receiver. Cloned for each inbound + /// request. + inbound_sender: mpsc::Sender<(InboundRequestId, TCodec::Request, mpsc::Sender)>, + /// A channel for signalling that an outbound request has been sent. Cloned + /// for each outbound request. + outbound_sender: mpsc::Sender<(OutboundRequestId, mpsc::Receiver>)>, + /// The [`mpsc::Receiver`] for the above sender. + outbound_receiver: mpsc::Receiver<(OutboundRequestId, mpsc::Receiver>)>, + + inbound_request_id: Arc, + + worker_streams: futures_bounded::FuturesMap, io::Error>>, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +enum RequestId { + Inbound(InboundRequestId), + Outbound(OutboundRequestId), +} + +impl Handler +where + TCodec: Codec + Send + Clone + 'static, +{ + pub(super) fn new( + inbound_protocols: Vec, + codec: TCodec, + substream_timeout: Duration, + inbound_request_id: Arc, + max_concurrent_streams: usize, + ) -> Self { + let (inbound_sender, inbound_receiver) = mpsc::channel(0); + let (outbound_sender, outbound_receiver) = mpsc::channel(0); + Self { + inbound_protocols, + codec, + pending_outbound: VecDeque::new(), + requested_outbound: Default::default(), + inbound_receiver, + inbound_sender, + outbound_sender, + outbound_receiver, + pending_events: VecDeque::new(), + inbound_request_id, + worker_streams: futures_bounded::FuturesMap::new(substream_timeout, max_concurrent_streams), + } + } + + /// Returns the next inbound request ID. 
+ fn next_inbound_request_id(&mut self) -> InboundRequestId { + InboundRequestId(self.inbound_request_id.fetch_add(1, Ordering::Relaxed)) + } + + fn on_fully_negotiated_inbound( + &mut self, + FullyNegotiatedInbound { protocol: (mut stream, protocol), info: () }: FullyNegotiatedInbound< + ::InboundProtocol, + ::InboundOpenInfo, + >, + ) { + let mut codec = self.codec.clone(); + let request_id = self.next_inbound_request_id(); + let mut sender = self.inbound_sender.clone(); + + let recv_request_then_fwd_outgoing_responses = async move { + let (rs_send, mut rs_recv) = mpsc::channel(0); + + let read = codec.read_request(&protocol, &mut stream); + let request = read.await?; + + sender + .send((request_id, request, rs_send)) + .await + .expect("`ConnectionHandler` owns both ends of the channel"); + drop(sender); + + // Keep on forwarding until the channel is closed + while let Some(response) = rs_recv.next().await { + let write = codec.write_response(&protocol, &mut stream, response); + write.await?; + } + + stream.close().await?; + + Ok(Event::OutboundResponseStreamClosed(request_id)) + }; + + if self + .worker_streams + .try_push(RequestId::Inbound(request_id), recv_request_then_fwd_outgoing_responses.boxed()) + .is_err() + { + tracing::warn!("Dropping inbound stream because we are at capacity") + } + } + + fn on_fully_negotiated_outbound( + &mut self, + FullyNegotiatedOutbound { protocol: (mut stream, protocol), info: () }: FullyNegotiatedOutbound< + ::OutboundProtocol, + ::OutboundOpenInfo, + >, + ) { + let message = self.requested_outbound.pop_front().expect("negotiated a stream without a pending message"); + + let mut codec = self.codec.clone(); + let request_id = message.request_id; + + let (mut rs_send, rs_recv) = mpsc::channel(0); + + let mut sender = self.outbound_sender.clone(); + + let send_req_then_fwd_incoming_responses = async move { + let write = codec.write_request(&protocol, &mut stream, message.request); + write.await?; + + stream.close().await?; + + 
sender.send((request_id, rs_recv)).await.expect("`ConnectionHandler` owns both ends of the channel"); + drop(sender); + + // Keep on forwarding until the channel is closed or error occurs + loop { + match codec.read_response(&protocol, &mut stream).await { + Ok(response) => { + rs_send + .send(Ok(response)) + .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; + } + // The stream is closed, there's nothing more to receive + Err(error) if error.kind() == io::ErrorKind::UnexpectedEof => break, + // An error occurred, propagate it + Err(error) => { + let error_clone = io::Error::new(error.kind(), error.to_string()); + rs_send + .send(Err(error_clone)) + .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; + return Err(error); + } + } + } + + Ok(Event::InboundResponseStreamClosed(request_id)) + }; + + if self + .worker_streams + .try_push(RequestId::Outbound(request_id), send_req_then_fwd_incoming_responses.boxed()) + .is_err() + { + tracing::warn!("Dropping outbound stream because we are at capacity") + } + } + + fn on_dial_upgrade_error( + &mut self, + DialUpgradeError { error, info: () }: DialUpgradeError< + ::OutboundOpenInfo, + ::OutboundProtocol, + >, + ) { + let message = self.requested_outbound.pop_front().expect("negotiated a stream without a pending message"); + + match error { + StreamUpgradeError::Timeout => { + self.pending_events.push_back(Event::OutboundTimeout(message.request_id)); + } + StreamUpgradeError::NegotiationFailed => { + // The remote merely doesn't support the protocol(s) we requested. + // This is no reason to close the connection, which may + // successfully communicate with other protocols already. + // An event is reported to permit user code to react to the fact that + // the remote peer does not support the requested protocol(s). 
+ self.pending_events.push_back(Event::OutboundUnsupportedProtocols(message.request_id)); + } + StreamUpgradeError::Apply(e) => unreachable!("{e:?}"), + StreamUpgradeError::Io(e) => { + tracing::debug!("outbound stream for request {} failed: {e}, retrying", message.request_id); + self.requested_outbound.push_back(message); + } + } + } + fn on_listen_upgrade_error( + &mut self, + ListenUpgradeError { error, .. }: ListenUpgradeError< + ::InboundOpenInfo, + ::InboundProtocol, + >, + ) { + unreachable!("{error:?}") + } +} + +/// The events emitted by the [`Handler`]. +pub enum Event +where + TCodec: Codec, +{ + /// A request has been received. + InboundRequest { + /// The ID of the request. + request_id: InboundRequestId, + /// The request message. + request: TCodec::Request, + /// The channel through which we are expected to send responses. + sender: mpsc::Sender, + }, + /// A request has been sent and we are awaiting responses. + OutboundRequestSentAwaitingResponses { + /// The ID of the outbound request. + request_id: OutboundRequestId, + /// The channel through which we can receive the responses. + receiver: mpsc::Receiver>, + }, + /// An outbound response stream to an inbound request was closed. + OutboundResponseStreamClosed(InboundRequestId), + /// An inbound response stream to an outbound request was closed. + InboundResponseStreamClosed(OutboundRequestId), + /// An outbound request timed out while sending the request + /// or waiting for the response. + OutboundTimeout(OutboundRequestId), + /// An outbound request failed to negotiate a mutually supported protocol. + OutboundUnsupportedProtocols(OutboundRequestId), + OutboundStreamFailed { + request_id: OutboundRequestId, + error: io::Error, + }, + /// An inbound request timed out while waiting for the request + /// or sending the response. 
+ InboundTimeout(InboundRequestId), + InboundStreamFailed { + request_id: InboundRequestId, + error: io::Error, + }, +} + +impl fmt::Debug for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Event::InboundRequest { request_id, request: _, sender: _ } => { + f.debug_struct("Event::InboundRequest").field("request_id", request_id).finish() + } + Event::OutboundRequestSentAwaitingResponses { request_id, receiver: _ } => { + f.debug_struct("Event::OutboundRequestSentAwaitingResponses").field("request_id", request_id).finish() + } + Event::InboundResponseStreamClosed(request_id) => { + f.debug_struct("Event::InboundResponseStreamClosed").field("request_id", request_id).finish() + } + Event::OutboundResponseStreamClosed(request_id) => { + f.debug_struct("Event::OutboundResponseStreamClosed").field("request_id", request_id).finish() + } + Event::OutboundTimeout(request_id) => f.debug_tuple("Event::OutboundTimeout").field(request_id).finish(), + Event::OutboundUnsupportedProtocols(request_id) => { + f.debug_tuple("Event::OutboundUnsupportedProtocols").field(request_id).finish() + } + Event::OutboundStreamFailed { request_id, error } => f + .debug_struct("Event::OutboundStreamFailed") + .field("request_id", &request_id) + .field("error", &error) + .finish(), + Event::InboundTimeout(request_id) => f.debug_tuple("Event::InboundTimeout").field(request_id).finish(), + Event::InboundStreamFailed { request_id, error } => f + .debug_struct("Event::InboundStreamFailed") + .field("request_id", &request_id) + .field("error", &error) + .finish(), + } + } +} + +pub struct OutboundMessage { + pub(crate) request_id: OutboundRequestId, + pub(crate) request: TCodec::Request, + pub(crate) protocols: Vec, +} + +impl fmt::Debug for OutboundMessage +where + TCodec: Codec, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OutboundMessage").finish_non_exhaustive() + } +} + +impl ConnectionHandler for Handler +where + TCodec: 
Codec + Send + Clone + 'static, +{ + type FromBehaviour = OutboundMessage; + type ToBehaviour = Event; + type InboundProtocol = Protocol; + type OutboundProtocol = Protocol; + type OutboundOpenInfo = (); + type InboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(Protocol { protocols: self.inbound_protocols.clone() }, ()) + } + + fn on_behaviour_event(&mut self, request: Self::FromBehaviour) { + self.pending_outbound.push_back(request); + } + + #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))] + fn poll( + &mut self, + cx: &mut Context<'_>, + ) -> Poll, (), Self::ToBehaviour>> { + match self.worker_streams.poll_unpin(cx) { + Poll::Ready((_, Ok(Ok(event)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); + } + Poll::Ready((RequestId::Inbound(id), Ok(Err(e)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::InboundStreamFailed { + request_id: id, + error: e, + })); + } + Poll::Ready((RequestId::Outbound(id), Ok(Err(e)))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::OutboundStreamFailed { + request_id: id, + error: e, + })); + } + Poll::Ready((RequestId::Inbound(id), Err(futures_bounded::Timeout { .. }))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::InboundTimeout(id))); + } + Poll::Ready((RequestId::Outbound(id), Err(futures_bounded::Timeout { .. }))) => { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::OutboundTimeout(id))); + } + Poll::Pending => {} + } + + // Drain pending events that were produced by `worker_streams`. + if let Some(event) = self.pending_events.pop_front() { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event)); + } else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { + self.pending_events.shrink_to_fit(); + } + + // Check for inbound requests. 
+ if let Poll::Ready(Some((id, rq, rs_sender))) = self.inbound_receiver.poll_next_unpin(cx) { + // We received an inbound request. + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::InboundRequest { + request_id: id, + request: rq, + sender: rs_sender, + })); + } + + // Emit outbound requests. + if let Some(request) = self.pending_outbound.pop_front() { + let protocols = request.protocols.clone(); + self.requested_outbound.push_back(request); + + return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(Protocol { protocols }, ()), + }); + } + + // Check for readiness to receive inbound responses. + if let Poll::Ready(Some((id, rs_receiver))) = self.outbound_receiver.poll_next_unpin(cx) { + return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Event::OutboundRequestSentAwaitingResponses { + request_id: id, + receiver: rs_receiver, + })); + } + + debug_assert!(self.pending_outbound.is_empty()); + + if self.pending_outbound.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { + self.pending_outbound.shrink_to_fit(); + } + + Poll::Pending + } + + fn on_connection_event( + &mut self, + event: ConnectionEvent< + '_, + Self::InboundProtocol, + Self::OutboundProtocol, + Self::InboundOpenInfo, + Self::OutboundOpenInfo, + >, + ) { + match event { + ConnectionEvent::FullyNegotiatedInbound(fully_negotiated_inbound) => { + self.on_fully_negotiated_inbound(fully_negotiated_inbound) + } + ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => { + self.on_fully_negotiated_outbound(fully_negotiated_outbound) + } + ConnectionEvent::DialUpgradeError(dial_upgrade_error) => self.on_dial_upgrade_error(dial_upgrade_error), + ConnectionEvent::ListenUpgradeError(listen_upgrade_error) => { + self.on_listen_upgrade_error(listen_upgrade_error) + } + _ => {} + } + } +} diff --git a/crates/madara/client/p2p_stream/src/handler/protocol.rs b/crates/madara/client/p2p_stream/src/handler/protocol.rs new file mode 100644 
index 000000000..67f68b503 --- /dev/null +++ b/crates/madara/client/p2p_stream/src/handler/protocol.rs @@ -0,0 +1,78 @@ +// Equilibrium Labs: This work is an extension of libp2p's request-response +// protocol, hence the original copyright notice is included below. +// +// +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! The definition of a request/streaming-response protocol via inbound +//! and outbound substream upgrades. The inbound upgrade receives a request +//! and allows for sending a series of responses, whereas the outbound upgrade +//! sends a request and allows for receivung several responses. + +use futures::future::{ready, Ready}; +use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p::swarm::Stream; + +/// Response substream upgrade protocol. +/// +/// Receives a request and sends responses. +#[derive(Debug)] +pub struct Protocol

{ + pub(crate) protocols: Vec

, +} + +impl

UpgradeInfo for Protocol

+where + P: AsRef + Clone, +{ + type Info = P; + type InfoIter = std::vec::IntoIter; + + fn protocol_info(&self) -> Self::InfoIter { + self.protocols.clone().into_iter() + } +} + +impl

InboundUpgrade for Protocol

+where + P: AsRef + Clone, +{ + type Output = (Stream, P); + type Error = (); + type Future = Ready>; + + fn upgrade_inbound(self, io: Stream, protocol: Self::Info) -> Self::Future { + ready(Ok((io, protocol))) + } +} + +impl

OutboundUpgrade for Protocol

+where + P: AsRef + Clone, +{ + type Output = (Stream, P); + type Error = (); + type Future = Ready>; + + fn upgrade_outbound(self, io: Stream, protocol: Self::Info) -> Self::Future { + ready(Ok((io, protocol))) + } +} diff --git a/crates/madara/client/p2p_stream/src/lib.rs b/crates/madara/client/p2p_stream/src/lib.rs new file mode 100644 index 000000000..a987dbcb2 --- /dev/null +++ b/crates/madara/client/p2p_stream/src/lib.rs @@ -0,0 +1,756 @@ +// Equilibrium Labs: This work is an extension of libp2p's request-response +// protocol, hence the original copyright notice is included below. +// +// +// Copyright 2020 Parity Technologies (UK) Ltd. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +//! Generic single-request/response-stream protocols, later referred to as +//! request/streaming-response. +//! +//! ## General Usage +//! +//! The [`Behaviour`] struct is a [`NetworkBehaviour`] that implements a generic +//! 
request/streaming-response protocol or protocol family, whereby each request +//! is sent over a new substream on a connection. `Behaviour` is generic +//! over the actual messages being sent, which are defined in terms of a +//! [`Codec`]. Creating a request/streaming-response protocol thus amounts +//! to providing an implementation of this trait which can then be +//! given to [`Behaviour::with_codec`]. Further configuration options are +//! available via the [`Config`]. +//! +//! Outbound requests are sent using [`Behaviour::send_request`] and the +//! responses received via +//! [`Event::OutboundRequestSentAwaitingResponses::channel`]. +//! +//! Inbound requests are received via [`Event::InboundRequest`] and responses +//! are sent via [`Event::InboundRequest::channel`]. +//! +//! ## Protocol Families +//! +//! A single [`Behaviour`] instance can be used with an entire +//! protocol family that share the same request and response types. +//! For that purpose, [`Codec::Protocol`] is typically +//! instantiated with a sum type. + +mod codec; +mod handler; + +use std::collections::{HashMap, HashSet, VecDeque}; +use std::sync::atomic::AtomicU64; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::Duration; +use std::{fmt, io}; + +pub use codec::Codec; +use futures::channel::mpsc; +use handler::Handler; +use libp2p::core::transport::PortUse; +use libp2p::core::{ConnectedPoint, Endpoint, Multiaddr}; +use libp2p::identity::PeerId; +use libp2p::swarm::behaviour::{AddressChange, ConnectionClosed, DialFailure, FromSwarm}; +use libp2p::swarm::dial_opts::DialOpts; +use libp2p::swarm::{ + ConnectionDenied, ConnectionHandler, ConnectionId, NetworkBehaviour, NotifyHandler, THandler, THandlerInEvent, + THandlerOutEvent, ToSwarm, +}; + +use crate::handler::OutboundMessage; + +/// The events emitted by a request/streaming-response [`Behaviour`]. +#[derive(Debug)] +pub enum Event { + /// An incoming request from another peer. 
+ InboundRequest { + /// The peer who sent the request. + peer: PeerId, + /// The ID of the request. + request_id: InboundRequestId, + /// The request message. + request: TRequest, + /// The channel through which we are expected to send responses. + channel: mpsc::Sender, + }, + /// Outbound request to another peer was accepted and we can now await + /// responses. + OutboundRequestSentAwaitingResponses { + /// The peer who received our request. + peer: PeerId, + /// The ID of the outbound request. + request_id: OutboundRequestId, + /// The channel through which we can receive the responses. + channel: mpsc::Receiver>, + }, + /// An outbound request failed. + OutboundFailure { + /// The peer to whom the request was sent. + peer: PeerId, + /// The (local) ID of the failed request. + request_id: OutboundRequestId, + /// The error that occurred. + error: OutboundFailure, + }, + /// An inbound request failed. + InboundFailure { + /// The peer from whom the request was received. + peer: PeerId, + /// The ID of the failed inbound request. + request_id: InboundRequestId, + /// The error that occurred. + error: InboundFailure, + }, + OutboundResponseStreamClosed { + /// The peer to whom the responses were sent. + peer: PeerId, + /// The ID of the inbound request to which responses were sent. + request_id: InboundRequestId, + }, + InboundResponseStreamClosed { + /// The peer from whom the responses were received. + peer: PeerId, + /// The ID of the outbound request to which responses were received. + request_id: OutboundRequestId, + }, +} + +/// Possible failures occurring in the context of sending +/// an outbound request and receiving the response. +#[derive(Debug)] +pub enum OutboundFailure { + /// The request could not be sent because a dialing attempt failed. + DialFailure, + /// The request timed out before a response was received. + /// + /// It is not known whether the request may have been + /// received (and processed) by the remote peer. 
+ Timeout, + /// The connection closed before a response was received. + /// + /// It is not known whether the request may have been + /// received (and processed) by the remote peer. + ConnectionClosed, + /// The remote supports none of the requested protocols. + UnsupportedProtocols, + /// An IO failure happened on an outbound stream. + Io(io::Error), +} + +impl fmt::Display for OutboundFailure { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + OutboundFailure::DialFailure => write!(f, "Failed to dial the requested peer"), + OutboundFailure::Timeout => write!(f, "Timeout while waiting for a response"), + OutboundFailure::ConnectionClosed => { + write!(f, "Connection was closed before a response was received") + } + OutboundFailure::UnsupportedProtocols => { + write!(f, "The remote supports none of the requested protocols") + } + OutboundFailure::Io(e) => write!(f, "IO error on outbound stream: {e}"), + } + } +} + +impl std::error::Error for OutboundFailure {} + +/// Possible failures occurring in the context of receiving an +/// inbound request and sending a response. +#[derive(Debug)] +pub enum InboundFailure { + /// The inbound request timed out, either while reading the + /// incoming request or before a response is sent, e.g. if + /// `Event::InboundRequest::channel::send` is not called in a + /// timely manner. + Timeout, + /// The connection closed before a response could be send. + ConnectionClosed, + /// An IO failure happened on an inbound stream. 
+ Io(io::Error), +} + +impl fmt::Display for InboundFailure { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + InboundFailure::Timeout => { + write!(f, "Timeout while receiving request or sending response") + } + InboundFailure::ConnectionClosed => { + write!(f, "Connection was closed before a response could be sent") + } + InboundFailure::Io(e) => write!(f, "IO error on inbound stream: {e}"), + } + } +} + +impl std::error::Error for InboundFailure {} + +/// The ID of an inbound request. +/// +/// Note: [`InboundRequestId`]'s uniqueness is only guaranteed between +/// inbound requests of the same originating [`Behaviour`]. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct InboundRequestId(u64); + +impl fmt::Display for InboundRequestId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +/// The ID of an outbound request. +/// +/// Note: [`OutboundRequestId`]'s uniqueness is only guaranteed between +/// outbound requests of the same originating [`Behaviour`]. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +pub struct OutboundRequestId(u64); + +impl fmt::Display for OutboundRequestId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +/// The configuration for a `Behaviour` protocol. +#[derive(Debug, Clone, Copy)] +pub struct Config { + request_timeout: Duration, + max_concurrent_streams: usize, +} + +impl Default for Config { + fn default() -> Self { + Self { request_timeout: Duration::from_secs(60), max_concurrent_streams: 10000 } + } +} + +impl Config { + /// Sets the timeout for inbound and outbound requests. + pub fn request_timeout(mut self, v: Duration) -> Self { + self.request_timeout = v; + self + } + + /// Sets the upper bound for the number of concurrent inbound + outbound + /// streams. 
+ pub fn max_concurrent_streams(mut self, num_streams: usize) -> Self { + self.max_concurrent_streams = num_streams; + self + } +} + +/// A request/streaming-response protocol for some message codec. +#[allow(clippy::type_complexity)] +pub struct Behaviour +where + TCodec: Codec + Clone + Send + 'static, +{ + /// The supported protocols. + protocols: Vec, + /// The next (local) request ID. + next_outbound_request_id: OutboundRequestId, + /// The next (inbound) request ID. + next_inbound_request_id: Arc, + /// The protocol configuration. + config: Config, + /// The protocol codec for reading and writing requests and responses. + codec: TCodec, + /// Pending events to return from `poll`. + pending_events: VecDeque, OutboundMessage>>, + /// The currently connected peers, their pending outbound and inbound + /// responses and their known, reachable addresses, if any. + connected: HashMap>, + /// Requests that have not yet been sent and are waiting for a connection + /// to be established. + pending_outbound_requests: HashMap>>, +} + +impl Behaviour +where + TCodec: Codec + Default + Clone + Send + 'static, +{ + /// Creates a new `Behaviour` for the given configuration, + /// using [`Default`] to construct the codec and the protocol. + pub fn new(cfg: Config) -> Self + where + TCodec::Protocol: Default, + { + Self::with_codec_and_protocols(TCodec::default(), std::iter::once(TCodec::Protocol::default()), cfg) + } +} + +impl Behaviour +where + TCodec: Codec + Clone + Send + 'static, +{ + /// Creates a new `Behaviour` with a default protocol name for the given + /// codec and configuration. + pub fn with_codec(codec: TCodec, cfg: Config) -> Self + where + TCodec::Protocol: Default, + { + Self::with_codec_and_protocols(codec, std::iter::once(TCodec::Protocol::default()), cfg) + } + + /// Creates a new `Behaviour` for the given + /// protocols, codec and configuration. 
+ pub fn with_codec_and_protocols(codec: TCodec, protocols: I, cfg: Config) -> Self + where + I: IntoIterator, + { + Behaviour { + protocols: protocols.into_iter().collect(), + next_outbound_request_id: OutboundRequestId(1), + next_inbound_request_id: Arc::new(AtomicU64::new(1)), + config: cfg, + codec, + pending_events: VecDeque::new(), + connected: HashMap::new(), + pending_outbound_requests: HashMap::new(), + } + } + + /// Initiates sending a request. + /// + /// If the targeted peer is currently not connected, a dialing + /// attempt is initiated and the request is sent as soon as a + /// connection is established. + /// + /// > **Note**: In order for such a dialing attempt to succeed, + /// > the `RequestResponse` protocol must be embedded + /// > in another `NetworkBehaviour` that provides peer and + /// > address discovery. + pub fn send_request(&mut self, peer: &PeerId, request: TCodec::Request) -> OutboundRequestId { + let request_id = self.next_outbound_request_id(); + + let request = OutboundMessage { request_id, request, protocols: self.protocols.clone() }; + + if let Some(request) = self.try_send_request(peer, request) { + self.pending_events.push_back(ToSwarm::Dial { opts: DialOpts::peer_id(*peer).build() }); + + self.pending_outbound_requests.entry(*peer).or_default().push(request); + } + + request_id + } + + /// Checks whether a peer is currently connected. + pub fn is_connected(&self, peer: &PeerId) -> bool { + if let Some(connections) = self.connected.get(peer) { + !connections.is_empty() + } else { + false + } + } + + /// Returns the next outbound request ID. + fn next_outbound_request_id(&mut self) -> OutboundRequestId { + let request_id = self.next_outbound_request_id; + self.next_outbound_request_id.0 += 1; + request_id + } + + /// Tries to send a request by queueing an appropriate event to be + /// emitted to the `Swarm`. If the peer is not currently connected, + /// the given request is return unchanged. 
+ fn try_send_request(&mut self, peer: &PeerId, request: OutboundMessage) -> Option> { + if let Some(connections) = self.connected.get_mut(peer) { + if connections.is_empty() { + return Some(request); + } + let ix = (request.request_id.0 as usize) % connections.len(); + let conn = &mut connections[ix]; + conn.pending_outbound_response_streams.insert(request.request_id); + self.pending_events.push_back(ToSwarm::NotifyHandler { + peer_id: *peer, + handler: NotifyHandler::One(conn.id), + event: request, + }); + None + } else { + Some(request) + } + } + + /// Remove pending outbound response stream for the given peer and + /// connection. + /// + /// Returns `true` if the provided connection to the given peer is still + /// alive and the [`OutboundRequestId`] was previously present and is now + /// removed. Returns `false` otherwise. + fn remove_pending_outbound_response_stream( + &mut self, + peer: &PeerId, + connection: ConnectionId, + request: OutboundRequestId, + ) -> bool { + self.get_connection_mut(peer, connection) + .map(|c| c.pending_outbound_response_streams.remove(&request)) + .unwrap_or(false) + } + + /// Remove pending inbound response stream for the given peer and + /// connection. + /// + /// Returns `true` if the provided connection to the given peer is still + /// alive and the [`InboundRequestId`] was previously present and is now + /// removed. Returns `false` otherwise. + fn remove_pending_inbound_response_stream( + &mut self, + peer: &PeerId, + connection: ConnectionId, + request: InboundRequestId, + ) -> bool { + self.get_connection_mut(peer, connection) + .map(|c| c.pending_inbound_response_streams.remove(&request)) + .unwrap_or(false) + } + + /// Returns a mutable reference to the connection in `self.connected` + /// corresponding to the given [`PeerId`] and [`ConnectionId`]. 
+ fn get_connection_mut(&mut self, peer: &PeerId, connection: ConnectionId) -> Option<&mut Connection> { + self.connected.get_mut(peer).and_then(|connections| connections.iter_mut().find(|c| c.id == connection)) + } + + fn on_address_change(&mut self, AddressChange { peer_id, connection_id, new, .. }: AddressChange<'_>) { + let new_address = match new { + ConnectedPoint::Dialer { address, .. } => Some(address.clone()), + ConnectedPoint::Listener { .. } => None, + }; + let connections = + self.connected.get_mut(&peer_id).expect("Address change can only happen on an established connection."); + + let connection = connections + .iter_mut() + .find(|c| c.id == connection_id) + .expect("Address change can only happen on an established connection."); + connection.remote_address = new_address; + } + + fn on_connection_closed( + &mut self, + ConnectionClosed { peer_id, connection_id, remaining_established, .. }: ConnectionClosed<'_>, + ) { + let connections = + self.connected.get_mut(&peer_id).expect("Expected some established connection to peer before closing."); + + let connection = connections + .iter() + .position(|c| c.id == connection_id) + .map(|p: usize| connections.remove(p)) + .expect("Expected connection to be established before closing."); + + debug_assert_eq!(connections.is_empty(), remaining_established == 0); + if connections.is_empty() { + self.connected.remove(&peer_id); + } + + for request_id in connection.pending_inbound_response_streams { + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::InboundFailure { + peer: peer_id, + request_id, + error: InboundFailure::ConnectionClosed, + })); + } + + for request_id in connection.pending_outbound_response_streams { + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { + peer: peer_id, + request_id, + error: OutboundFailure::ConnectionClosed, + })); + } + } + + fn on_dial_failure(&mut self, DialFailure { peer_id, .. 
}: DialFailure<'_>) { + if let Some(peer) = peer_id { + // If there are pending outgoing requests when a dial failure occurs, + // it is implied that we are not connected to the peer, since pending + // outgoing requests are drained when a connection is established and + // only created when a peer is not connected when a request is made. + // Thus these requests must be considered failed, even if there is + // another, concurrent dialing attempt ongoing. + if let Some(pending) = self.pending_outbound_requests.remove(&peer) { + for request in pending { + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { + peer, + request_id: request.request_id, + error: OutboundFailure::DialFailure, + })); + } + } + } + } + + /// Preloads a new [`Handler`] with requests that are waiting to be sent to + /// the newly connected peer. + fn preload_new_handler( + &mut self, + handler: &mut Handler, + peer: PeerId, + connection_id: ConnectionId, + remote_address: Option, + ) { + let mut connection = Connection::new(connection_id, remote_address); + + if let Some(pending_requests) = self.pending_outbound_requests.remove(&peer) { + for request in pending_requests { + connection.pending_outbound_response_streams.insert(request.request_id); + handler.on_behaviour_event(request); + } + } + + self.connected.entry(peer).or_default().push(connection); + } +} + +impl NetworkBehaviour for Behaviour +where + TCodec: Codec + Send + Clone + 'static, +{ + type ConnectionHandler = Handler; + type ToSwarm = Event; + + fn handle_established_inbound_connection( + &mut self, + connection_id: ConnectionId, + peer: PeerId, + _: &Multiaddr, + _: &Multiaddr, + ) -> Result, ConnectionDenied> { + let mut handler = Handler::new( + self.protocols.clone(), + self.codec.clone(), + self.config.request_timeout, + self.next_inbound_request_id.clone(), + self.config.max_concurrent_streams, + ); + + self.preload_new_handler(&mut handler, peer, connection_id, None); + + Ok(handler) + } + + fn 
handle_pending_outbound_connection( + &mut self, + _connection_id: ConnectionId, + maybe_peer: Option, + _addresses: &[Multiaddr], + _effective_role: Endpoint, + ) -> Result, ConnectionDenied> { + let peer = match maybe_peer { + None => return Ok(vec![]), + Some(peer) => peer, + }; + + let mut addresses = Vec::new(); + if let Some(connections) = self.connected.get(&peer) { + addresses.extend(connections.iter().filter_map(|c| c.remote_address.clone())) + } + + Ok(addresses) + } + + fn handle_established_outbound_connection( + &mut self, + connection_id: ConnectionId, + peer: PeerId, + remote_address: &Multiaddr, + _: Endpoint, + _: PortUse, + ) -> Result, ConnectionDenied> { + let mut handler = Handler::new( + self.protocols.clone(), + self.codec.clone(), + self.config.request_timeout, + self.next_inbound_request_id.clone(), + self.config.max_concurrent_streams, + ); + + self.preload_new_handler(&mut handler, peer, connection_id, Some(remote_address.clone())); + + Ok(handler) + } + + fn on_swarm_event(&mut self, event: FromSwarm<'_>) { + match event { + FromSwarm::ConnectionEstablished(_) => {} + FromSwarm::ConnectionClosed(connection_closed) => self.on_connection_closed(connection_closed), + FromSwarm::AddressChange(address_change) => self.on_address_change(address_change), + FromSwarm::DialFailure(dial_failure) => self.on_dial_failure(dial_failure), + _ => {} + } + } + + fn on_connection_handler_event(&mut self, peer: PeerId, connection: ConnectionId, event: THandlerOutEvent) { + match event { + handler::Event::OutboundRequestSentAwaitingResponses { request_id, receiver } => { + let removed = self.remove_pending_outbound_response_stream(&peer, connection, request_id); + debug_assert!(removed, "Expect request_id to be pending before getting the response channel.",); + + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::OutboundRequestSentAwaitingResponses { + peer, + request_id, + channel: receiver, + })); + } + handler::Event::InboundRequest { 
request_id, request, sender } => { + match self.get_connection_mut(&peer, connection) { + Some(connection) => { + let inserted = connection.pending_inbound_response_streams.insert(request_id); + debug_assert!(inserted, "Expect id of new request to be unknown."); + + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::InboundRequest { + peer, + request_id, + request, + channel: sender, + })) + } + None => { + tracing::debug!( + "Connection ({connection}) closed after `Event::Request` ({request_id}) \ + has been emitted." + ); + } + } + } + handler::Event::OutboundResponseStreamClosed(request_id) => { + let removed = self.remove_pending_inbound_response_stream(&peer, connection, request_id); + + debug_assert!(removed, "Expect request_id to be pending before response is sent."); + + self.pending_events + .push_back(ToSwarm::GenerateEvent(Event::OutboundResponseStreamClosed { peer, request_id })); + } + handler::Event::InboundResponseStreamClosed(request_id) => { + let removed = self.remove_pending_outbound_response_stream(&peer, connection, request_id); + + debug_assert!( + !removed, + "Expect request_id to have been removed from pending because the response \ + channel has already been available." 
+ ); + + self.pending_events + .push_back(ToSwarm::GenerateEvent(Event::InboundResponseStreamClosed { peer, request_id })); + } + handler::Event::OutboundTimeout(request_id) => { + self.remove_pending_outbound_response_stream(&peer, connection, request_id); + + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { + peer, + request_id, + error: OutboundFailure::Timeout, + })); + } + handler::Event::OutboundUnsupportedProtocols(request_id) => { + let removed = self.remove_pending_outbound_response_stream(&peer, connection, request_id); + debug_assert!(removed, "Expect request_id to be pending before failing to connect.",); + + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { + peer, + request_id, + error: OutboundFailure::UnsupportedProtocols, + })); + } + handler::Event::OutboundStreamFailed { request_id, error } => { + self.remove_pending_outbound_response_stream(&peer, connection, request_id); + + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::OutboundFailure { + peer, + request_id, + error: OutboundFailure::Io(error), + })) + } + handler::Event::InboundTimeout(request_id) => { + let removed = self.remove_pending_inbound_response_stream(&peer, connection, request_id); + + if removed { + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::InboundFailure { + peer, + request_id, + error: InboundFailure::Timeout, + })); + } else { + // This happens when timeout is emitted before `read_request` finishes. + tracing::debug!("Inbound request timeout for an unknown request_id ({request_id})"); + } + } + handler::Event::InboundStreamFailed { request_id, error } => { + let removed = self.remove_pending_inbound_response_stream(&peer, connection, request_id); + + if removed { + self.pending_events.push_back(ToSwarm::GenerateEvent(Event::InboundFailure { + peer, + request_id, + error: InboundFailure::Io(error), + })); + } else { + // This happens when `read_request` fails. 
+ tracing::debug!( + "Inbound failure is reported for an unknown request_id ({request_id}): \ + {error}" + ); + } + } + } + } + + fn poll(&mut self, _: &mut Context<'_>) -> Poll>> { + if let Some(ev) = self.pending_events.pop_front() { + return Poll::Ready(ev); + } else if self.pending_events.capacity() > EMPTY_QUEUE_SHRINK_THRESHOLD { + self.pending_events.shrink_to_fit(); + } + + Poll::Pending + } +} + +/// Internal threshold for when to shrink the capacity +/// of empty queues. If the capacity of an empty queue +/// exceeds this threshold, the associated memory is +/// released. +const EMPTY_QUEUE_SHRINK_THRESHOLD: usize = 100; + +/// Internal information tracked for an established connection. +struct Connection { + id: ConnectionId, + remote_address: Option, + /// Pending outbound responses where corresponding inbound requests have + /// been received on this connection and emitted via `poll` but have not yet + /// been answered. + pending_outbound_response_streams: HashSet, + /// Pending inbound responses for previously sent requests on this + /// connection. 
+ pending_inbound_response_streams: HashSet, +} + +impl Connection { + fn new(id: ConnectionId, remote_address: Option) -> Self { + Self { + id, + remote_address, + pending_outbound_response_streams: Default::default(), + pending_inbound_response_streams: Default::default(), + } + } +} diff --git a/crates/madara/client/p2p_stream/tests/error_reporting.rs b/crates/madara/client/p2p_stream/tests/error_reporting.rs new file mode 100644 index 000000000..23a578970 --- /dev/null +++ b/crates/madara/client/p2p_stream/tests/error_reporting.rs @@ -0,0 +1,356 @@ +use std::io; +use std::time::Duration; + +use futures::prelude::*; +use libp2p_swarm_test::SwarmExt; +use p2p_stream::{InboundFailure, OutboundFailure}; + +pub mod utils; + +use utils::{ + new_swarm, new_swarm_with_timeout, wait_inbound_failure, wait_inbound_request, wait_inbound_response_stream_closed, + wait_no_events, wait_outbound_failure, wait_outbound_request_sent_awaiting_responses, Action, +}; + +#[tokio::test] +async fn report_outbound_failure_on_read_response_failure() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let server_task = async move { + let (peer, req_id, action, mut resp_channel) = wait_inbound_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::FailOnReadResponse); + + resp_channel.send(Action::FailOnReadResponse).await.unwrap(); + + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` + // instead Wait for swarm2 disconnecting + let (peer, req_id_done, error) = wait_inbound_failure(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + assert!(matches!(error, InboundFailure::ConnectionClosed)); + }; + + let client_task = async move { + let req_id = 
swarm2.behaviour_mut().send_request(&peer1_id, Action::FailOnReadResponse); + + let (peer, req_id_done, mut resp_channel) = + wait_outbound_request_sent_awaiting_responses(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + assert!( + matches!(resp_channel.next().await, Some(Err(x)) if x.kind() == io::ErrorKind::Other && x.to_string() == "FailOnReadResponse") + ); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + let error = match error { + OutboundFailure::Io(e) => e, + e => panic!("Unexpected error: {e:?}"), + }; + + assert_eq!(error.kind(), io::ErrorKind::Other); + assert_eq!(error.into_inner().unwrap().to_string(), "FailOnReadResponse"); + }; + + // Make sure both run to completion + tokio::join!(server_task, client_task); +} + +#[tokio::test] +async fn report_outbound_failure_on_write_request_failure() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (_peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // Expects no events because `Event::Request` is produced after `read_request`. + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` + // instead. 
+ let server_task = async move { + wait_no_events(&mut swarm1).await; + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, Action::FailOnWriteRequest); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + let error = match error { + OutboundFailure::Io(e) => e, + e => panic!("Unexpected error: {e:?}"), + }; + + assert_eq!(error.kind(), io::ErrorKind::Other); + assert_eq!(error.into_inner().unwrap().to_string(), "FailOnWriteRequest"); + }; + + // Server should always "outrun" the client + tokio::spawn(server_task); + + // Make sure client runs to completion + client_task.await; +} + +#[tokio::test] +async fn report_outbound_timeout_on_read_response_timeout() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + // `swarm1` needs to have a bigger timeout to avoid racing + let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(200)); + let (peer2_id, mut swarm2) = new_swarm_with_timeout(Duration::from_millis(100)); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let server_task = async move { + let (peer, req_id, action, mut resp_tx) = wait_inbound_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::TimeoutOnReadResponse); + + resp_tx.send(Action::TimeoutOnReadResponse).await.unwrap(); + + let (peer, req_id_done, error) = wait_inbound_failure(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + assert!(matches!(error, InboundFailure::ConnectionClosed)); + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, Action::TimeoutOnReadResponse); + + let (peer, req_id_done, mut resp_rx) = + wait_outbound_request_sent_awaiting_responses(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + 
assert_eq!(req_id_done, req_id); + + assert!(resp_rx.next().await.is_none()); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + assert!(matches!(error, OutboundFailure::Timeout)); + }; + + // Make sure both run to completion + tokio::join!(server_task, client_task); +} + +#[tokio::test] +async fn report_inbound_closure_on_read_request_failure() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (_peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // Expects no events because `Event::IncomingRequest` is produced after + // `read_request`. Keep the connection alive, otherwise swarm2 may receive + // `ConnectionClosed` instead. + let server_task = async move { + wait_no_events(&mut swarm1).await; + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, Action::FailOnReadRequest); + + let (peer, req_id_done, mut resp_rx) = + wait_outbound_request_sent_awaiting_responses(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + assert!(resp_rx.next().await.is_none()); + + let (peer, req_id_done) = wait_inbound_response_stream_closed(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + }; + + // Server should always "outrun" the client + tokio::spawn(server_task); + + // Make sure client runs to completion + client_task.await; +} + +#[tokio::test] +async fn report_inbound_failure_on_write_response_failure() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + let (peer1_id, mut swarm1) = new_swarm(); + let (peer2_id, mut swarm2) = new_swarm(); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let server_task = async move { + let 
(peer, req_id, action, mut resp_tx) = wait_inbound_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::FailOnWriteResponse); + + resp_tx.send(Action::FailOnWriteResponse).await.unwrap(); + + let (peer, req_id_done, error) = wait_inbound_failure(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + + let error = match error { + InboundFailure::Io(e) => e, + e => panic!("Unexpected error: {e:?}"), + }; + + assert_eq!(error.kind(), io::ErrorKind::Other); + assert_eq!(error.into_inner().unwrap().to_string(), "FailOnWriteResponse"); + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, Action::FailOnWriteResponse); + + let (peer, req_id_done, mut resp_rx) = + wait_outbound_request_sent_awaiting_responses(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + assert!(resp_rx.next().await.is_none()); + + // We cannot know if writing response failed or there was no response written at + // all. 
+ wait_inbound_response_stream_closed(&mut swarm2).await.unwrap(); + }; + + // Make sure both run to completion + tokio::join!(client_task, server_task); +} + +#[tokio::test] +async fn report_inbound_timeout_on_write_response_timeout() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + // `swarm2` needs to have a bigger timeout to avoid racing + let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(100)); + let (peer2_id, mut swarm2) = new_swarm_with_timeout(Duration::from_millis(200)); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let server_task = async move { + let (peer, req_id, action, mut resp_channel) = wait_inbound_request(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(action, Action::TimeoutOnWriteResponse); + + resp_channel.send(Action::TimeoutOnWriteResponse).await.unwrap(); + + let (peer, req_id_done, error) = wait_inbound_failure(&mut swarm1).await.unwrap(); + assert_eq!(peer, peer2_id); + assert_eq!(req_id_done, req_id); + assert!(matches!(error, InboundFailure::Timeout)); + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, Action::TimeoutOnWriteResponse); + + let (peer, req_id_done, mut resp_channel) = + wait_outbound_request_sent_awaiting_responses(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + assert!(resp_channel.next().await.is_none()); + + let (peer, req_id_done) = wait_inbound_response_stream_closed(&mut swarm2).await.unwrap(); + + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + }; + + // Make sure both run to completion + tokio::join!(client_task, server_task); +} + +#[tokio::test] +async fn report_outbound_timeout_on_write_request_timeout() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + // `swarm1` needs to have a bigger timeout to avoid racing + let (peer1_id, mut swarm1) = 
new_swarm_with_timeout(Duration::from_millis(200)); + let (_peer2_id, mut swarm2) = new_swarm_with_timeout(Duration::from_millis(100)); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + // Expects no events because `Event::Request` is produced after `read_request`. + // Keep the connection alive, otherwise swarm2 may receive `ConnectionClosed` + // instead. + let server_task = async move { + wait_no_events(&mut swarm1).await; + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, Action::TimeoutOnWriteRequest); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + assert!(matches!(error, OutboundFailure::Timeout)); + }; + + // Server should always "outrun" the client + tokio::spawn(server_task); + + // Make sure client runs to completion + client_task.await; +} + +#[tokio::test] +async fn report_outbound_timeout_on_read_request_timeout() { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + // `swarm2` needs to have a bigger timeout to avoid racing + let (peer1_id, mut swarm1) = new_swarm_with_timeout(Duration::from_millis(200)); + let (_peer2_id, mut swarm2) = new_swarm_with_timeout(Duration::from_millis(100)); + + swarm1.listen().with_memory_addr_external().await; + swarm2.connect(&mut swarm1).await; + + let server_task = async move { + wait_no_events(&mut swarm1).await; + }; + + let client_task = async move { + let req_id = swarm2.behaviour_mut().send_request(&peer1_id, Action::TimeoutOnReadRequest); + + let (peer, req_id_done, mut resp_channel) = + wait_outbound_request_sent_awaiting_responses(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); + assert_eq!(req_id_done, req_id); + + assert!(resp_channel.next().await.is_none()); + + let (peer, req_id_done, error) = wait_outbound_failure(&mut swarm2).await.unwrap(); + assert_eq!(peer, peer1_id); 
+ assert_eq!(req_id_done, req_id); + assert!(matches!(error, OutboundFailure::Timeout)); + }; + + // Server should always "outrun" the client + tokio::spawn(server_task); + + // Make sure client runs to completion + client_task.await; +} diff --git a/crates/madara/client/p2p_stream/tests/sanity.rs b/crates/madara/client/p2p_stream/tests/sanity.rs new file mode 100644 index 000000000..1b22a8345 --- /dev/null +++ b/crates/madara/client/p2p_stream/tests/sanity.rs @@ -0,0 +1,112 @@ +use futures::prelude::*; +use libp2p::PeerId; +use libp2p_swarm_test::SwarmExt; +use rstest::rstest; +use std::time::Duration; + +pub mod utils; + +use utils::{ + new_swarm_with_timeout, wait_inbound_request, wait_inbound_response_stream_closed, + wait_outbound_request_sent_awaiting_responses, wait_outbound_response_stream_closed, Action, TestSwarm, +}; + +struct Requester { + peer_id: PeerId, + swarm: TestSwarm, +} + +struct Responder { + peer_id: PeerId, + swarm: TestSwarm, +} + +struct Scenario { + requester: Requester, + responder: Responder, +} + +// peer1 is the server, peer2 is the client +async fn setup() -> (PeerId, TestSwarm, PeerId, TestSwarm) { + let (srv_peer_id, mut srv_swarm) = new_swarm_with_timeout(Duration::from_secs(10)); + let (cli_peer_id, mut cli_swarm) = new_swarm_with_timeout(Duration::from_secs(10)); + + srv_swarm.listen().with_memory_addr_external().await; + cli_swarm.connect(&mut srv_swarm).await; + + (srv_peer_id, srv_swarm, cli_peer_id, cli_swarm) +} + +async fn client_request_to_server() -> Scenario { + let (srv_peer_id, srv_swarm, cli_peer_id, cli_swarm) = setup().await; + + Scenario { + requester: Requester { peer_id: cli_peer_id, swarm: cli_swarm }, + responder: Responder { peer_id: srv_peer_id, swarm: srv_swarm }, + } +} + +async fn server_request_to_client() -> Scenario { + let (srv_peer_id, srv_swarm, cli_peer_id, cli_swarm) = setup().await; + + Scenario { + requester: Requester { peer_id: srv_peer_id, swarm: srv_swarm }, + responder: Responder { peer_id: 
cli_peer_id, swarm: cli_swarm }, + } +} + +#[rstest] +#[case::client_request_to_server(client_request_to_server())] +#[case::server_request_to_client(server_request_to_client())] +#[tokio::test] + +async fn sanity( + #[values(0, 1, 322)] num_responses: usize, + #[case] + #[future] + scenario: Scenario, +) { + let _ = tracing_subscriber::fmt().with_test_writer().try_init(); + + let Scenario { mut requester, mut responder } = scenario.await; + + let responder_task = async move { + let (peer, req_id, action, mut resp_tx) = wait_inbound_request(&mut responder.swarm).await.unwrap(); + + assert_eq!(peer, requester.peer_id); + assert_eq!(action, Action::SanityRequest); + + for i in 0..num_responses { + resp_tx.send(Action::SanityResponse(i as u32)).await.unwrap(); + } + + // Force close the stream + drop(resp_tx); + + let (peer, req_id_done) = wait_outbound_response_stream_closed(&mut responder.swarm).await.unwrap(); + + assert_eq!(peer, requester.peer_id); + assert_eq!(req_id_done, req_id); + }; + + let requester_task = async move { + let req_id = requester.swarm.behaviour_mut().send_request(&responder.peer_id, Action::SanityRequest); + + let (peer, req_id_done, mut resp_rx) = + wait_outbound_request_sent_awaiting_responses(&mut requester.swarm).await.unwrap(); + + assert_eq!(peer, responder.peer_id); + assert_eq!(req_id_done, req_id); + + for i in 0..num_responses { + assert_eq!(resp_rx.next().await.unwrap().unwrap(), Action::SanityResponse(i as u32)); + } + + let (peer, req_id_done) = wait_inbound_response_stream_closed(&mut requester.swarm).await.unwrap(); + + assert_eq!(peer, responder.peer_id); + assert_eq!(req_id_done, req_id); + }; + + tokio::join!(responder_task, requester_task); +} diff --git a/crates/madara/client/p2p_stream/tests/utils/mod.rs b/crates/madara/client/p2p_stream/tests/utils/mod.rs new file mode 100644 index 000000000..530cd71c2 --- /dev/null +++ b/crates/madara/client/p2p_stream/tests/utils/mod.rs @@ -0,0 +1,282 @@ +//! 
Common utilities for p2p_stream integration tests. +use std::fmt::Debug; +use std::time::Duration; +use std::{io, iter}; + +use anyhow::{bail, Result}; +use async_trait::async_trait; +use futures::channel::mpsc; +use futures::prelude::*; +use libp2p::core::transport::MemoryTransport; +use libp2p::core::upgrade::Version; +use libp2p::identity::{Keypair, PeerId}; +use libp2p::swarm::{self, NetworkBehaviour, StreamProtocol, Swarm}; +use libp2p::{yamux, Transport}; +use p2p_stream::{Codec, InboundFailure, InboundRequestId, OutboundFailure, OutboundRequestId}; + +#[derive(Clone, Default)] +pub struct TestCodec; + +pub type TestSwarm = Swarm>; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Action { + FailOnReadRequest, + FailOnReadResponse, + TimeoutOnReadResponse, + FailOnWriteRequest, + FailOnWriteResponse, + TimeoutOnWriteResponse, + SanityRequest, + SanityResponse(u32), // The highest byte is ignored + TimeoutOnWriteRequest, + TimeoutOnReadRequest, +} + +impl From for u32 { + fn from(value: Action) -> Self { + match value { + Action::FailOnReadRequest => 0, + Action::FailOnReadResponse => 1, + Action::TimeoutOnReadResponse => 2, + Action::FailOnWriteRequest => 3, + Action::FailOnWriteResponse => 4, + Action::TimeoutOnWriteResponse => 5, + Action::SanityRequest => 6, + Action::SanityResponse(id) => 7 | ((id & 0x00FFFFFF) << 8), + Action::TimeoutOnWriteRequest => 8, + Action::TimeoutOnReadRequest => 9, + } + } +} + +impl TryFrom for Action { + type Error = io::Error; + + fn try_from(value: u32) -> Result { + match value & 0x000000FF { + 0 => Ok(Action::FailOnReadRequest), + 1 => Ok(Action::FailOnReadResponse), + 2 => Ok(Action::TimeoutOnReadResponse), + 3 => Ok(Action::FailOnWriteRequest), + 4 => Ok(Action::FailOnWriteResponse), + 5 => Ok(Action::TimeoutOnWriteResponse), + 6 => Ok(Action::SanityRequest), + 7 => Ok(Action::SanityResponse((value & 0xFFFFFF00) >> 8)), + 8 => Ok(Action::TimeoutOnWriteRequest), + 9 => Ok(Action::TimeoutOnReadRequest), + _ => 
Err(io::Error::new(io::ErrorKind::Other, "invalid action")), + } + } +} + +#[async_trait] +impl Codec for TestCodec { + type Protocol = StreamProtocol; + type Request = Action; + type Response = Action; + + async fn read_request(&mut self, _protocol: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let mut buf = [0u8; std::mem::size_of::()]; + + io.read_exact(&mut buf).await?; + + match u32::from_be_bytes(buf).try_into()? { + Action::FailOnReadRequest => Err(io::Error::new(io::ErrorKind::Other, "FailOnReadRequest")), + Action::TimeoutOnReadRequest => loop { + tokio::time::sleep(Duration::MAX).await; + }, + action => Ok(action), + } + } + + async fn read_response(&mut self, _protocol: &Self::Protocol, io: &mut T) -> io::Result + where + T: AsyncRead + Unpin + Send, + { + let mut buf = [0u8; std::mem::size_of::()]; + + io.read_exact(&mut buf).await?; + + match u32::from_be_bytes(buf).try_into()? { + Action::FailOnReadResponse => Err(io::Error::new(io::ErrorKind::Other, "FailOnReadResponse")), + Action::TimeoutOnReadResponse => loop { + tokio::time::sleep(Duration::MAX).await; + }, + action => Ok(action), + } + } + + async fn write_request(&mut self, _protocol: &Self::Protocol, io: &mut T, req: Self::Request) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + match req { + Action::FailOnWriteRequest => Err(io::Error::new(io::ErrorKind::Other, "FailOnWriteRequest")), + Action::TimeoutOnWriteRequest => loop { + tokio::time::sleep(Duration::MAX).await; + }, + action => { + let bytes = u32::from(action).to_be_bytes(); + io.write_all(&bytes).await?; + Ok(()) + } + } + } + + async fn write_response(&mut self, _protocol: &Self::Protocol, io: &mut T, res: Self::Response) -> io::Result<()> + where + T: AsyncWrite + Unpin + Send, + { + match res { + Action::FailOnWriteResponse => Err(io::Error::new(io::ErrorKind::Other, "FailOnWriteResponse")), + Action::TimeoutOnWriteResponse => loop { + 
tokio::time::sleep(Duration::MAX).await; + }, + action => { + let bytes = u32::from(action).to_be_bytes(); + io.write_all(&bytes).await?; + Ok(()) + } + } + } +} + +/// [`SwarmExt::new_ephemeral`] uses `async_std` executor, but we're using +/// `tokio` +pub(crate) fn new_ephemeral_with_tokio_executor(behaviour_fn: impl FnOnce(Keypair) -> B) -> Swarm +where + B: NetworkBehaviour + Send, + ::ToSwarm: Debug, +{ + let identity = Keypair::generate_ed25519(); + let peer_id = PeerId::from(identity.public()); + + let transport = MemoryTransport::default() + .or_transport(libp2p::tcp::tokio::Transport::default()) + .upgrade(Version::V1) + .authenticate(libp2p_plaintext::Config::new(&identity)) + .multiplex(yamux::Config::default()) + .timeout(Duration::from_secs(20)) + .boxed(); + + Swarm::new( + transport, + behaviour_fn(identity), + peer_id, + swarm::Config::with_tokio_executor().with_idle_connection_timeout(Duration::from_secs(5)), /* Some tests need connections to be kept alive beyond what the individual behaviour configures., */ + ) +} + +pub fn new_swarm_with_timeout(timeout: Duration) -> (PeerId, Swarm>) { + let protocols = iter::once(StreamProtocol::new("/test/1")); + let cfg = p2p_stream::Config::default().request_timeout(timeout); + + // SwarmExt::new_ephemeral uses async::std + let swarm = new_ephemeral_with_tokio_executor(|_| { + p2p_stream::Behaviour::::with_codec_and_protocols(TestCodec, protocols, cfg) + }); + + let peed_id = *swarm.local_peer_id(); + + (peed_id, swarm) +} + +pub fn new_swarm() -> (PeerId, Swarm>) { + new_swarm_with_timeout(Duration::from_millis(100)) +} + +pub async fn wait_no_events(swarm: &mut Swarm>) { + loop { + if let Ok(ev) = swarm.select_next_some().await.try_into_behaviour_event() { + panic!("Unexpected event: {ev:?}") + } + } +} + +pub async fn wait_inbound_request( + swarm: &mut Swarm>, +) -> Result<(PeerId, InboundRequestId, Action, mpsc::Sender)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + 
Ok(p2p_stream::Event::InboundRequest { peer, request_id, request, channel }) => { + return Ok((peer, request_id, request, channel)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} + +pub async fn wait_outbound_request_sent_awaiting_responses( + swarm: &mut Swarm>, +) -> Result<(PeerId, OutboundRequestId, mpsc::Receiver>)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(p2p_stream::Event::OutboundRequestSentAwaitingResponses { peer, request_id, channel }) => { + return Ok((peer, request_id, channel)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} + +pub async fn wait_outbound_response_stream_closed( + swarm: &mut Swarm>, +) -> Result<(PeerId, InboundRequestId)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(p2p_stream::Event::OutboundResponseStreamClosed { peer, request_id, .. }) => { + return Ok((peer, request_id)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} + +pub async fn wait_inbound_response_stream_closed( + swarm: &mut Swarm>, +) -> Result<(PeerId, OutboundRequestId)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(p2p_stream::Event::InboundResponseStreamClosed { peer, request_id, .. }) => { + return Ok((peer, request_id)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} + +pub async fn wait_inbound_failure( + swarm: &mut Swarm>, +) -> Result<(PeerId, InboundRequestId, InboundFailure)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(p2p_stream::Event::InboundFailure { peer, request_id, error }) => { + return Ok((peer, request_id, error)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) 
=> {} + } + } +} + +pub async fn wait_outbound_failure( + swarm: &mut Swarm>, +) -> Result<(PeerId, OutboundRequestId, OutboundFailure)> { + loop { + match swarm.select_next_some().await.try_into_behaviour_event() { + Ok(p2p_stream::Event::OutboundFailure { peer, request_id, error }) => { + return Ok((peer, request_id, error)); + } + Ok(ev) => bail!("Unexpected event: {ev:?}"), + Err(..) => {} + } + } +} diff --git a/crates/madara/client/rpc/src/test_utils.rs b/crates/madara/client/rpc/src/test_utils.rs index 79a786e08..11625838e 100644 --- a/crates/madara/client/rpc/src/test_utils.rs +++ b/crates/madara/client/rpc/src/test_utils.rs @@ -243,8 +243,6 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha }, StateDiff::default(), vec![], - None, - None, ) .unwrap(); @@ -268,8 +266,6 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha }, StateDiff::default(), vec![], - None, - None, ) .unwrap(); @@ -337,8 +333,6 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha }, StateDiff::default(), vec![], - None, - None, ) .unwrap(); @@ -375,8 +369,6 @@ pub fn make_sample_chain_for_block_getters(backend: &MadaraBackend) -> SampleCha }, StateDiff::default(), vec![], - None, - None, ) .unwrap(); } @@ -558,8 +550,6 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha }, state_diffs[0].clone(), vec![], - None, - None, ) .unwrap(); @@ -582,8 +572,6 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha }, state_diffs[1].clone(), vec![], - None, - None, ) .unwrap(); @@ -606,8 +594,6 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha }, state_diffs[2].clone(), vec![], - None, - None, ) .unwrap(); @@ -627,8 +613,6 @@ pub fn make_sample_chain_for_state_updates(backend: &MadaraBackend) -> SampleCha }, state_diffs[3].clone(), vec![], - None, - None, ) .unwrap(); } diff --git 
a/crates/madara/client/rpc/src/versions/user/v0_7_1/methods/read/block_hash_and_number.rs b/crates/madara/client/rpc/src/versions/user/v0_7_1/methods/read/block_hash_and_number.rs index 43c173051..e2abf6207 100644 --- a/crates/madara/client/rpc/src/versions/user/v0_7_1/methods/read/block_hash_and_number.rs +++ b/crates/madara/client/rpc/src/versions/user/v0_7_1/methods/read/block_hash_and_number.rs @@ -53,8 +53,6 @@ mod tests { }, StateDiff::default(), vec![], - None, - None, ) .unwrap(); @@ -72,8 +70,6 @@ mod tests { }, StateDiff::default(), vec![], - None, - None, ) .unwrap(); @@ -94,8 +90,6 @@ mod tests { }, StateDiff::default(), vec![], - None, - None, ) .unwrap(); @@ -123,8 +117,6 @@ mod tests { }, StateDiff::default(), vec![], - None, - None, ) .unwrap(); diff --git a/crates/madara/client/rpc/src/versions/user/v0_7_1/methods/read/get_block_with_receipts.rs b/crates/madara/client/rpc/src/versions/user/v0_7_1/methods/read/get_block_with_receipts.rs index 3438dca3a..1d145b952 100644 --- a/crates/madara/client/rpc/src/versions/user/v0_7_1/methods/read/get_block_with_receipts.rs +++ b/crates/madara/client/rpc/src/versions/user/v0_7_1/methods/read/get_block_with_receipts.rs @@ -251,8 +251,6 @@ mod tests { }, StateDiff::default(), vec![], - None, - None, ) .unwrap(); diff --git a/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/ws/subscribe_events.rs b/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/ws/subscribe_events.rs index 77c4897d9..c01975f6a 100644 --- a/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/ws/subscribe_events.rs +++ b/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/ws/subscribe_events.rs @@ -101,7 +101,6 @@ mod test { /// * `base` - Base number used as prefix for all values /// * `num_events` - Number of events to generate /// * `keys_per_event` - Number of keys per event - fn generate_receipt(base: u64, num_events: usize, keys_per_event: usize) -> TransactionReceipt { // Transaction hash is just the base 
shifted let tx_hash = Felt::from(base << 32); @@ -149,8 +148,6 @@ mod test { }, mp_state_update::StateDiff::default(), vec![], - None, - None, ) .expect("Storing block"); diff --git a/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/ws/subscribe_new_heads.rs b/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/ws/subscribe_new_heads.rs index 742981c66..e75b8083a 100644 --- a/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/ws/subscribe_new_heads.rs +++ b/crates/madara/client/rpc/src/versions/user/v0_8_0/methods/ws/subscribe_new_heads.rs @@ -162,8 +162,6 @@ mod test { }, mp_state_update::StateDiff::default(), vec![], - None, - None, ) .expect("Storing block"); diff --git a/crates/madara/client/sync/Cargo.toml b/crates/madara/client/sync/Cargo.toml index 09843cdc2..ce72dc9fa 100644 --- a/crates/madara/client/sync/Cargo.toml +++ b/crates/madara/client/sync/Cargo.toml @@ -23,7 +23,6 @@ rstest.workspace = true regex.workspace = true mc-db = { workspace = true, features = ["testing"] } -mc-block-import = { workspace = true, features = ["testing"] } mp-utils = { workspace = true, features = ["testing"] } # Compile the test contracts in test cfg. m-cairo-test-contracts.workspace = true @@ -32,7 +31,6 @@ m-cairo-test-contracts.workspace = true # Madara mc-analytics.workspace = true -mc-block-import.workspace = true mc-db.workspace = true mc-gateway-client.workspace = true mc-rpc.workspace = true diff --git a/crates/madara/client/sync/src/l2.rs b/crates/madara/client/sync/src/l2.rs index b2a6041c0..f3bc27b3c 100644 --- a/crates/madara/client/sync/src/l2.rs +++ b/crates/madara/client/sync/src/l2.rs @@ -236,6 +236,8 @@ pub struct L2SyncConfig { pub flush_every_n_seconds: u64, pub pending_block_poll_interval: Duration, pub ignore_block_order: bool, + /// See [`mp_block_import::BlockValidationContext::compute_v0_13_2_hashes`]. 
+ pub compute_v0_13_2_hashes: bool, pub chain_id: ChainId, pub telemetry: Arc, pub block_importer: Arc, @@ -270,6 +272,7 @@ pub async fn sync( chain_id: config.chain_id, trust_class_hashes: false, ignore_block_order: config.ignore_block_order, + compute_v0_13_2_hashes: config.compute_v0_13_2_hashes, }; let mut join_set = JoinSet::new(); diff --git a/crates/madara/client/sync/src/lib.rs b/crates/madara/client/sync/src/lib.rs index 2cda8af8c..178ba862a 100644 --- a/crates/madara/client/sync/src/lib.rs +++ b/crates/madara/client/sync/src/lib.rs @@ -71,6 +71,7 @@ pub async fn l2_sync_worker( telemetry: sync_config.telemetry, block_importer: sync_config.block_importer, warp_update: fetch_config.warp_update, + compute_v0_13_2_hashes: false, }; l2::sync(backend, provider, ctx, l2_config).await?; diff --git a/crates/madara/client/sync2/Cargo.toml b/crates/madara/client/sync2/Cargo.toml new file mode 100644 index 000000000..1591cbf13 --- /dev/null +++ b/crates/madara/client/sync2/Cargo.toml @@ -0,0 +1,85 @@ +[package] +name = "mc-sync2" +description = "This crate is responsible to sync data" +authors.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true +version.workspace = true +homepage.workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dev-dependencies] + +httpmock.workspace = true +tempfile.workspace = true +rstest.workspace = true +regex.workspace = true + +mc-db = { workspace = true, features = ["testing"] } +# Compile the test contracts in test cfg. 
+m-cairo-test-contracts.workspace = true + +[dependencies] + +# Madara +bonsai-trie.workspace = true + +mc-analytics.workspace = true +mc-db.workspace = true +mc-eth.workspace = true +mc-gateway-client.workspace = true +mc-p2p.workspace = true +mc-telemetry.workspace = true + +mp-block.workspace = true +mp-chain-config.workspace = true +mp-class.workspace = true +mp-convert.workspace = true +mp-gateway.workspace = true +mp-receipt.workspace = true +mp-state-update.workspace = true +mp-transactions.workspace = true +mp-utils.workspace = true + +# Starknet +starknet-core.workspace = true +starknet-types-core.workspace = true +starknet_api.workspace = true + +#Instrumentation +opentelemetry = { workspace = true, features = ["metrics", "logs"] } +opentelemetry-appender-tracing = { workspace = true, default-features = false } +opentelemetry-otlp = { workspace = true, features = [ + "tonic", + "metrics", + "logs", +] } +opentelemetry-semantic-conventions = { workspace = true } +opentelemetry-stdout = { workspace = true } +opentelemetry_sdk = { workspace = true, features = ["rt-tokio", "logs"] } +tracing = { workspace = true } +tracing-core = { workspace = true, default-features = false } +tracing-opentelemetry = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter"] } + +# Other +anyhow.workspace = true +num-traits.workspace = true +async-trait.workspace = true +bitvec.workspace = true +futures = { workspace = true, default-features = true } +hyper.workspace = true +rand.workspace = true +rayon.workspace = true +serde_json.workspace = true +thiserror.workspace = true +tokio = { workspace = true, features = [ + "macros", + "parking_lot", + "test-util", + "signal", +] } +url.workspace = true diff --git a/crates/madara/client/sync2/src/apply_state.rs b/crates/madara/client/sync2/src/apply_state.rs new file mode 100644 index 000000000..d88c91abc --- /dev/null +++ b/crates/madara/client/sync2/src/apply_state.rs @@ -0,0 +1,58 @@ +use crate::{ 
+ import::BlockImporter, + pipeline::{ApplyOutcome, PipelineController, PipelineSteps}, +}; +use mc_db::MadaraBackend; +use mp_state_update::StateDiff; +use std::{ops::Range, sync::Arc}; + +pub type ApplyStateSync = PipelineController; +pub fn apply_state_pipeline( + backend: Arc, + importer: Arc, + parallelization: usize, + batch_size: usize, + disable_tries: bool, +) -> ApplyStateSync { + PipelineController::new(ApplyStateSteps { backend, importer, disable_tries }, parallelization, batch_size) +} +pub struct ApplyStateSteps { + backend: Arc, + importer: Arc, + disable_tries: bool, +} + +impl PipelineSteps for ApplyStateSteps { + type InputItem = StateDiff; + type SequentialStepInput = Vec; + type Output = (); + + async fn parallel_step( + self: Arc, + _block_range: Range, + input: Vec, + ) -> anyhow::Result { + Ok(input) + } + + async fn sequential_step( + self: Arc, + block_range: Range, + input: Self::SequentialStepInput, + ) -> anyhow::Result> { + if self.disable_tries { + return Ok(ApplyOutcome::Success(())); + } + if let Some(last_block_n) = block_range.clone().last() { + tracing::debug!("Apply state sequential step {block_range:?}"); + self.importer.apply_to_global_trie(block_range, input).await?; + self.backend.head_status().global_trie.set(Some(last_block_n)); + self.backend.save_head_status_to_db()?; + } + Ok(ApplyOutcome::Success(())) + } + + fn starting_block_n(&self) -> Option { + self.backend.head_status().latest_full_block_n() + } +} diff --git a/crates/madara/client/sync2/src/counter.rs b/crates/madara/client/sync2/src/counter.rs new file mode 100644 index 000000000..b6e4856d2 --- /dev/null +++ b/crates/madara/client/sync2/src/counter.rs @@ -0,0 +1,69 @@ +use std::{ + collections::VecDeque, + time::{Duration, Instant}, +}; + +/// Rolling average implementation. 
+pub struct ThroughputCounter { + buckets: VecDeque<(Instant, u64)>, + bucket_size: Duration, + window_size: Duration, + current_count: u64, + current_bucket_start: Instant, +} +impl ThroughputCounter { + pub fn new(window_size: Duration) -> Self { + let now = Instant::now(); + Self { + buckets: VecDeque::new(), + bucket_size: window_size / 60, + window_size, + current_count: 0, + current_bucket_start: now, + } + } + + pub fn increment(&mut self) { + let now = Instant::now(); + if now.duration_since(self.current_bucket_start) >= self.bucket_size { + // Clean-up expired buckets. + while let Some((time, _)) = self.buckets.front() { + if now.duration_since(*time) < self.window_size { + break; + } + self.buckets.pop_front(); + } + + // Make a new bucket. + if self.current_count > 0 { + self.buckets.push_back((self.current_bucket_start, self.current_count)); + } + self.current_count = 0; + self.current_bucket_start = now; + } + self.current_count += 1; + } + + /// Returns ops/s + pub fn get_throughput(&self) -> f64 { + let now = Instant::now(); + let total_ops = self + .buckets + .iter() + .skip_while(|(time, _)| now.duration_since(*time) >= self.window_size) + .map(|(_, count)| count) + .sum::() + + self.current_count; + let window_duration = if let Some((oldest_time, _)) = self.buckets.front() { + now.duration_since(*oldest_time).as_secs_f64() + } else { + now.duration_since(self.current_bucket_start).as_secs_f64() + }; + + if window_duration > 0.0 { + total_ops as f64 / window_duration + } else { + 0.0 + } + } +} diff --git a/crates/madara/client/sync2/src/gateway/classes.rs b/crates/madara/client/sync2/src/gateway/classes.rs new file mode 100644 index 000000000..320feec91 --- /dev/null +++ b/crates/madara/client/sync2/src/gateway/classes.rs @@ -0,0 +1,115 @@ +use crate::{ + import::BlockImporter, + pipeline::{ApplyOutcome, PipelineController, PipelineSteps}, + util::AbortOnDrop, +}; +use anyhow::Context; +use mc_db::MadaraBackend; +use 
mc_gateway_client::GatewayProvider; +use mp_block::BlockId; +use mp_class::{ClassInfo, ClassInfoWithHash, ConvertedClass, LegacyClassInfo, SierraClassInfo}; +use mp_state_update::DeclaredClassCompiledClass; +use starknet_core::types::Felt; +use std::{collections::HashMap, ops::Range, sync::Arc}; + +pub type ClassesSync = PipelineController; +pub fn classes_pipeline( + backend: Arc, + importer: Arc, + client: Arc, + parallelization: usize, + batch_size: usize, +) -> ClassesSync { + PipelineController::new(ClassesSyncSteps { backend, importer, client }, parallelization, batch_size) +} + +pub struct ClassesSyncSteps { + backend: Arc, + importer: Arc, + client: Arc, +} +impl PipelineSteps for ClassesSyncSteps { + type InputItem = HashMap; + type SequentialStepInput = Vec>; + type Output = (); + + async fn parallel_step( + self: Arc, + block_range: Range, + input: Vec, + ) -> anyhow::Result { + if input.iter().all(|i| i.is_empty()) { + return Ok(vec![]); + } + AbortOnDrop::spawn(async move { + tracing::debug!("Gateway classes parallel step: {block_range:?}"); + let mut out = vec![]; + for (block_n, classes) in block_range.zip(input) { + let mut declared_classes = vec![]; + for (&class_hash, &compiled_class_hash) in classes.iter() { + let class = self + .client + .get_class_by_hash(class_hash, BlockId::Number(block_n)) + .await + .with_context(|| format!("Getting class_hash={class_hash:#x} with block_n={block_n}"))?; + + let class_info = match &class { + mp_class::ContractClass::Sierra(class) => { + let DeclaredClassCompiledClass::Sierra(compiled_class_hash) = compiled_class_hash else { + anyhow::bail!("Expected a Sierra class, found a Legacy class") + }; + ClassInfo::Sierra(SierraClassInfo { contract_class: class.clone(), compiled_class_hash }) + } + mp_class::ContractClass::Legacy(class) => { + if compiled_class_hash != DeclaredClassCompiledClass::Legacy { + anyhow::bail!("Expected a Legacy class, found a Sierra class") + } + ClassInfo::Legacy(LegacyClassInfo { 
contract_class: class.clone() }) + } + }; + + declared_classes.push(ClassInfoWithHash { class_info, class_hash }); + } + + let ret = self + .importer + .run_in_rayon_pool(move |importer| { + importer.verify_compile_classes(block_n, declared_classes, &classes) + }) + .await?; + + out.push(ret); + } + Ok(out) + }) + .await + } + + async fn sequential_step( + self: Arc, + block_range: Range, + input: Self::SequentialStepInput, + ) -> anyhow::Result> { + tracing::debug!("Gateway classes sequential step: {block_range:?}"); + // Save classes in sequential step, because some chains have duplicate class declarations, and we want to be sure + // we always record the earliest block_n + let block_range_ = block_range.clone(); + self.importer + .run_in_rayon_pool(move |importer| { + for (block_n, input) in block_range_.zip(input) { + importer.save_classes(block_n, input)?; + } + anyhow::Ok(()) + }) + .await?; + if let Some(block_n) = block_range.last() { + self.backend.head_status().classes.set(Some(block_n)); + self.backend.save_head_status_to_db()?; + } + Ok(ApplyOutcome::Success(())) + } + + fn starting_block_n(&self) -> Option { + self.backend.head_status().latest_full_block_n() + } +} diff --git a/crates/madara/client/sync2/src/gateway/mod.rs b/crates/madara/client/sync2/src/gateway/mod.rs new file mode 100644 index 000000000..4064e7bec --- /dev/null +++ b/crates/madara/client/sync2/src/gateway/mod.rs @@ -0,0 +1,382 @@ +use crate::{ + apply_state::ApplyStateSync, + import::BlockImporter, + metrics::SyncMetrics, + pipeline::{ApplyOutcome, PipelineController, PipelineSteps}, + sync::{ForwardPipeline, Probe, SyncController, SyncControllerConfig}, + util::AbortOnDrop, +}; +use anyhow::Context; +use classes::ClassesSync; +use mc_db::MadaraBackend; +use mc_gateway_client::GatewayProvider; +use mp_block::{BlockHeaderWithSignatures, BlockId, BlockTag, Header, TransactionWithReceipt}; +use mp_chain_config::{StarknetVersion, StarknetVersionError}; +use 
mp_gateway::state_update::{ProviderStateUpdateWithBlock, ProviderStateUpdateWithBlockPendingMaybe}; +use mp_receipt::EventWithTransactionHash; +use mp_state_update::StateDiff; +use starknet_core::types::Felt; +use std::{iter, ops::Range, sync::Arc}; + +mod classes; + +struct GatewayBlock { + block_hash: Felt, + header: Header, + state_diff: StateDiff, + transactions: Vec, + events: Vec, +} + +#[derive(Debug, thiserror::Error)] +enum FromGatewayError { + #[error("Transaction count is not equal to receipt count")] + TransactionCountNotEqualToReceiptCount, + #[error("Invalid starknet version: {0:#}")] + StarknetVersion(#[from] StarknetVersionError), + #[error("Unable to determine Starknet version for block {0:#x}")] + FromMainnetStarknetVersion(Felt), +} + +impl TryFrom for GatewayBlock { + type Error = FromGatewayError; + fn try_from(value: ProviderStateUpdateWithBlock) -> Result { + if value.block.transactions.len() != value.block.transaction_receipts.len() { + return Err(FromGatewayError::TransactionCountNotEqualToReceiptCount); + } + let state_diff = mp_state_update::StateDiff::from(value.state_update.state_diff); + Ok(GatewayBlock { + block_hash: value.block.block_hash, + header: Header { + parent_block_hash: value.block.parent_block_hash, + sequencer_address: value.block.sequencer_address.unwrap_or_default(), + block_timestamp: mp_block::header::BlockTimestamp(value.block.timestamp), + protocol_version: value + .block + .starknet_version + .as_deref() + .map(|version| Ok(version.parse()?)) + .unwrap_or_else(|| { + StarknetVersion::try_from_mainnet_block_number(value.block.block_number) + .ok_or(FromGatewayError::FromMainnetStarknetVersion(value.block.block_hash)) + })?, + l1_gas_price: mp_block::header::GasPrices { + eth_l1_gas_price: value.block.l1_gas_price.price_in_wei, + strk_l1_gas_price: value.block.l1_gas_price.price_in_fri, + eth_l1_data_gas_price: value.block.l1_data_gas_price.price_in_wei, + strk_l1_data_gas_price: 
value.block.l1_data_gas_price.price_in_fri, + }, + l1_da_mode: value.block.l1_da_mode, + block_number: value.block.block_number, + global_state_root: value.block.state_root, + transaction_count: value.block.transactions.len() as u64, + transaction_commitment: value.block.transaction_commitment, + event_count: value.block.transaction_receipts.iter().map(|r| r.events.len() as u64).sum(), + event_commitment: value.block.event_commitment, + state_diff_length: Some(state_diff.len() as u64), + state_diff_commitment: value.block.state_diff_commitment, + receipt_commitment: value.block.receipt_commitment, + }, + events: value + .block + .transaction_receipts + .iter() + .flat_map(|receipt| { + receipt + .events + .iter() + .cloned() + .map(|event| EventWithTransactionHash { transaction_hash: receipt.transaction_hash, event }) + }) + .collect(), + transactions: value + .block + .transactions + .into_iter() + .zip(value.block.transaction_receipts) + .map(|(transaction, receipt)| TransactionWithReceipt { + receipt: receipt.into_mp(&transaction), + transaction: transaction.into(), + }) + .collect(), + state_diff, + }) + } +} + +pub type GatewayBlockSync = PipelineController; +pub fn block_with_state_update_pipeline( + backend: Arc, + importer: Arc, + client: Arc, + parallelization: usize, + batch_size: usize, +) -> GatewayBlockSync { + PipelineController::new(GatewaySyncSteps { backend, importer, client }, parallelization, batch_size) +} + +// TODO: check that the headers follow each other +pub struct GatewaySyncSteps { + backend: Arc, + importer: Arc, + client: Arc, +} +impl PipelineSteps for GatewaySyncSteps { + type InputItem = (); + type SequentialStepInput = Vec; + type Output = Vec; + + async fn parallel_step( + self: Arc, + block_range: Range, + _input: Vec, + ) -> anyhow::Result { + AbortOnDrop::spawn(async move { + let mut out = vec![]; + tracing::debug!("Gateway sync parallel step {:?}", block_range); + for block_n in block_range { + let block = self + .client + 
.get_state_update_with_block(BlockId::Number(block_n)) + .await + .with_context(|| format!("Getting state update with block_n={block_n}"))?; + + let ProviderStateUpdateWithBlockPendingMaybe::NonPending(block) = block else { + anyhow::bail!("Asked for a block_n, got a pending one") + }; + + let gateway_block: GatewayBlock = block.try_into().context("Parsing gateway block")?; + + let state_diff = self + .importer + .run_in_rayon_pool(move |importer| { + let mut signed_header = BlockHeaderWithSignatures { + header: gateway_block.header, + block_hash: gateway_block.block_hash, + consensus_signatures: vec![], + }; + + // Fill in the header with the commitments missing in pre-v0.13.2 headers from the gateway. + let allow_pre_v0_13_2 = true; + + let state_diff_commitment = importer.verify_state_diff( + block_n, + &gateway_block.state_diff, + &signed_header.header, + allow_pre_v0_13_2, + )?; + let (transaction_commitment, receipt_commitment) = importer.verify_transactions( + block_n, + &gateway_block.transactions, + &signed_header.header, + allow_pre_v0_13_2, + )?; + let event_commitment = importer.verify_events( + block_n, + &gateway_block.events, + &signed_header.header, + allow_pre_v0_13_2, + )?; + signed_header.header = Header { + state_diff_commitment: Some(state_diff_commitment), + transaction_commitment, + event_commitment, + receipt_commitment: Some(receipt_commitment), + ..signed_header.header + }; + importer.verify_header(block_n, &signed_header)?; + + importer.save_header(block_n, signed_header)?; + importer.save_state_diff(block_n, gateway_block.state_diff.clone())?; + importer.save_transactions(block_n, gateway_block.transactions)?; + importer.save_events(block_n, gateway_block.events)?; + + anyhow::Ok(gateway_block.state_diff) + }) + .await?; + out.push(state_diff); + } + Ok(out) + }) + .await + } + async fn sequential_step( + self: Arc, + block_range: Range, + input: Self::SequentialStepInput, + ) -> anyhow::Result> { + tracing::debug!("Gateway sync 
sequential step: {block_range:?}"); + if let Some(block_n) = block_range.last() { + self.backend.head_status().headers.set(Some(block_n)); + self.backend.head_status().state_diffs.set(Some(block_n)); + self.backend.head_status().transactions.set(Some(block_n)); + self.backend.head_status().events.set(Some(block_n)); + self.backend.save_head_status_to_db()?; + } + Ok(ApplyOutcome::Success(input)) + } + + fn starting_block_n(&self) -> Option { + self.backend.head_status().latest_full_block_n() + } +} + +pub struct ForwardSyncConfig { + pub block_parallelization: usize, + pub block_batch_size: usize, + pub classes_parallelization: usize, + pub classes_batch_size: usize, + pub apply_state_parallelization: usize, + pub apply_state_batch_size: usize, + pub disable_tries: bool, +} + +impl Default for ForwardSyncConfig { + fn default() -> Self { + Self { + block_parallelization: 100, + block_batch_size: 1, + classes_parallelization: 200, + classes_batch_size: 1, + apply_state_parallelization: 3, + apply_state_batch_size: 5, + disable_tries: false, + } + } +} + +impl ForwardSyncConfig { + pub fn disable_tries(self, val: bool) -> Self { + Self { disable_tries: val, ..self } + } +} + +pub type GatewaySync = SyncController; +pub fn forward_sync( + backend: Arc, + importer: Arc, + client: Arc, + controller_config: SyncControllerConfig, + config: ForwardSyncConfig, +) -> GatewaySync { + let probe = GatewayLatestProbe::new(client.clone()); + SyncController::new( + GatewayForwardSync::new(backend, importer, client, config), + Some(probe.into()), + controller_config, + ) +} + +pub struct GatewayForwardSync { + blocks_pipeline: GatewayBlockSync, + classes_pipeline: ClassesSync, + apply_state_pipeline: ApplyStateSync, + backend: Arc, +} + +impl GatewayForwardSync { + pub fn new( + backend: Arc, + importer: Arc, + client: Arc, + config: ForwardSyncConfig, + ) -> Self { + let blocks_pipeline = block_with_state_update_pipeline( + backend.clone(), + importer.clone(), + client.clone(), + 
config.block_parallelization, + config.block_batch_size, + ); + let classes_pipeline = classes::classes_pipeline( + backend.clone(), + importer.clone(), + client.clone(), + config.classes_parallelization, + config.classes_batch_size, + ); + let apply_state_pipeline = super::apply_state::apply_state_pipeline( + backend.clone(), + importer.clone(), + config.apply_state_parallelization, + config.apply_state_batch_size, + config.disable_tries, + ); + Self { blocks_pipeline, classes_pipeline, apply_state_pipeline, backend } + } +} + +impl ForwardPipeline for GatewayForwardSync { + async fn run(&mut self, target_height: u64, metrics: &mut SyncMetrics) -> anyhow::Result<()> { + tracing::debug!("Run pipeline to height={target_height:?}"); + loop { + while self.blocks_pipeline.can_schedule_more() && self.blocks_pipeline.next_input_block_n() <= target_height + { + let next_input_block_n = self.blocks_pipeline.next_input_block_n(); + self.blocks_pipeline.push(next_input_block_n..next_input_block_n + 1, iter::once(())); + } + + let next_full_block = self.backend.head_status().next_full_block(); + + tokio::select! { + Some(res) = self.apply_state_pipeline.next() => { + res?; + } + Some(res) = self.classes_pipeline.next() => { + res?; + } + Some(res) = self.blocks_pipeline.next(), if self.classes_pipeline.can_schedule_more() && self.apply_state_pipeline.can_schedule_more() => { + let (range, state_diffs) = res?; + self.classes_pipeline.push(range.clone(), state_diffs.iter().map(|s| s.all_declared_classes())); + self.apply_state_pipeline.push(range, state_diffs); + } + // all pipelines are empty, we're done :) + else => break Ok(()) + } + + let new_next_full_block = self.backend.head_status().next_full_block(); + for block_n in next_full_block..new_next_full_block { + // Notify of a new full block here. 
+ metrics.update(block_n, &self.backend).context("Updating metrics")?; + } + } + } + + fn next_input_block_n(&self) -> u64 { + self.blocks_pipeline.next_input_block_n() + } + + fn is_empty(&self) -> bool { + self.blocks_pipeline.is_empty() && self.classes_pipeline.is_empty() && self.apply_state_pipeline.is_empty() + } + + fn show_status(&self) { + tracing::info!( + "📥 Blocks: {} | Classes: {} | State: {}", + self.blocks_pipeline.status(), + self.classes_pipeline.status(), + self.apply_state_pipeline.status(), + ); + } + + fn latest_block(&self) -> Option { + self.backend.head_status().latest_full_block_n() + } +} + +pub struct GatewayLatestProbe { + client: Arc, +} + +impl GatewayLatestProbe { + pub fn new(client: Arc) -> Self { + Self { client } + } +} + +impl Probe for GatewayLatestProbe { + async fn forward_probe(self: Arc, _next_block_n: u64) -> anyhow::Result> { + let header = self.client.get_header(BlockId::Tag(BlockTag::Latest)).await.context("Getting latest header")?; + Ok(Some(header.block_number)) + } +} diff --git a/crates/madara/client/sync2/src/import.rs b/crates/madara/client/sync2/src/import.rs new file mode 100644 index 000000000..4c57d36ac --- /dev/null +++ b/crates/madara/client/sync2/src/import.rs @@ -0,0 +1,448 @@ +use mc_db::{MadaraBackend, MadaraStorageError}; +use mp_block::{ + commitments::{compute_event_commitment, compute_receipt_commitment, compute_transaction_commitment}, + BlockHeaderWithSignatures, Header, TransactionWithReceipt, +}; +use mp_chain_config::StarknetVersion; +use mp_class::{ + class_hash::ComputeClassHashError, compile::ClassCompilationError, ClassInfo, ClassInfoWithHash, ClassType, + ConvertedClass, LegacyClassInfo, LegacyConvertedClass, SierraClassInfo, SierraConvertedClass, +}; +use mp_convert::ToFelt; +use mp_receipt::EventWithTransactionHash; +use mp_state_update::{DeclaredClassCompiledClass, StateDiff}; +use mp_utils::rayon::{global_spawn_rayon_task, RayonPool}; +use rayon::iter::{IndexedParallelIterator, 
IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}; +use starknet_core::types::Felt; +use std::{borrow::Cow, collections::HashMap, ops::Range, sync::Arc}; + +#[derive(Clone, Debug, Eq, PartialEq, Default)] +pub struct BlockValidationConfig { + /// Trust class hashes. + pub trust_class_hashes: bool, + // /// Ignore the order of the blocks to allow starting at some height. + // pub trust_parent_hash: bool, +} + +#[derive(Debug, thiserror::Error)] +pub enum BlockImportError { + // #[error("Transaction hash mismatch for index #{index}: expected {expected:#x}, got {got:#x}")] + // TransactionHash { index: usize, got: Felt, expected: Felt }, + #[error("Transaction count mismatch: expected {expected}, got {got}")] + TransactionCount { got: u64, expected: u64 }, + #[error("Transaction commitment mismatch: expected {expected:#x}, got {got:#x}")] + TransactionCommitment { got: Felt, expected: Felt }, + + #[error("Event count mismatch: expected {expected}, got {got}")] + EventCount { got: u64, expected: u64 }, + #[error("Event commitment mismatch: expected {expected:#x}, got {got:#x}")] + EventCommitment { got: Felt, expected: Felt }, + + #[error("State diff length mismatch: expected {expected}, got {got}")] + StateDiffLength { got: u64, expected: u64 }, + #[error("State diff commitment mismatch: expected {expected:#x}, got {got:#x}")] + StateDiffCommitment { got: Felt, expected: Felt }, + + #[error("Receipt commitment mismatch: expected {expected:#x}, got {got:#x}")] + ReceiptCommitment { got: Felt, expected: Felt }, + + #[error("Unexpected class: {class_hash:#x}")] + UnexpectedClass { class_hash: Felt }, + #[error("Class type mismatch for class hash {class_hash:#x}: expected {expected}, got {got}")] + ClassType { class_hash: Felt, got: ClassType, expected: ClassType }, + #[error("Class hash mismatch: expected {expected:#x}, got {got:#x}")] + ClassHash { got: Felt, expected: Felt }, + #[error("Class count mismatch: expected {expected}, got {got}")] + ClassCount 
{ got: u64, expected: u64 }, + #[error("Compiled class hash mismatch for class hash {class_hash:#x}: expected {expected:#x}, got {got:#x}")] + CompiledClassHash { class_hash: Felt, got: Felt, expected: Felt }, + #[error("Class with hash {class_hash:#x} failed to compile: {error}")] + CompilationClassError { class_hash: Felt, error: ClassCompilationError }, + #[error("Failed to compute class hash {class_hash:#x}: {error}")] + ComputeClassHash { class_hash: Felt, error: ComputeClassHashError }, + + // #[error("Block hash mismatch: expected {expected:#x}, got {got:#x}")] + // BlockHash { got: Felt, expected: Felt }, + #[error("Block number mismatch: expected {expected:#x}, got {got:#x}")] + BlockNumber { got: u64, expected: u64 }, + + // #[error("Block order mismatch: database expects to import block #{expected}, trying to import #{got}. To import a block out of order, use the `ignore_block_order` flag.")] + // LatestBlockN { expected: u64, got: u64 }, + // #[error("Parent hash mismatch: expected {expected:#x}, got {got:#x}")] + // ParentHash { got: Felt, expected: Felt }, + #[error("Global state root mismatch: expected {expected:#x}, got {got:#x}")] + GlobalStateRoot { got: Felt, expected: Felt }, + /// Internal error, see [`BlockImportError::is_internal`]. + #[error("Internal database error while {context}: {error:#}")] + InternalDb { context: Cow<'static, str>, error: MadaraStorageError }, + /// Internal error, see [`BlockImportError::is_internal`]. + #[error("Internal error: {0}")] + Internal(Cow<'static, str>), +} +impl BlockImportError { + /// Unrecoverable errors. + pub fn is_internal(&self) -> bool { + matches!(self, BlockImportError::InternalDb { .. } | BlockImportError::Internal(_)) + } +} + +/// Shared verification & saving logic between gateway and p2p. 
+#[derive(Clone)] +pub struct BlockImporter { + db: Arc, + config: BlockValidationConfig, + rayon_pool: Arc, +} + +impl BlockImporter { + pub fn new(db: Arc, config: BlockValidationConfig) -> BlockImporter { + Self { db, config, rayon_pool: Arc::new(RayonPool::new()) } + } + + /// Does class compilation. + pub async fn run_in_rayon_pool(&self, func: F) -> R + where + F: FnOnce(&BlockImporter) -> R + Send + 'static, + R: Send + 'static, + { + let this = self.clone(); + self.rayon_pool.spawn_rayon_task(move || func(&this)).await + } + + // HEADERS + + pub fn verify_header( + &self, + block_n: u64, + signed_header: &BlockHeaderWithSignatures, + ) -> Result<(), BlockImportError> { + // TODO + + // TODO: verify signatures + + // verify block_number + if block_n != signed_header.header.block_number { + return Err(BlockImportError::BlockNumber { expected: block_n, got: signed_header.header.block_number }); + } + + // verify block_hash + // TODO: pre_v0_13_2_override + let _block_hash = signed_header + .header + .compute_hash(self.db.chain_config().chain_id.to_felt(), /* pre_v0_13_2_override */ true); + // if signed_header.block_hash != block_hash { + // return Err(P2pError::peer_error(format!( + // "Mismatched block_hash: {:#x}, expected {:#x}", + // signed_header.block_hash, block_hash + // ))); + // } + + Ok(()) + } + + pub fn save_header(&self, block_n: u64, signed_header: BlockHeaderWithSignatures) -> Result<(), BlockImportError> { + self.db.store_block_header(signed_header).map_err(|error| BlockImportError::InternalDb { + error, + context: format!("Storing block header for {block_n}").into(), + })?; + Ok(()) + } + + // TRANSACTIONS & RECEIPTS + + /// Called in a rayon-pool context. + /// Returns the transactions and receipt commitment. 
+ pub fn verify_transactions( + &self, + _block_n: u64, + transactions: &[TransactionWithReceipt], + check_against: &Header, + allow_pre_v0_13_2: bool, + ) -> Result<(Felt, Felt), BlockImportError> { + // Override pre-v0.13.2 transaction hash computation + let starknet_version = StarknetVersion::max(check_against.protocol_version, StarknetVersion::V0_13_2); + let is_pre_v0_13_2_special_case = + allow_pre_v0_13_2 && check_against.protocol_version < StarknetVersion::V0_13_2; + + // Verify transaction hashes. Also compute the (hash with signature, receipt hash). + let tx_hashes_with_signature_and_receipt_hashes: Vec<_> = transactions + .par_iter() + .enumerate() + .map(|(_index, tx)| { + let got = tx.transaction.compute_hash( + self.db.chain_config().chain_id.to_felt(), + starknet_version, + /* is_query */ false, + ); + // For pre-v0.13.2, our tx hash is only used for commitment computation. + // let expected = tx.receipt.transaction_hash(); + // // if expected != got { + // // return Err(BlockImportError::TransactionHash { index, got, expected }); + // // } + Ok((tx.transaction.compute_hash_with_signature(got, starknet_version), tx.receipt.compute_hash())) + }) + .collect::>()?; + + // Verify transaction count (we want to check it when the block does not come from p2p). + let expected = check_against.transaction_count; + let got = transactions.len() as _; + if expected != got { + return Err(BlockImportError::TransactionCount { got, expected }); + } + + // Verify transaction commitment. + let expected = check_against.transaction_commitment; + let transaction_commitment = compute_transaction_commitment( + tx_hashes_with_signature_and_receipt_hashes.iter().map(|(fst, _)| *fst), + starknet_version, + ); + if !is_pre_v0_13_2_special_case && expected != transaction_commitment { + return Err(BlockImportError::TransactionCommitment { got: transaction_commitment, expected }); + } + + // Verify receipt commitment. 
+ let expected = check_against.receipt_commitment.unwrap_or_default(); + let receipt_commitment = compute_receipt_commitment( + tx_hashes_with_signature_and_receipt_hashes.iter().map(|(_, snd)| *snd), + starknet_version, + ); + if !is_pre_v0_13_2_special_case && expected != receipt_commitment { + return Err(BlockImportError::ReceiptCommitment { got: receipt_commitment, expected }); + } + + Ok((transaction_commitment, receipt_commitment)) + } + + /// Called in a rayon-pool context. + pub fn save_transactions( + &self, + block_n: u64, + transactions: Vec, + ) -> Result<(), BlockImportError> { + tracing::debug!("Storing transactions for {block_n:?}"); + self.db.store_transactions(block_n, transactions).map_err(|error| BlockImportError::InternalDb { + error, + context: format!("Storing transactions for {block_n}").into(), + })?; + Ok(()) + } + + // CLASSES + + /// Called in a rayon-pool context. + pub fn verify_compile_classes( + &self, + _block_n: u64, + declared_classes: Vec, + check_against: &HashMap, + ) -> Result, BlockImportError> { + if check_against.len() != declared_classes.len() { + return Err(BlockImportError::ClassCount { + got: declared_classes.len() as _, + expected: check_against.len() as _, + }); + } + let classes = declared_classes + .into_par_iter() + .map(|class| self.verify_compile_class(class, check_against)) + .collect::>()?; + Ok(classes) + } + + /// Called in a rayon-pool context. 
+ fn verify_compile_class( + &self, + class: ClassInfoWithHash, + check_against: &HashMap, + ) -> Result { + let class_hash = class.class_hash; + + let check_against = *check_against.get(&class_hash).ok_or(BlockImportError::UnexpectedClass { class_hash })?; + + match class.class_info { + ClassInfo::Sierra(sierra) => { + tracing::trace!("Converting class with hash {:#x}", class_hash); + + let DeclaredClassCompiledClass::Sierra(expected) = check_against else { + return Err(BlockImportError::ClassType { + class_hash, + got: ClassType::Legacy, + expected: ClassType::Sierra, + }); + }; + if sierra.compiled_class_hash != expected { + return Err(BlockImportError::CompiledClassHash { + class_hash, + got: sierra.compiled_class_hash, + expected, + }); + } + + // Verify class hash + if !self.config.trust_class_hashes { + let expected = sierra + .contract_class + .compute_class_hash() + .map_err(|error| BlockImportError::ComputeClassHash { class_hash, error })?; + if class_hash != expected { + return Err(BlockImportError::ClassHash { got: class_hash, expected }); + } + } + + // Compile + let (compiled_class_hash, compiled_class) = sierra + .contract_class + .compile_to_casm() + .map_err(|e| BlockImportError::CompilationClassError { class_hash, error: e })?; + + // Verify compiled class hash + if compiled_class_hash != sierra.compiled_class_hash { + return Err(BlockImportError::CompiledClassHash { + class_hash, + got: sierra.compiled_class_hash, + expected: compiled_class_hash, + }); + } + Ok(ConvertedClass::Sierra(SierraConvertedClass { + class_hash, + info: SierraClassInfo { contract_class: sierra.contract_class, compiled_class_hash }, + compiled: Arc::new(compiled_class), + })) + } + ClassInfo::Legacy(legacy) => { + tracing::trace!("Converting legacy class with hash {:#x}", class_hash); + + if check_against != DeclaredClassCompiledClass::Legacy { + return Err(BlockImportError::ClassType { + class_hash, + got: ClassType::Sierra, + expected: ClassType::Legacy, + }); + } + + // 
Verify class hash + if !self.config.trust_class_hashes { + let expected = legacy + .contract_class + .compute_class_hash() + .map_err(|e| BlockImportError::ComputeClassHash { class_hash, error: e })?; + + if class_hash != expected { + return Err(BlockImportError::ClassHash { got: class_hash, expected }); + } + } + + Ok(ConvertedClass::Legacy(LegacyConvertedClass { + class_hash, + info: LegacyClassInfo { contract_class: legacy.contract_class }, + })) + } + } + } + + /// Called in a rayon-pool context. + pub fn save_classes(&self, block_n: u64, classes: Vec) -> Result<(), BlockImportError> { + self.db.class_db_store_block(block_n, &classes).map_err(|error| BlockImportError::InternalDb { + error, + context: format!("Storing classes for {block_n}").into(), + })?; + Ok(()) + } + + // STATE DIFF + + /// Called in a rayon-pool context. + /// Returns the state diff commitment. + pub fn verify_state_diff( + &self, + _block_n: u64, + state_diff: &StateDiff, + check_against: &Header, + allow_pre_v0_13_2: bool, + ) -> Result { + let is_pre_v0_13_2_special_case = + allow_pre_v0_13_2 && check_against.protocol_version < StarknetVersion::V0_13_2; + + // Verify state diff length (we want to check it when the block does not come from p2p). + let expected = check_against.state_diff_length.unwrap_or_default(); + let got = state_diff.len() as _; + if expected != got { + return Err(BlockImportError::StateDiffLength { got, expected }); + } + + // Verify state diff commitment. + let expected = check_against.state_diff_commitment.unwrap_or_default(); + let got = state_diff.compute_hash(); + if !is_pre_v0_13_2_special_case && expected != got { + return Err(BlockImportError::StateDiffCommitment { got, expected }); + } + Ok(got) + } + + /// Called in a rayon-pool context. 
+ pub fn save_state_diff(&self, block_n: u64, state_diff: StateDiff) -> Result<(), BlockImportError> { + self.db.store_state_diff(block_n, state_diff).map_err(|error| BlockImportError::InternalDb { + error, + context: format!("Storing state_diff for {block_n}").into(), + })?; + Ok(()) + } + + // EVENTS + + /// Called in a rayon-pool context. + /// Returns the event commitment. + pub fn verify_events( + &self, + _block_n: u64, + events: &[EventWithTransactionHash], + check_against: &Header, + allow_pre_v0_13_2: bool, + ) -> Result { + // Override pre-v0.13.2 transaction hash computation + let starknet_version = StarknetVersion::max(check_against.protocol_version, StarknetVersion::V0_13_2); + let is_pre_v0_13_2_special_case = + allow_pre_v0_13_2 && check_against.protocol_version < StarknetVersion::V0_13_2; + + let event_hashes: Vec<_> = + events.par_iter().map(|ev| ev.event.compute_hash(ev.transaction_hash, starknet_version)).collect(); + + // Verify event count (we want to check it when the block does not come from p2p). + let expected = check_against.event_count; + let got = events.len() as _; + if expected != got { + return Err(BlockImportError::EventCount { got, expected }); + } + + // Verify events commitment. + let expected = check_against.event_commitment; + let got = compute_event_commitment(event_hashes, starknet_version); + if !is_pre_v0_13_2_special_case && expected != got { + return Err(BlockImportError::EventCommitment { got, expected }); + } + + Ok(got) + } + + /// Called in a rayon-pool context. 
+ pub fn save_events(&self, block_n: u64, events: Vec) -> Result<(), BlockImportError> { + self.db.store_events(block_n, events).map_err(|error| BlockImportError::InternalDb { + error, + context: format!("Storing events for {block_n}").into(), + })?; + Ok(()) + } + + // GLOBAL TRIE + + pub async fn apply_to_global_trie( + &self, + block_range: Range, + state_diffs: Vec, + ) -> anyhow::Result<()> { + let this = self.clone(); + // do not use the shared permits for a sequential step + global_spawn_rayon_task(move || this.db.apply_state(block_range.start, state_diffs.iter())).await?; + Ok(()) + } +} diff --git a/crates/madara/client/sync2/src/lib.rs b/crates/madara/client/sync2/src/lib.rs new file mode 100644 index 000000000..e12a9adf4 --- /dev/null +++ b/crates/madara/client/sync2/src/lib.rs @@ -0,0 +1,12 @@ +mod apply_state; +mod pipeline; +mod sync; +mod util; +mod counter; +mod metrics; + +pub use sync::SyncControllerConfig; + +pub mod gateway; +pub mod import; +pub mod p2p; diff --git a/crates/madara/client/sync2/src/metrics.rs b/crates/madara/client/sync2/src/metrics.rs new file mode 100644 index 000000000..a6f66fcad --- /dev/null +++ b/crates/madara/client/sync2/src/metrics.rs @@ -0,0 +1,171 @@ +use crate::counter::ThroughputCounter; +use anyhow::Context; +use mc_analytics::register_gauge_metric_instrument; +use mc_db::db_block_id::DbBlockId; +use mc_db::MadaraBackend; +use num_traits::cast::FromPrimitive; +use opentelemetry::metrics::Gauge; +use opentelemetry::{global, KeyValue}; +use std::time::{Duration, Instant}; + +pub struct SyncMetrics { + /// Built-in throughput counter, for logging purposes + pub counter: ThroughputCounter, + + /// Starting block + pub starting_block: u64, + pub starting_time: Instant, + pub last_update_instant: Option, + pub last_db_metrics_update_instant: Option, + + // L2 network metrics + pub l2_block_number: Gauge, + pub l2_sync_time: Gauge, + pub l2_avg_sync_time: Gauge, + pub l2_latest_sync_time: Gauge, + pub l2_state_size: 
Gauge, // TODO: remove this, as well as the return value from db_metrics update. + pub transaction_count: Gauge, + pub event_count: Gauge, + // L1 network metrics + pub l1_gas_price_wei: Gauge, + pub l1_gas_price_strk: Gauge, +} + +impl SyncMetrics { + pub fn register(starting_block: u64) -> Self { + let common_scope_attributes = vec![KeyValue::new("crate", "block_import")]; + let block_import_meter = global::meter_with_version( + "crates.block_import.opentelemetry", + Some("0.17"), + Some("https://opentelemetry.io/schemas/1.2.0"), + Some(common_scope_attributes.clone()), + ); + + let l2_block_number = register_gauge_metric_instrument( + &block_import_meter, + "l2_block_number".to_string(), + "Current block number".to_string(), + "".to_string(), + ); + + let l2_sync_time = register_gauge_metric_instrument( + &block_import_meter, + "l2_sync_time".to_string(), + "Complete sync time since startup in secs (does not account for restarts)".to_string(), + "".to_string(), + ); + + let l2_avg_sync_time = register_gauge_metric_instrument( + &block_import_meter, + "l2_avg_sync_time".to_string(), + "Average time spent between blocks since startup in secs".to_string(), + "".to_string(), + ); + + let l2_latest_sync_time = register_gauge_metric_instrument( + &block_import_meter, + "l2_latest_sync_time".to_string(), + "Latest time spent between blocks in secs".to_string(), + "".to_string(), + ); + + let l2_state_size = register_gauge_metric_instrument( + &block_import_meter, + "l2_state_size".to_string(), + "Node storage usage in GB".to_string(), + "".to_string(), + ); + + let transaction_count = register_gauge_metric_instrument( + &block_import_meter, + "transaction_count".to_string(), + "Latest block transaction count".to_string(), + "".to_string(), + ); + + let event_count = register_gauge_metric_instrument( + &block_import_meter, + "event_count".to_string(), + "Latest block event count".to_string(), + "".to_string(), + ); + + let l1_gas_price_wei = 
register_gauge_metric_instrument( + &block_import_meter, + "l1_gas_price_wei".to_string(), + "Latest block L1 ETH gas price".to_string(), + "".to_string(), + ); + + let l1_gas_price_strk = register_gauge_metric_instrument( + &block_import_meter, + "l1_gas_price_strk".to_string(), + "Latest block L1 STRK gas price".to_string(), + "".to_string(), + ); + + Self { + counter: ThroughputCounter::new(Duration::from_secs(5 * 60)), + + starting_block, + starting_time: Instant::now(), + last_update_instant: Default::default(), + last_db_metrics_update_instant: Default::default(), + + l2_block_number, + l2_sync_time, + l2_avg_sync_time, + l2_latest_sync_time, + l2_state_size, + + transaction_count, + event_count, + + l1_gas_price_wei, + l1_gas_price_strk, + } + } + + pub fn update(&mut self, block_n: u64, backend: &MadaraBackend) -> anyhow::Result<()> { + let now = Instant::now(); + + // Update Block sync time metrics + let latest_sync_time = self.last_update_instant.map(|inst| now.duration_since(inst)).unwrap_or_default(); + let latest_sync_time = latest_sync_time.as_secs_f64(); + self.last_update_instant = Some(now); + + self.counter.increment(); + + let header = backend + .get_block_info(&DbBlockId::Number(block_n)) + .context("Getting block info")? + .context("No block info")? + .as_nonpending_owned() + .context("Block is pending")? 
+ .header; + + let total_sync_time = now.duration_since(self.starting_time).as_secs_f64(); + + self.l2_sync_time.record(total_sync_time, &[]); + self.l2_latest_sync_time.record(latest_sync_time, &[]); + self.l2_avg_sync_time.record(total_sync_time / (header.block_number - self.starting_block) as f64, &[]); + + self.l2_block_number.record(header.block_number, &[]); + self.transaction_count.record(header.transaction_count, &[]); + self.event_count.record(header.event_count, &[]); + + self.l1_gas_price_wei.record(f64::from_u128(header.l1_gas_price.eth_l1_gas_price).unwrap_or(0f64), &[]); + self.l1_gas_price_strk.record(f64::from_u128(header.l1_gas_price.strk_l1_gas_price).unwrap_or(0f64), &[]); + + let last_update_duration = self.last_db_metrics_update_instant.map(|inst| now.duration_since(inst)); + + if last_update_duration.is_none() || last_update_duration.is_some_and(|d| d >= Duration::from_secs(5)) { + self.last_db_metrics_update_instant = Some(now); + let storage_size = backend.update_metrics(); + let size_gb = storage_size as f64 / (1024 * 1024 * 1024) as f64; + self.l2_state_size.record(size_gb, &[]); + } + + Ok(()) + } +} diff --git a/crates/madara/client/sync2/src/p2p/classes.rs b/crates/madara/client/sync2/src/p2p/classes.rs new file mode 100644 index 000000000..c653c8e77 --- /dev/null +++ b/crates/madara/client/sync2/src/p2p/classes.rs @@ -0,0 +1,102 @@ +use super::{ + pipeline::{P2pError, P2pPipelineController, P2pPipelineSteps}, + P2pPipelineArguments, +}; +use crate::{import::BlockImporter, pipeline::PipelineController}; +use anyhow::Context; +use futures::TryStreamExt; +use mc_db::{stream::BlockStreamConfig, MadaraBackend}; +use mc_p2p::{P2pCommands, PeerId}; +use mp_class::ConvertedClass; +use mp_state_update::DeclaredClassCompiledClass; +use starknet_core::types::Felt; +use std::{collections::HashMap, ops::Range, sync::Arc}; + +pub type ClassesSync = PipelineController>; +pub fn classes_pipeline( + P2pPipelineArguments { backend, peer_set, 
p2p_commands, importer }: P2pPipelineArguments, + parallelization: usize, + batch_size: usize, +) -> ClassesSync { + PipelineController::new( + P2pPipelineController::new(peer_set, ClassesSyncSteps { backend, p2p_commands, importer }), + parallelization, + batch_size, + ) +} +pub struct ClassesSyncSteps { + backend: Arc, + p2p_commands: P2pCommands, + importer: Arc, +} + +impl P2pPipelineSteps for ClassesSyncSteps { + /// All declared classes, extracted from state diff. + type InputItem = HashMap; + type SequentialStepInput = Vec>; + type Output = (); + + async fn p2p_parallel_step( + self: Arc, + peer_id: PeerId, + block_range: Range, + input: Vec, + ) -> Result { + if input.iter().all(|i| i.is_empty()) { + return Ok(vec![]); + } + + tracing::debug!("p2p classes parallel step: {block_range:?}, peer_id: {peer_id}"); + let strm = self + .p2p_commands + .clone() + .make_classes_stream( + peer_id, + BlockStreamConfig::default().with_block_range(block_range.clone()), + input.iter(), + ) + .await; + tokio::pin!(strm); + + let mut out = vec![]; + for (block_n, check_against) in block_range.zip(input.iter().cloned()) { + let classes = strm.try_next().await?.ok_or(P2pError::peer_error("Expected to receive item"))?; + + let ret = self + .importer + .run_in_rayon_pool(move |importer| importer.verify_compile_classes(block_n, classes, &check_against)) + .await?; + + out.push(ret); + } + + Ok(out) + } + + async fn p2p_sequential_step( + self: Arc, + peer_id: PeerId, + block_range: Range, + input: Self::SequentialStepInput, + ) -> Result { + tracing::debug!("p2p classes sequential step: {block_range:?}, peer_id: {peer_id}"); + let block_range_ = block_range.clone(); + self.importer + .run_in_rayon_pool(move |importer| { + for (block_n, input) in block_range_.zip(input) { + importer.save_classes(block_n, input)?; + } + anyhow::Ok(()) + }) + .await?; + if let Some(block_n) = block_range.last() { + self.backend.head_status().classes.set(Some(block_n)); + 
self.backend.save_head_status_to_db().context("Saving head status to db")?; + } + Ok(()) + } + + fn starting_block_n(&self) -> Option { + self.backend.head_status().latest_full_block_n() + } +} diff --git a/crates/madara/client/sync2/src/p2p/events.rs b/crates/madara/client/sync2/src/p2p/events.rs new file mode 100644 index 000000000..142af0efd --- /dev/null +++ b/crates/madara/client/sync2/src/p2p/events.rs @@ -0,0 +1,89 @@ +use super::{ + pipeline::{P2pError, P2pPipelineController, P2pPipelineSteps}, + P2pPipelineArguments, +}; +use crate::{import::BlockImporter, pipeline::PipelineController}; +use anyhow::Context; +use futures::TryStreamExt; +use mc_db::{stream::BlockStreamConfig, MadaraBackend}; +use mc_p2p::{P2pCommands, PeerId}; +use mp_block::Header; +use std::{ops::Range, sync::Arc}; + +pub type EventsSync = PipelineController>; +pub fn events_pipeline( + P2pPipelineArguments { backend, peer_set, p2p_commands, importer }: P2pPipelineArguments, + parallelization: usize, + batch_size: usize, +) -> EventsSync { + PipelineController::new( + P2pPipelineController::new(peer_set, EventsSyncSteps { backend, p2p_commands, importer }), + parallelization, + batch_size, + ) +} + +pub struct EventsSyncSteps { + backend: Arc, + p2p_commands: P2pCommands, + importer: Arc, +} + +impl P2pPipelineSteps for EventsSyncSteps { + type InputItem = Header; + type SequentialStepInput = (); + type Output = (); + + async fn p2p_parallel_step( + self: Arc, + peer_id: PeerId, + block_range: Range, + input: Vec, + ) -> Result { + if input.iter().all(|i| i.event_count == 0) { + return Ok(()); + } + + tracing::debug!("p2p events parallel step: {block_range:?}, peer_id: {peer_id}"); + let strm = self + .p2p_commands + .clone() + .make_events_stream( + peer_id, + BlockStreamConfig::default().with_block_range(block_range.clone()), + input.iter().map(|header| header.event_count as _).collect::>(), + ) + .await; + tokio::pin!(strm); + + for (block_n, header) in block_range.zip(input) { + let 
events = strm.try_next().await?.ok_or(P2pError::peer_error("Expected to receive item"))?; + self.importer + .run_in_rayon_pool(move |importer| { + importer.verify_events(block_n, &events, &header, /* allow_pre_v0_13_2 */ false)?; + importer.save_events(block_n, events) + }) + .await? + } + + Ok(()) + } + + async fn p2p_sequential_step( + self: Arc, + peer_id: PeerId, + block_range: Range, + _input: Self::SequentialStepInput, + ) -> Result { + tracing::debug!("p2p events sequential step: {block_range:?}, peer_id: {peer_id}"); + if let Some(block_n) = block_range.last() { + self.backend.head_status().events.set(Some(block_n)); + self.backend.save_head_status_to_db().context("Saving head status to db")?; + } + Ok(()) + } + + fn starting_block_n(&self) -> Option { + self.backend.head_status().latest_full_block_n() + } +} diff --git a/crates/madara/client/sync2/src/p2p/forward_sync.rs b/crates/madara/client/sync2/src/p2p/forward_sync.rs new file mode 100644 index 000000000..674b7e6a3 --- /dev/null +++ b/crates/madara/client/sync2/src/p2p/forward_sync.rs @@ -0,0 +1,331 @@ +use super::{ + classes::ClassesSync, events::EventsSync, headers::HeadersSync, state_diffs::StateDiffsSync, + transactions::TransactionsSync, +}; +use crate::import::BlockImporter; +use crate::metrics::SyncMetrics; +use crate::p2p::pipeline::P2pError; +use crate::sync::{ForwardPipeline, Probe, SyncController, SyncControllerConfig}; +use crate::{apply_state::ApplyStateSync, p2p::P2pPipelineArguments}; +use anyhow::Context; +use futures::TryStreamExt; +use mc_db::stream::BlockStreamConfig; +use mc_db::MadaraBackend; +use mc_p2p::{P2pCommands, PeerId}; +use std::collections::HashSet; +use std::iter; +use std::sync::Arc; + +/// Pipeline order: +/// ```plaintext +/// ┌───────┐ ┌───────────┐ ┌───────┐ +/// │headers├─┬─►│state_diffs├────┬─►│classes│ +/// └───────┘ │ └───────────┘ │ └───────┘ +/// │ │ +/// │ ┌────────────┐ │ ┌──────────────────┐ +/// └─►│tx, receipts├─┐ └─►│update_global_trie│ +/// 
└────────────┘ │ └──────────────────┘ + │ + │ ┌──────┐ + └─►│events│ + └──────┘ + ``` +/// State diffs, transactions with receipt, and events are checked against their corresponding commitments +/// in the header. +/// However, there is no commitment for classes: we instead check them against the state diff. +/// The update_global_trie step is a pipeline that only has a sequential part. It is separated from the state_diff step +/// because we want to apply big batches of state-diffs all at once in db, as an optimization. We want the batch size related +/// to that to be different from the batch size used to get state_diffs from p2p. +/// +/// Headers are checked using the consensus signatures for forward sync. +/// +/// ## Backwards mode +/// +/// This is not implemented yet; however there will be a mode where we check the blocks +/// instead by going backwards from the latest block_hash verified on L1, and matching each earlier block with +/// the parent_hash of its successor. This mode won't need to check block signatures, but it can only help us +/// catch up with the latest block on L1. Syncing will switch in forward mode after that point, and consensus signatures +/// will be checked from that point on. +/// Until snap-sync is a thing, we also have to sync all state diffs in forward mode.
+pub struct ForwardSyncConfig { + pub headers_parallelization: usize, + pub headers_batch_size: usize, + pub transactions_parallelization: usize, + pub transactions_batch_size: usize, + pub state_diffs_parallelization: usize, + pub state_diffs_batch_size: usize, + pub events_parallelization: usize, + pub events_batch_size: usize, + pub classes_parallelization: usize, + pub classes_batch_size: usize, + pub apply_state_parallelization: usize, + pub apply_state_batch_size: usize, + pub disable_tries: bool, +} + +impl Default for ForwardSyncConfig { + fn default() -> Self { + Self { + headers_parallelization: 32, + headers_batch_size: 8, + transactions_parallelization: 32, + transactions_batch_size: 4, + state_diffs_parallelization: 32, + state_diffs_batch_size: 4, + events_parallelization: 32, + events_batch_size: 4, + classes_parallelization: 128, + classes_batch_size: 1, + apply_state_parallelization: 3, + apply_state_batch_size: 5, + disable_tries: false, + } + } +} + +impl ForwardSyncConfig { + #[allow(unused)] + fn low() -> Self { + Self { + headers_parallelization: 1, + headers_batch_size: 1, + transactions_parallelization: 1, + transactions_batch_size: 1, + state_diffs_parallelization: 1, + state_diffs_batch_size: 1, + events_parallelization: 1, + events_batch_size: 1, + classes_parallelization: 1, + classes_batch_size: 1, + apply_state_parallelization: 1, + apply_state_batch_size: 1, + disable_tries: false, + } + } + pub fn disable_tries(self, val: bool) -> Self { + Self { disable_tries: val, ..self } + } +} + +pub type P2pSync = SyncController; +pub fn forward_sync( + args: P2pPipelineArguments, + controller_config: SyncControllerConfig, + config: ForwardSyncConfig, +) -> P2pSync { + let probe = P2pHeadersProbe::new(args.p2p_commands.clone(), args.importer.clone()); + SyncController::new(P2pForwardSync::new(args, config), Some(probe.into()), controller_config) +} + +/// Events pipeline is currently always done after tx and receipts for now. 
+/// TODO: fix that when the db supports saving events separately. +pub struct P2pForwardSync { + headers_pipeline: HeadersSync, + transactions_pipeline: TransactionsSync, + state_diffs_pipeline: StateDiffsSync, + classes_pipeline: ClassesSync, + events_pipeline: EventsSync, + apply_state_pipeline: ApplyStateSync, + backend: Arc, +} + +impl P2pForwardSync { + pub fn new(args: P2pPipelineArguments, config: ForwardSyncConfig) -> Self { + let headers_pipeline = + super::headers::headers_pipeline(args.clone(), config.headers_parallelization, config.headers_batch_size); + let transactions_pipeline = super::transactions::transactions_pipeline( + args.clone(), + config.transactions_parallelization, + config.transactions_batch_size, + ); + let state_diffs_pipeline = super::state_diffs::state_diffs_pipeline( + args.clone(), + config.state_diffs_parallelization, + config.state_diffs_batch_size, + ); + let classes_pipeline = + super::classes::classes_pipeline(args.clone(), config.classes_parallelization, config.classes_batch_size); + let events_pipeline = + super::events::events_pipeline(args.clone(), config.events_parallelization, config.events_batch_size); + let apply_state_pipeline = crate::apply_state::apply_state_pipeline( + args.backend.clone(), + args.importer.clone(), + config.apply_state_parallelization, + config.apply_state_batch_size, + config.disable_tries, + ); + + Self { + headers_pipeline, + transactions_pipeline, + state_diffs_pipeline, + classes_pipeline, + events_pipeline, + apply_state_pipeline, + backend: args.backend, + } + } +} + +impl ForwardPipeline for P2pForwardSync { + async fn run(&mut self, target_height: u64, metrics: &mut SyncMetrics) -> anyhow::Result<()> { + loop { + tracing::trace!("stop_block={target_height:?}, hl={}", self.headers_pipeline.is_empty()); + + while self.headers_pipeline.can_schedule_more() + && self.headers_pipeline.next_input_block_n() <= target_height + { + let next_input_block_n = self.headers_pipeline.next_input_block_n(); 
+ self.headers_pipeline.push(next_input_block_n..next_input_block_n + 1, iter::once(())); + } + + let next_full_block = self.backend.head_status().next_full_block(); + + // We poll the consumers first. This seems to help bring the blocks/s higher. + // Poll order being important makes me worry that we're not polled enough. + // We would need to bring our own `FuturesOrdered` replacement to ensure that + // we can `poll` the inner `FuturesUnordered` so that it can make progress even + // when we are not trying to get a next element. + // We can emulate this behaviour by wrapping all of the futures in `tokio::spawn`, + // but it seems there is not much of a perf gain vs this order of polling. + tokio::select! { + Some(res) = self.apply_state_pipeline.next() => { + res?; + } + Some(res) = self.classes_pipeline.next() => { + res?; + } + Some(res) = self.state_diffs_pipeline.next(), + if self.classes_pipeline.can_schedule_more() && self.apply_state_pipeline.can_schedule_more() => + { + let (range, state_diffs) = res?; + self.classes_pipeline.push(range.clone(), state_diffs.iter().map(|s| s.all_declared_classes())); + self.apply_state_pipeline.push(range, state_diffs); + } + Some(res) = self.events_pipeline.next() => { + res?; + } + Some(res) = self.transactions_pipeline.next(), if self.events_pipeline.can_schedule_more() => { + let (range, headers) = res?; + self.events_pipeline.push(range, headers); + } + Some(res) = self.headers_pipeline.next(), if self.transactions_pipeline.can_schedule_more() && self.state_diffs_pipeline.can_schedule_more() => { + let (range, headers) = res?; + self.transactions_pipeline.push(range.clone(), headers.iter().cloned()); + self.state_diffs_pipeline.push(range, headers); + } + // all pipelines are empty, we're done :) + else => break Ok(()) + } + + let new_next_full_block = self.backend.head_status().next_full_block(); + for block_n in next_full_block..new_next_full_block { + // Notify of a new full block here. 
+ metrics.update(block_n, &self.backend).context("Updating metrics")?; + } + } + } + + fn next_input_block_n(&self) -> u64 { + self.headers_pipeline.next_input_block_n() + } + + fn is_empty(&self) -> bool { + self.headers_pipeline.is_empty() + && self.transactions_pipeline.is_empty() + && self.state_diffs_pipeline.is_empty() + && self.classes_pipeline.is_empty() + && self.events_pipeline.is_empty() + && self.apply_state_pipeline.is_empty() + } + + fn show_status(&self) { + tracing::info!( + "📥 Headers: {} | Txs: {} | StateDiffs: {} | Classes: {} | Events: {} | State: {}", + self.headers_pipeline.status(), + self.transactions_pipeline.status(), + self.state_diffs_pipeline.status(), + self.classes_pipeline.status(), + self.events_pipeline.status(), + self.apply_state_pipeline.status(), + ); + } + + fn latest_block(&self) -> Option { + self.backend.head_status().latest_full_block_n() + } +} + +pub struct P2pHeadersProbe { + p2p_commands: P2pCommands, + importer: Arc, +} +impl P2pHeadersProbe { + pub fn new(p2p_commands: P2pCommands, importer: Arc) -> Self { + Self { p2p_commands, importer } + } + + async fn block_exists_at(self: Arc, block_n: u64, peers: &HashSet) -> anyhow::Result { + let max_attempts = 4; + + for peer_id in peers.iter().take(max_attempts) { + tracing::debug!("Probe exists_at checking {peer_id}, {block_n}"); + match self.clone().block_exists_at_inner(*peer_id, block_n).await { + // Found it + Ok(true) => return Ok(true), + // Retry with another peer + Ok(false) => continue, + + Err(P2pError::Peer(err)) => { + tracing::debug!( + "Retrying probing step (block_n={block_n:?}) due to peer error: {err:#} [peer_id={peer_id}]" + ); + } + Err(P2pError::Internal(err)) => return Err(err.context("Peer to peer probing step")), + } + } + Ok(false) + } + async fn block_exists_at_inner(self: Arc, peer_id: PeerId, block_n: u64) -> Result { + let strm = self + .p2p_commands + .clone() + .make_headers_stream(peer_id, 
BlockStreamConfig::default().with_limit(1).with_start(block_n)) + .await; + tokio::pin!(strm); + let signed_header = match strm.try_next().await { + Ok(None) | Err(mc_p2p::SyncHandlerError::EndOfStream) => return Ok(false), + Ok(Some(signed_header)) => signed_header, + Err(err) => return Err(err.into()), + }; + + self.importer.verify_header(block_n, &signed_header)?; + + Ok(true) + } +} + +impl Probe for P2pHeadersProbe { + async fn forward_probe(self: Arc, current_next_block: u64) -> anyhow::Result> { + tracing::debug!("Forward probe current_next_block={current_next_block}",); + + // powers of two + let checks = iter::successors(Some(1u64), |n| n.checked_mul(2)).map(|n| current_next_block + n - 1); + + let mut peers = self.p2p_commands.clone().get_random_peers().await; + peers.remove(&self.p2p_commands.peer_id()); // remove ourselves + tracing::debug!("Probe got {} peers", peers.len()); + + let mut highest_known_block = current_next_block.checked_sub(1); + for (i, block_n) in checks.into_iter().enumerate() { + tracing::debug!("Probe check {i} current_next_block={current_next_block} {block_n}"); + + if !self.clone().block_exists_at(block_n, &peers).await? 
{ + return Ok(highest_known_block); + } + highest_known_block = Some(block_n); + } + + Ok(highest_known_block) + } +} diff --git a/crates/madara/client/sync2/src/p2p/headers.rs b/crates/madara/client/sync2/src/p2p/headers.rs new file mode 100644 index 000000000..cf9c369dc --- /dev/null +++ b/crates/madara/client/sync2/src/p2p/headers.rs @@ -0,0 +1,129 @@ +use super::{ + pipeline::{P2pError, P2pPipelineController, P2pPipelineSteps}, + P2pPipelineArguments, +}; +use crate::{import::BlockImporter, pipeline::PipelineController}; +use anyhow::Context; +use futures::{StreamExt, TryStreamExt}; +use mc_db::{stream::BlockStreamConfig, MadaraBackend}; +use mc_p2p::{P2pCommands, PeerId}; +use mp_block::{BlockHeaderWithSignatures, BlockId, Header}; +use starknet_core::types::Felt; +use std::{ops::Range, sync::Arc}; + +pub type HeadersSync = PipelineController>; +pub fn headers_pipeline( + P2pPipelineArguments { backend, peer_set, p2p_commands, importer }: P2pPipelineArguments, + parallelization: usize, + batch_size: usize, +) -> HeadersSync { + PipelineController::new( + P2pPipelineController::new(peer_set, HeadersSyncSteps { backend, p2p_commands, importer }), + parallelization, + batch_size, + ) +} + +pub struct HeadersSyncSteps { + backend: Arc, + p2p_commands: P2pCommands, + importer: Arc, +} + +impl P2pPipelineSteps for HeadersSyncSteps { + type InputItem = (); + type SequentialStepInput = Vec; + type Output = Vec

; + + async fn p2p_parallel_step( + self: Arc, + peer_id: PeerId, + block_range: Range, + _input: Vec, + ) -> Result { + tracing::debug!("p2p headers parallel step: {block_range:?}, peer_id: {peer_id}"); + let mut previous_block_hash = None; + let mut block_n = block_range.start; + let limit = block_range.end.saturating_sub(block_range.start); + let res: Vec<_> = self + .p2p_commands + .clone() + .make_headers_stream(peer_id, BlockStreamConfig::default().with_block_range(block_range.clone())) + .await + .take(limit as _) + .map(move |signed_header| { + let signed_header = signed_header?; + // verify parent hash for batch + if let Some(latest_block_hash) = previous_block_hash { + if latest_block_hash != signed_header.header.parent_block_hash { + return Err(P2pError::peer_error(format!( + "Mismatched parent_block_hash: {:#x}, expected {:#x}", + signed_header.header.parent_block_hash, latest_block_hash + ))); + } + } + + self.importer.verify_header(block_n, &signed_header)?; + + previous_block_hash = Some(signed_header.block_hash); + block_n += 1; + + Ok(signed_header) + }) + .try_collect() + .await?; + + if res.len() as u64 != limit { + return Err(P2pError::peer_error(format!( + "Unexpected returned batch len: {}, expected {}", + res.len(), + limit + ))); + } + Ok(res) + } + + async fn p2p_sequential_step( + self: Arc, + peer_id: PeerId, + block_range: Range, + input: Self::SequentialStepInput, + ) -> Result { + tracing::debug!("p2p headers sequential step: {block_range:?}, peer_id: {peer_id}"); + let Some(first_block) = input.first() else { + return Ok(vec![]); + }; + + // verify first block_hash matches with db + let parent_block_n = first_block.header.block_number.checked_sub(1); + let parent_block_hash = if let Some(block_n) = parent_block_n { + self.backend + .get_block_hash(&BlockId::Number(block_n)) + .context("Getting latest block hash from database.")? + .context("Mismatched headers / chain head number.")? 
+ } else { + Felt::ZERO // genesis' parent block + }; + + if first_block.header.parent_block_hash != parent_block_hash { + return Err(P2pError::peer_error(format!( + "Mismatched parent_block_hash: {:#x}, expected {parent_block_hash:#x}", + first_block.header.parent_block_hash + ))); + } + + tracing::debug!("Storing headers for {block_range:?}, peer_id: {peer_id}"); + for header in input.iter().cloned() { + self.importer.save_header(header.header.block_number, header)?; + } + + self.backend.head_status().headers.set(block_range.last()); + self.backend.save_head_status_to_db().context("Saving head status to db")?; + + Ok(input.into_iter().map(|h| h.header).collect()) + } + + fn starting_block_n(&self) -> Option { + self.backend.head_status().latest_full_block_n() + } +} diff --git a/crates/madara/client/sync2/src/p2p/mod.rs b/crates/madara/client/sync2/src/p2p/mod.rs new file mode 100644 index 000000000..2ee8739a9 --- /dev/null +++ b/crates/madara/client/sync2/src/p2p/mod.rs @@ -0,0 +1,30 @@ +use crate::import::BlockImporter; +use mc_db::MadaraBackend; +use mc_p2p::P2pCommands; +use peer_set::PeerSet; +use std::sync::Arc; + +mod classes; +mod events; +mod forward_sync; +mod headers; +mod peer_set; +mod pipeline; +mod state_diffs; +mod transactions; + +pub use forward_sync::*; + +#[derive(Clone)] +pub struct P2pPipelineArguments { + pub(crate) backend: Arc, + pub(crate) peer_set: Arc, + pub(crate) p2p_commands: P2pCommands, + pub(crate) importer: Arc, +} + +impl P2pPipelineArguments { + pub fn new(backend: Arc, p2p_commands: P2pCommands, importer: Arc) -> Self { + Self { importer, backend, peer_set: Arc::new(PeerSet::new(p2p_commands.clone())), p2p_commands } + } +} diff --git a/crates/madara/client/sync2/src/p2p/peer_set.rs b/crates/madara/client/sync2/src/p2p/peer_set.rs new file mode 100644 index 000000000..ddcddf3de --- /dev/null +++ b/crates/madara/client/sync2/src/p2p/peer_set.rs @@ -0,0 +1,312 @@ +use mc_p2p::{P2pCommands, PeerId}; +use rand::{thread_rng, Rng}; 
+use std::cmp; +use std::collections::{hash_map, BTreeSet, HashMap, HashSet}; +use std::num::Saturating; +use std::ops::Deref; +use std::sync::Arc; +use tokio::time::{Duration, Instant}; + +// TODO: add bandwidth metric +#[derive(Default, Debug, Clone)] +struct PeerStats { + // prioritize peers that are behaving correctly + successes: Saturating, + // avoid peers that are behaving incorrectly + // TODO: we may want to differenciate timeout failures and bad-data failures. + // => we probably want to evict bad-data failures in every case. + failures: Saturating, + // avoid peers that are currently in use + in_use_counter: Saturating, + rand_additional: Saturating, +} + +impl PeerStats { + fn increment_successes(&mut self) { + self.successes += 1; + } + fn increment_failures(&mut self) { + self.failures += 1; + } + fn increment_in_use(&mut self) { + self.in_use_counter += 1; + } + fn decrement_in_use(&mut self) { + self.in_use_counter -= 1; + } + fn reroll_rand(&mut self) { + self.rand_additional = Saturating(thread_rng().gen_range(-5..5)); + } + fn score(&self) -> i64 { + // it's okay to use peers that are currently in use, but we don't want to rely on only one peer all the time + // so, we put a temporary small malus if the peer is already in use. + // if we are using the peer a lot, we want that malus to be higher - we really don't want to spam a single peer. + let in_use_malus = if self.in_use_counter < Saturating(16) { + self.in_use_counter * Saturating(2) + } else { + self.in_use_counter * Saturating(5) + }; + let in_use_malus = Saturating(in_use_malus.0 as i32); + + // we only count up to 20 successes, to avoid having a score go too high. 
+ let successes = self.successes.min(Saturating(20)); + + (Saturating(-10) * self.failures + successes - in_use_malus + self.rand_additional).0.into() + } + + fn should_evict(&self) -> bool { + self.failures >= Saturating(5) + } +} + +#[derive(Debug, Clone, Eq, PartialEq)] +struct PeerSortedByScore { + peer_id: PeerId, + score: i64, +} +impl PeerSortedByScore { + pub fn new(peer_id: PeerId, stats: &PeerStats) -> Self { + Self { peer_id, score: stats.score() } + } +} + +impl Ord for PeerSortedByScore { + fn cmp(&self, other: &Self) -> cmp::Ordering { + // other and self are swapped because we want the highest score to lowest + + // score can have collisions, so we compare by peer_id next + other.score.cmp(&self.score).then(other.peer_id.cmp(&self.peer_id)) + } +} +impl PartialOrd for PeerSortedByScore { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +// TODO: add a mode to get a random peer regardless of score. +// Invariants: +// 1) there should be a one-to-one correspondance between the `queue` and `stats_by_peer` variable. +#[derive(Default, Debug)] +struct PeerSetInner { + queue: BTreeSet, + stats_by_peer: HashMap, + evicted_peers_ban_deadlines: HashMap, +} + +impl PeerSetInner { + const EVICTION_BAN_DELAY: Duration = Duration::from_secs(3); + + fn peek_next(&self) -> Option { + self.queue.first().map(|p| p.peer_id) + } + + fn update_stats(&mut self, peer: PeerId, f: impl FnOnce(&mut PeerStats)) { + match self.stats_by_peer.entry(peer) { + hash_map::Entry::Occupied(mut entry) => { + // Remove old queue entry. 
+ let removed = self.queue.remove(&PeerSortedByScore::new(peer, entry.get())); + debug_assert!(removed, "Invariant 1 violated"); + + // Update the stats in-place + f(entry.get_mut()); + + if entry.get().should_evict() { + // evict + entry.remove(); + tracing::debug!("Peer Set Evicting {peer} for {:?}", Self::EVICTION_BAN_DELAY); + self.evicted_peers_ban_deadlines.insert(peer, Instant::now() + Self::EVICTION_BAN_DELAY); + } else { + entry.get_mut().reroll_rand(); + + // Reinsert the queue entry with the new score. + // If insert returns false, the value was already in the queue - which would mean that the peer id is duplicated in the queue. + // `stats_by_peer` has PeerId as key and as such cannot have a duplicate peer id. This means that if there is a duplicated + // peer_id in the queue, there is not a one-to-one correspondence between the two datastructures. + let inserted = self.queue.insert(PeerSortedByScore::new(peer, entry.get())); + debug_assert!(inserted, "Invariant 1 violated"); + } + } + hash_map::Entry::Vacant(_entry) => {} + } + tracing::debug!("Peer Set Update stats for {peer}"); + } + + fn append_new_peers(&mut self, new_peers: impl IntoIterator) { + let now = Instant::now(); + self.evicted_peers_ban_deadlines.retain(|_, v| *v > now); + + for peer_id in new_peers.into_iter() { + if self.evicted_peers_ban_deadlines.contains_key(&peer_id) { + continue; + } + + if let hash_map::Entry::Vacant(entry) = self.stats_by_peer.entry(peer_id) { + let stats = PeerStats::default(); + self.queue.insert(PeerSortedByScore::new(peer_id, &stats)); + entry.insert(stats); + } + } + tracing::debug!("Append new peers now: {:#?} peers", self.stats_by_peer.len()); + } +} + +pub struct GetPeersInner { + wait_until: Option, + commands: P2pCommands, +} + +impl GetPeersInner { + /// We avoid spamming get_random_peers: the start of each get_random_peers request must be separated by at least this duration. 
+ /// This has no effect if the get_random_peers operation takes more time to complete than this delay. + const GET_RANDOM_PEERS_DELAY: Duration = Duration::from_millis(3000); + + pub fn new(commands: P2pCommands) -> Self { + // We have a start-up wait until, because the p2p service may take some time to boot up. + Self { commands, wait_until: Some(Instant::now() + Self::GET_RANDOM_PEERS_DELAY) } + } + + pub async fn get_new_peers(&mut self) -> HashSet { + let now = Instant::now(); + + if let Some(inst) = self.wait_until { + if inst > now { + tokio::time::sleep_until(inst).await; + } + } + self.wait_until = Some(now + Self::GET_RANDOM_PEERS_DELAY); + + let mut res = self.commands.get_random_peers().await; + tracing::trace!("Got get_random_peers answer: {res:?}"); + res.remove(&self.commands.peer_id()); // remove ourselves from the response, in case we find ourselves + if res.is_empty() { + tracing::warn!( + "Could not find any peer in network. Please check that your network configuration is correct." + ); + } + res + } +} + +// TODO: we may want to invalidate the peer list over time +// Mutex order: to statically ensure deadlocks are not possible, inner should always be locked after get_peers_mutex, if the two need to be taken at once. +pub struct PeerSet { + // Tokio mutex: when the peer set is empty, we want to .await to get new peers + // This is behind a mutex because we don't want to have concurrent get_more_peers requests. If there is already a request in flight, this mutex ensures we wait until that + // request finishes before trying to get even more peers. + get_more_peers_mutex: tokio::sync::Mutex, + // Std mutex: underlying datastructure, all accesses are sync + inner: std::sync::Mutex, +} + +impl PeerSet { + pub fn new(commands: P2pCommands) -> Self { + Self { + get_more_peers_mutex: tokio::sync::Mutex::new(GetPeersInner::new(commands)), + inner: std::sync::Mutex::new(PeerSetInner::default()), + } + } + + /// Returns the next peer to use. 
If there are no peers currently in the set, + /// it will start a get random peers command. + pub async fn next_peer(self: &Arc) -> anyhow::Result { + let peer_id = self.next_peer_inner().await?; + Ok(PeerGuard { peer_set: Some(self.clone()), peer_id }) + } + + async fn next_peer_inner(&self) -> anyhow::Result { + fn next_from_set(inner: &mut PeerSetInner) -> Option { + inner.peek_next().inspect(|peer| { + // this will update the queue order, so that we can return another peer next time this function is called. + inner.update_stats(*peer, |stats| { + stats.increment_in_use(); + }); + }) + } + + if let Some(peer) = next_from_set(&mut self.inner.lock().expect("Poisoned lock")) { + return Ok(peer); + } + + loop { + let mut guard = self.get_more_peers_mutex.lock().await; + + // Some other task may have filled the peer set for us while we were waiting. + if let Some(peer) = next_from_set(&mut self.inner.lock().expect("Poisoned lock")) { + return Ok(peer); + } + + let new_peers = guard.get_new_peers().await; + // note: this is the only place where the two locks are taken at the same time. + // see structure detail for lock order. + let mut inner = self.inner.lock().expect("Poisoned lock"); + inner.append_new_peers(new_peers); + + if let Some(peer) = next_from_set(&mut inner) { + return Ok(peer); + } + } + } + + /// Signal that the peer did not follow the protocol correctly, sent bad data or timed out. + /// We may want to avoid this peer in the future. + fn peer_operation_error(&self, peer_id: PeerId) { + tracing::debug!("peer_operation_error: {peer_id:?}"); + let mut inner = self.inner.lock().expect("Poisoned lock"); + inner.update_stats(peer_id, |stats| { + stats.decrement_in_use(); + stats.increment_failures(); + }) + } + + /// Signal that the operation with the peer was successful. + /// + // TODO: add a bandwidth argument to allow the peer set to score and avoid being drip-fed. 
+ fn peer_operation_success(&self, peer_id: PeerId) { + tracing::debug!("peer_operation_success: {peer_id:?}"); + let mut inner = self.inner.lock().expect("Poisoned lock"); + inner.update_stats(peer_id, |stats| { + stats.decrement_in_use(); + stats.increment_successes(); + }) + } + + /// Neutral signal that the operation was dropped by our decision. No malus nor bonus. + fn peer_operation_drop(&self, peer_id: PeerId) { + tracing::debug!("peer_operation_drop: {peer_id:?}"); + let mut inner = self.inner.lock().expect("Poisoned lock"); + inner.update_stats(peer_id, |stats| { + stats.decrement_in_use(); + }) + } +} + +pub struct PeerGuard { + peer_set: Option>, + peer_id: PeerId, +} + +impl Deref for PeerGuard { + type Target = PeerId; + fn deref(&self) -> &Self::Target { + &self.peer_id + } +} + +impl PeerGuard { + pub fn success(mut self) { + self.peer_set.take().expect("Peer set already taken").peer_operation_success(self.peer_id) + } + pub fn error(mut self) { + self.peer_set.take().expect("Peer set already taken").peer_operation_error(self.peer_id) + } +} + +impl Drop for PeerGuard { + // Note: we use an Option because success() and error() will still call the destructor. 
+ fn drop(&mut self) { + if let Some(peer_set) = self.peer_set.take() { + peer_set.peer_operation_drop(self.peer_id) + } + } +} diff --git a/crates/madara/client/sync2/src/p2p/pipeline.rs b/crates/madara/client/sync2/src/p2p/pipeline.rs new file mode 100644 index 000000000..cc55037a5 --- /dev/null +++ b/crates/madara/client/sync2/src/p2p/pipeline.rs @@ -0,0 +1,128 @@ +use super::peer_set::{PeerGuard, PeerSet}; +use crate::{ + import::BlockImportError, + pipeline::{ApplyOutcome, PipelineSteps}, + util::AbortOnDrop, +}; +use anyhow::Context; +use futures::Future; +use mc_p2p::PeerId; +use mc_p2p::SyncHandlerError; +use std::{borrow::Cow, ops::Range, sync::Arc}; + +#[derive(Debug, thiserror::Error)] +pub enum P2pError { + #[error("Internal error: {0:#}")] + Internal(#[from] anyhow::Error), + #[error("Peer error: {0}")] + Peer(Cow<'static, str>), +} + +impl From for P2pError { + fn from(value: SyncHandlerError) -> Self { + match value { + SyncHandlerError::Internal(err) => Self::Internal(err), + SyncHandlerError::BadRequest(err) => Self::Peer(err), + SyncHandlerError::EndOfStream => Self::peer_error("Stream ended unexpectedly"), + } + } +} + +impl From for P2pError { + fn from(value: BlockImportError) -> Self { + if value.is_internal() { + Self::Internal(anyhow::anyhow!(value)) + } else { + Self::Peer(value.to_string().into()) + } + } +} + +impl P2pError { + pub fn peer_error(err: impl Into>) -> Self { + Self::Peer(err.into()) + } +} + +pub trait P2pPipelineSteps: Send + Sync + 'static { + type InputItem: Send + Sync + Clone; + type SequentialStepInput: Send + Sync + 'static; + type Output: Send + Sync + Clone; + + fn p2p_parallel_step( + self: Arc, + peer_id: PeerId, + block_range: Range, + input: Vec, + ) -> impl Future> + Send; + + fn p2p_sequential_step( + self: Arc, + peer_id: PeerId, + block_n_range: Range, + input: Self::SequentialStepInput, + ) -> impl Future> + Send; + + fn starting_block_n(&self) -> Option; +} + +pub struct P2pPipelineController { + peer_set: 
Arc, + steps: Arc, +} + +impl P2pPipelineController { + pub fn new(peer_set: Arc, steps: S) -> Self { + Self { peer_set, steps: Arc::new(steps) } + } +} + +// Note: we wrap the tasks in [`AbortOnDrop`] so that they can advance even when they are not polled. +// This may also allow the runtime to execute these functions other threads, but I am unsure how this +// would actually affect perf. The main reason is to make them advance even when the futures_unordered is not being polled. +impl PipelineSteps for P2pPipelineController { + type InputItem = S::InputItem; + type SequentialStepInput = (PeerGuard, S::SequentialStepInput); + type Output = S::Output; + + async fn parallel_step( + self: Arc, + block_range: Range, + input: Vec, + ) -> anyhow::Result { + AbortOnDrop::spawn(async move {loop { + let peer_guard = self.peer_set.next_peer().await.context("Getting peer from peer set")?; + match self.steps.clone().p2p_parallel_step(*peer_guard, block_range.clone(), input.clone()).await { + Ok(out) => return Ok((peer_guard, out)), + Err(P2pError::Peer(err)) => { + tracing::debug!("Retrying pipeline parallel step (block_n_range={block_range:?}) due to peer error: {err:#} [peer_id={}]", *peer_guard); + peer_guard.error(); + } + Err(P2pError::Internal(err)) => return Err(err.context("Peer to peer pipeline parallel step")), + } + }}).await + } + + async fn sequential_step( + self: Arc, + block_range: Range, + (peer_guard, input): Self::SequentialStepInput, + ) -> anyhow::Result> { + match self.steps.clone().p2p_sequential_step(*peer_guard, block_range.clone(), input).await { + Ok(output) => { + peer_guard.success(); + Ok(ApplyOutcome::Success(output)) + } + Err(P2pError::Peer(err)) => { + tracing::debug!("Retrying pipeline for block (block_n={block_range:?}) due to peer error during sequential step: {err} [peer_id={}]", *peer_guard); + peer_guard.error(); + Ok(ApplyOutcome::Retry) + } + Err(P2pError::Internal(err)) => Err(err.context("Peer to peer pipeline sequential step")), + 
} + } + + fn starting_block_n(&self) -> Option { + self.steps.starting_block_n() + } +} diff --git a/crates/madara/client/sync2/src/p2p/state_diffs.rs b/crates/madara/client/sync2/src/p2p/state_diffs.rs new file mode 100644 index 000000000..7e3736b09 --- /dev/null +++ b/crates/madara/client/sync2/src/p2p/state_diffs.rs @@ -0,0 +1,91 @@ +use super::{ + pipeline::{P2pError, P2pPipelineController, P2pPipelineSteps}, + P2pPipelineArguments, +}; +use crate::{import::BlockImporter, pipeline::PipelineController}; +use anyhow::Context; +use futures::TryStreamExt; +use mc_db::{stream::BlockStreamConfig, MadaraBackend}; +use mc_p2p::{P2pCommands, PeerId}; +use mp_block::Header; +use mp_state_update::StateDiff; +use std::{iter, ops::Range, sync::Arc}; + +pub type StateDiffsSync = PipelineController>; +pub fn state_diffs_pipeline( + P2pPipelineArguments { backend, peer_set, p2p_commands, importer }: P2pPipelineArguments, + parallelization: usize, + batch_size: usize, +) -> StateDiffsSync { + PipelineController::new( + P2pPipelineController::new(peer_set, StateDiffsSyncSteps { backend, p2p_commands, importer }), + parallelization, + batch_size, + ) +} +pub struct StateDiffsSyncSteps { + backend: Arc, + p2p_commands: P2pCommands, + importer: Arc, +} + +impl P2pPipelineSteps for StateDiffsSyncSteps { + type InputItem = Header; + type SequentialStepInput = Vec; + type Output = Vec; + + async fn p2p_parallel_step( + self: Arc, + peer_id: PeerId, + block_range: Range, + input: Vec, + ) -> Result { + if input.iter().all(|i| i.state_diff_length == Some(0)) { + return Ok(iter::repeat(StateDiff::default()).take(input.len()).collect()); + } + + tracing::debug!("p2p state_diffs parallel step: {block_range:?}, peer_id: {peer_id}"); + let strm = self + .p2p_commands + .clone() + .make_state_diffs_stream( + peer_id, + BlockStreamConfig::default().with_block_range(block_range.clone()), + input.iter().map(|header| header.state_diff_length.unwrap_or_default() as _).collect::>(), + ) + .await; + 
tokio::pin!(strm); + + let mut state_diffs = vec![]; + for (block_n, header) in block_range.zip(input) { + let state_diff = strm.try_next().await?.ok_or(P2pError::peer_error("Expected to receive item"))?; + state_diffs.push(state_diff.clone()); + self.importer + .run_in_rayon_pool(move |importer| { + importer.verify_state_diff(block_n, &state_diff, &header, /* allow_pre_v0_13_2 */ false)?; + importer.save_state_diff(block_n, state_diff) + }) + .await? + } + + Ok(state_diffs) + } + + async fn p2p_sequential_step( + self: Arc, + peer_id: PeerId, + block_range: Range, + input: Self::SequentialStepInput, + ) -> Result { + tracing::debug!("p2p state_diffs sequential step: {block_range:?}, peer_id: {peer_id}"); + if let Some(block_n) = block_range.last() { + self.backend.head_status().state_diffs.set(Some(block_n)); + self.backend.save_head_status_to_db().context("Saving head status to db")?; + } + Ok(input) + } + + fn starting_block_n(&self) -> Option { + self.backend.head_status().latest_full_block_n() + } +} diff --git a/crates/madara/client/sync2/src/p2p/transactions.rs b/crates/madara/client/sync2/src/p2p/transactions.rs new file mode 100644 index 000000000..58e4d00c3 --- /dev/null +++ b/crates/madara/client/sync2/src/p2p/transactions.rs @@ -0,0 +1,93 @@ +use super::{ + pipeline::{P2pError, P2pPipelineController, P2pPipelineSteps}, + P2pPipelineArguments, +}; +use crate::{import::BlockImporter, pipeline::PipelineController}; +use anyhow::Context; +use futures::TryStreamExt; +use mc_db::{stream::BlockStreamConfig, MadaraBackend}; +use mc_p2p::{P2pCommands, PeerId}; +use mp_block::Header; +use std::{ops::Range, sync::Arc}; + +pub type TransactionsSync = PipelineController>; +pub fn transactions_pipeline( + P2pPipelineArguments { backend, peer_set, p2p_commands, importer }: P2pPipelineArguments, + parallelization: usize, + batch_size: usize, +) -> TransactionsSync { + PipelineController::new( + P2pPipelineController::new(peer_set, TransactionsSyncSteps { backend, 
p2p_commands, importer }), + parallelization, + batch_size, + ) +} +pub struct TransactionsSyncSteps { + backend: Arc, + p2p_commands: P2pCommands, + importer: Arc, +} + +impl P2pPipelineSteps for TransactionsSyncSteps { + type InputItem = Header; + type SequentialStepInput = Vec
; + type Output = Vec
; + + async fn p2p_parallel_step( + self: Arc, + peer_id: PeerId, + block_range: Range, + input: Vec
, + ) -> Result { + if input.iter().all(|i| i.transaction_count == 0) { + return Ok(input); + } + + tracing::debug!("p2p transactions parallel step: {block_range:?}, peer_id: {peer_id}"); + let strm = self + .p2p_commands + .clone() + .make_transactions_stream( + peer_id, + BlockStreamConfig::default().with_block_range(block_range.clone()), + input.iter().map(|input| input.transaction_count as _).collect::>(), + ) + .await; + tokio::pin!(strm); + + for (block_n, header) in block_range.zip(input.iter().cloned()) { + let transactions = strm.try_next().await?.ok_or(P2pError::peer_error("Expected to receive item"))?; + self.importer + .run_in_rayon_pool(move |importer| { + importer.verify_transactions( + block_n, + &transactions, + &header, + /* allow_pre_v0_13_2 */ false, + )?; + importer.save_transactions(block_n, transactions) + }) + .await? + } + + Ok(input) + } + + async fn p2p_sequential_step( + self: Arc, + peer_id: PeerId, + block_range: Range, + input: Self::SequentialStepInput, + ) -> Result { + tracing::debug!("p2p transactions sequential step: {block_range:?}, peer_id: {peer_id}"); + if let Some(block_n) = block_range.last() { + self.backend.head_status().transactions.set(Some(block_n)); + self.backend.save_head_status_to_db().context("Saving head status to db")?; + } + Ok(input) + } + + fn starting_block_n(&self) -> Option { + self.backend.head_status().latest_full_block_n() + } +} diff --git a/crates/madara/client/sync2/src/pipeline.rs b/crates/madara/client/sync2/src/pipeline.rs new file mode 100644 index 000000000..5c0c3ece7 --- /dev/null +++ b/crates/madara/client/sync2/src/pipeline.rs @@ -0,0 +1,200 @@ +use futures::{ + future::{BoxFuture, OptionFuture}, + stream::FuturesOrdered, + Future, FutureExt, StreamExt, +}; +use std::{collections::VecDeque, fmt, ops::Range, sync::Arc}; + +struct RetryInput { + block_range: Range, + input: Vec, +} + +#[derive(Debug)] +pub enum ApplyOutcome { + Success(Output), + Retry, +} + +pub trait PipelineSteps: Sync + Send 
+ 'static { + type InputItem: Send + Sync + Clone; + type SequentialStepInput: Send + Sync; + type Output: Send + Sync + Clone; + + fn parallel_step( + self: Arc, + block_range: Range, + input: Vec, + ) -> impl Future> + Send; + fn sequential_step( + self: Arc, + block_range: Range, + input: Self::SequentialStepInput, + ) -> impl Future>> + Send; + + fn starting_block_n(&self) -> Option; +} + +pub struct PipelineController { + steps: Arc, + queue: FuturesOrdered>, + parallelization: usize, + batch_size: usize, + applying: Option>, + next_inputs: VecDeque, + next_block_n_to_batch: u64, + last_applied_block_n: Option, +} + +type ParallelStepFuture = BoxFuture< + 'static, + anyhow::Result<(::SequentialStepInput, RetryInput<::InputItem>)>, +>; +type SequentialStepFuture = BoxFuture< + 'static, + anyhow::Result<(ApplyOutcome<::Output>, RetryInput<::InputItem>)>, +>; + +impl PipelineController { + pub fn new(steps: S, parallelization: usize, batch_size: usize) -> Self { + let starting_block_n = steps.starting_block_n(); + let next_input_block_n = starting_block_n.map(|block_n| block_n + 1).unwrap_or(/* next is genesis */ 0); + Self { + steps: Arc::new(steps), + queue: Default::default(), + parallelization, + batch_size, + applying: None, + next_inputs: VecDeque::with_capacity(2 * batch_size), + next_block_n_to_batch: next_input_block_n, + last_applied_block_n: starting_block_n, + } + } + + pub fn next_input_block_n(&self) -> u64 { + self.next_block_n_to_batch + self.next_inputs.len() as u64 + } + pub fn last_applied_block_n(&self) -> Option { + self.last_applied_block_n + } + pub fn input_batch_size(&self) -> usize { + self.batch_size + } + + pub fn can_schedule_more(&self) -> bool { + if self.queue.len() >= self.parallelization { + return false; + } + let slots_remaining = self.parallelization - self.queue.len(); + self.next_inputs.len() <= slots_remaining * self.batch_size + } + + pub fn is_empty(&self) -> bool { + self.applying.is_none() && self.queue.is_empty() && 
self.next_inputs.is_empty() + } + pub fn queue_len(&self) -> usize { + self.queue.len() + } + pub fn is_applying(&self) -> bool { + self.applying.is_some() + } + + fn make_parallel_step_future(&self, input: RetryInput) -> ParallelStepFuture { + let steps = Arc::clone(&self.steps); + async move { steps.parallel_step(input.block_range.clone(), input.input.clone()).await.map(|el| (el, input)) } + .boxed() + } + fn make_sequential_step_future( + &self, + input: S::SequentialStepInput, + retry_input: RetryInput, + ) -> SequentialStepFuture { + let steps = Arc::clone(&self.steps); + async move { steps.sequential_step(retry_input.block_range.clone(), input).await.map(|el| (el, retry_input)) } + .boxed() + } + + fn schedule_new_batch(&mut self) { + // make batch + let size = usize::min(self.next_inputs.len(), self.batch_size); + + let new_next_input_block_n = self.next_block_n_to_batch + size as u64; + let block_range = self.next_block_n_to_batch..new_next_input_block_n; + self.next_block_n_to_batch = new_next_input_block_n; + let input = self.next_inputs.drain(0..size).collect(); + self.queue.push_back(self.make_parallel_step_future(RetryInput { block_range, input })); + } + + pub fn push(&mut self, block_range: Range, input: impl IntoIterator) { + let next_input_block_n = self.next_input_block_n(); + // Skip items that we have already handled. + self.next_inputs + .extend(input.into_iter().zip(block_range).skip_while(|(_, n)| next_input_block_n < *n).map(|(v, _)| v)); + } + + pub async fn next(&mut self) -> Option, S::Output)>> { + loop { + while self.next_inputs.len() >= self.batch_size && self.queue.len() <= self.parallelization { + // Prefer making full batches. + self.schedule_new_batch(); + } + if self.queue.is_empty() && !self.next_inputs.is_empty() { + // We make a smaller batch when we have nothing to do, to ensure progress. + self.schedule_new_batch(); + } + + tokio::select! 
{ + Some(res) = OptionFuture::from(self.applying.as_mut()) => { + self.applying = None; + match res { + Err(err) => return Some(Err(err)), + Ok((ApplyOutcome::Success(out), retry_input)) => { + if let Some(last) = retry_input.block_range.clone().last() { + self.last_applied_block_n = Some(last); + } + return Some(Ok((retry_input.block_range, out))); + } + Ok((ApplyOutcome::Retry, retry_input)) => self.queue.push_front(self.make_parallel_step_future(retry_input)), + } + } + Some(res) = self.queue.next(), if self.applying.is_none() => { + match res { + Ok((input, retry_input)) => { + self.applying = Some(self.make_sequential_step_future(input, retry_input)); + } + Err(err) => return Some(Err(err)), + } + } + else => return None, + } + } + } +} + +pub struct PipelineStatus { + pub jobs: usize, + pub applying: bool, + pub latest_applied: Option, +} + +impl PipelineController { + pub fn status(&self) -> PipelineStatus { + PipelineStatus { + jobs: self.queue_len(), + applying: self.is_applying(), + latest_applied: self.last_applied_block_n(), + } + } +} + +impl fmt::Display for PipelineStatus { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use crate::util::fmt_option; + + write!(f, "{} [{}", fmt_option(self.latest_applied, "N"), self.jobs)?; + if self.applying { + write!(f, "+")?; + } + write!(f, "]") + } +} diff --git a/crates/madara/client/sync2/src/sync.rs b/crates/madara/client/sync2/src/sync.rs new file mode 100644 index 000000000..49a128521 --- /dev/null +++ b/crates/madara/client/sync2/src/sync.rs @@ -0,0 +1,187 @@ +use crate::metrics::SyncMetrics; +use futures::{ + future::{BoxFuture, OptionFuture}, + Future, FutureExt, +}; +use mc_eth::state_update::{L1HeadReceiver, L1StateUpdate}; +use std::{sync::Arc, time::Duration}; +use tokio::time::Instant; + +pub trait ForwardPipeline { + fn run( + &mut self, + target_block_n: u64, + metrics: &mut SyncMetrics, + ) -> impl Future> + Send; + fn next_input_block_n(&self) -> u64; + fn show_status(&self); + 
/// Return false when no work can be done. + fn is_empty(&self) -> bool; + fn latest_block(&self) -> Option; +} + +pub trait Probe { + /// Returns the new highest known block. + fn forward_probe( + self: Arc, + next_block_n: u64, + ) -> impl Future>> + Send + 'static; +} + +pub struct SyncControllerConfig { + pub l1_head_recv: L1HeadReceiver, + pub stop_at_block_n: Option, + pub stop_on_sync: bool, +} + +pub struct SyncController { + forward_pipeline: P, + probe: Option>, + config: SyncControllerConfig, + current_l1_head: Option, + current_probe_future: Option>>>, + probe_highest_known_block: Option, + probe_wait_deadline: Option, + sync_metrics: SyncMetrics, +} + +/// Avoid spamming the probe. +const PROBE_WAIT_DELAY: Duration = Duration::from_secs(2); +impl SyncController { + pub fn new(forward_pipeline: P, probe: Option>, config: SyncControllerConfig) -> Self { + Self { + sync_metrics: SyncMetrics::register(forward_pipeline.next_input_block_n()), + forward_pipeline, + probe, + config, + current_l1_head: None, + current_probe_future: None, + probe_highest_known_block: Default::default(), + probe_wait_deadline: None, + } + } + + pub async fn run(&mut self, mut ctx: mp_utils::service::ServiceContext) -> anyhow::Result<()> { + let mut interval = tokio::time::interval(Duration::from_secs(3)); + interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + loop { + tokio::select! 
{ + _ = ctx.cancelled() => break Ok(()), + _ = interval.tick() => self.show_status(), + res = self.run_inner() => { + res?; + self.show_status(); + if self.config.stop_on_sync { + tracing::info!("🌐 Reached stop-on-sync condition, shutting down node..."); + ctx.cancel_global(); + } else { + tracing::info!("🌐 Sync process ended"); + } + break Ok(()) + } + } + } + } + + fn target_height(&self) -> Option { + fn aggregate_options(a: Option, b: Option, f: impl FnOnce(u64, u64) -> u64) -> Option { + match (a, b) { + (None, None) => None, + (None, Some(b)) => Some(b), + (Some(a), None) => Some(a), + (Some(a), Some(b)) => Some(f(a, b)), + } + } + + let mut target_block = self.current_l1_head.as_ref().map(|h| h.block_number); + target_block = aggregate_options(target_block, self.probe_highest_known_block, u64::max); + + // Bound by stop_at_block_n + + aggregate_options(target_block, self.config.stop_at_block_n, u64::min) + } + + async fn run_inner(&mut self) -> anyhow::Result<()> { + loop { + if self.forward_pipeline.is_empty() + && self + .config + .stop_at_block_n + .is_some_and(|stop_at| self.forward_pipeline.next_input_block_n() > stop_at) + { + // End condition + break Ok(()); + } + let target_height = self.target_height(); + + let can_run_pipeline = !self.forward_pipeline.is_empty() + || target_height.is_some_and(|b| b >= self.forward_pipeline.next_input_block_n()); + tracing::trace!( + "can run {:?} {:?} {}", + can_run_pipeline, + target_height, + self.forward_pipeline.next_input_block_n() + ); + + if let Some(probe) = &self.probe { + tracing::trace!("run inner {:?} {:?}", self.forward_pipeline.next_input_block_n(), target_height); + if self.current_probe_future.is_none() && !can_run_pipeline { + let fut = probe.clone().forward_probe(self.forward_pipeline.next_input_block_n()); + let delay = self.probe_wait_deadline; + + self.current_probe_future = Some( + async move { + if let Some(deadline) = delay { + tokio::time::sleep_until(deadline).await; + } + fut.await + } + 
.boxed(), + ); + } + } + + tokio::select! { + Ok(()) = self.config.l1_head_recv.changed() => { + self.current_l1_head = self.config.l1_head_recv.borrow_and_update().clone(); + } + Some(res) = OptionFuture::from(self.current_probe_future.as_mut()) => { + self.current_probe_future = None; + self.probe_wait_deadline = None; + let probe_new_highest_block = res?; + // Only delay the probe when it did not return any new block. + if self.probe_highest_known_block == probe_new_highest_block { + self.probe_wait_deadline = Some(Instant::now() + PROBE_WAIT_DELAY); + } + self.probe_highest_known_block = probe_new_highest_block; + tracing::trace!("probe result {:?}", self.probe_highest_known_block); + } + Some(res) = OptionFuture::from( + target_height.filter(|_| can_run_pipeline) + .map(|target| self.forward_pipeline.run(target, &mut self.sync_metrics)) + ) => + { + res?; + } + else => break Ok(()), + } + } + } + + fn show_status(&self) { + use crate::util::fmt_option; + + let latest_block = self.forward_pipeline.latest_block(); + let throughput_sec = self.sync_metrics.counter.get_throughput(); + let target_height = self.target_height(); + self.forward_pipeline.show_status(); + + // fmt_option will unwrap the Option or else show the given string + + tracing::info!( + "🔗 Sync is at {}/{} [{throughput_sec:.2} blocks/s]", + fmt_option(latest_block, "N"), + fmt_option(target_height, "?") + ); + } +} diff --git a/crates/madara/client/sync2/src/util.rs b/crates/madara/client/sync2/src/util.rs new file mode 100644 index 000000000..b39e048cc --- /dev/null +++ b/crates/madara/client/sync2/src/util.rs @@ -0,0 +1,39 @@ +use futures::Future; +use std::{fmt, pin::Pin, task}; +use tokio::task::JoinHandle; + +pub fn fmt_option(opt: Option, or_else: impl fmt::Display) -> impl fmt::Display { + DisplayFromFn(move |f| if let Some(val) = &opt { val.fmt(f) } else { or_else.fmt(f) }) +} + +pub struct DisplayFromFn) -> fmt::Result>(F); +impl) -> fmt::Result> fmt::Display for DisplayFromFn { + fn 
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (self.0)(f) + } +} + +pub struct AbortOnDrop(JoinHandle); +impl AbortOnDrop { + #[track_caller] // forward the tokio track_caller + pub fn spawn + Send + 'static>(future: F) -> Self { + Self(tokio::spawn(future)) + } +} +impl Drop for AbortOnDrop { + fn drop(&mut self) { + self.0.abort() + } +} +impl Future for AbortOnDrop { + type Output = T; + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> task::Poll { + // Panic: the task is never aborted, except on drop in which case it cannot be polled again. + Pin::new(&mut self.get_mut().0).poll(cx).map(|r| r.expect("Join error")) + } +} +impl From> for AbortOnDrop { + fn from(value: JoinHandle) -> Self { + Self(value) + } +} diff --git a/crates/madara/node/Cargo.toml b/crates/madara/node/Cargo.toml index 4b1e7c4bf..7317935a5 100644 --- a/crates/madara/node/Cargo.toml +++ b/crates/madara/node/Cargo.toml @@ -22,7 +22,6 @@ name = "madara" # Madara mc-analytics = { workspace = true } -mc-block-import = { workspace = true } mc-block-production = { workspace = true } mc-db = { workspace = true } mc-devnet = { workspace = true } @@ -30,8 +29,9 @@ mc-eth = { workspace = true } mc-gateway-client = { workspace = true } mc-gateway-server = { workspace = true } mc-mempool = { workspace = true } +mc-p2p = { workspace = true } mc-rpc = { workspace = true } -mc-sync = { workspace = true } +mc-sync2 = { workspace = true } mc-telemetry = { workspace = true } mp-block = { workspace = true } mp-chain-config = { workspace = true } @@ -52,6 +52,7 @@ futures = { workspace = true, features = ["thread-pool"] } http.workspace = true hyper = { version = "0.14", features = ["server"] } jsonrpsee.workspace = true +multiaddr.workspace = true rand.workspace = true rayon.workspace = true reqwest.workspace = true @@ -83,4 +84,3 @@ tracing-subscriber = { workspace = true, features = ["env-filter"] } [features] default = [] -sound = ["mc-sync/m"] diff --git 
a/crates/madara/node/src/cli/chain_config_overrides.rs b/crates/madara/node/src/cli/chain_config_overrides.rs index 61808505e..335375672 100644 --- a/crates/madara/node/src/cli/chain_config_overrides.rs +++ b/crates/madara/node/src/cli/chain_config_overrides.rs @@ -107,6 +107,7 @@ pub struct ChainConfigOverridesInner { #[serde(skip_serializing)] #[serde(deserialize_with = "deserialize_private_key")] pub private_key: ZeroingPrivateKey, + pub p2p_bootstrap_nodes: Vec, pub mempool_tx_limit: usize, pub mempool_declare_tx_limit: usize, #[serde(deserialize_with = "deserialize_optional_duration", serialize_with = "serialize_optional_duration")] @@ -131,6 +132,7 @@ impl ChainConfigOverrideParams { eth_core_contract_address: chain_config.eth_core_contract_address, eth_gps_statement_verifier: chain_config.eth_gps_statement_verifier, private_key: chain_config.private_key, + p2p_bootstrap_nodes: chain_config.p2p_bootstrap_nodes, mempool_tx_limit: chain_config.mempool_tx_limit, mempool_declare_tx_limit: chain_config.mempool_declare_tx_limit, mempool_tx_max_age: chain_config.mempool_tx_max_age, @@ -185,6 +187,7 @@ impl ChainConfigOverrideParams { versioned_constants, eth_gps_statement_verifier: chain_config_overrides.eth_gps_statement_verifier, private_key: chain_config_overrides.private_key, + p2p_bootstrap_nodes: chain_config_overrides.p2p_bootstrap_nodes, mempool_tx_limit: chain_config_overrides.mempool_tx_limit, mempool_declare_tx_limit: chain_config_overrides.mempool_declare_tx_limit, mempool_tx_max_age: chain_config_overrides.mempool_tx_max_age, diff --git a/crates/madara/node/src/cli/l2.rs b/crates/madara/node/src/cli/l2.rs index 3765f76db..d7ec7b9fb 100644 --- a/crates/madara/node/src/cli/l2.rs +++ b/crates/madara/node/src/cli/l2.rs @@ -1,11 +1,10 @@ -use std::{sync::Arc, time::Duration}; - -use mc_sync::fetch::fetchers::WarpUpdateConfig; +use anyhow::Context; +use http::HeaderName; +use http::HeaderValue; +use mc_gateway_client::GatewayProvider; use 
mp_chain_config::ChainConfig; -use starknet_api::core::ChainId; - -use mc_sync::fetch::fetchers::FetchConfig; -use mp_utils::parsers::{parse_duration, parse_url}; +use mp_utils::parsers::parse_url; +use std::sync::Arc; use url::Url; use super::FGW_DEFAULT_PORT; @@ -17,15 +16,15 @@ pub struct L2SyncParams { #[clap(env = "MADARA_SYNC_DISABLED", long, alias = "no-sync")] pub l2_sync_disabled: bool, - /// The block you want to start syncing from. This will most probably break your database. - #[clap(env = "MADARA_UNSAFE_STARTING_BLOCK", long, value_name = "BLOCK NUMBER")] - pub unsafe_starting_block: Option, - - /// Disable state root verification. When importing a block, the state root verification is the most expensive operation. - /// Disabling it will mean the sync service will have a huge speed-up, at a security cost - // TODO(docs): explain the security cost - #[clap(env = "MADARA_DISABLE_ROOT", long)] - pub disable_root: bool, + // /// The block you want to start syncing from. This will most probably break your database. + // #[clap(env = "MADARA_UNSAFE_STARTING_BLOCK", long, value_name = "BLOCK NUMBER")] + // pub unsafe_starting_block: Option, + /// Disable the global tries computation. + /// When importing a block, the state root computation is the most expensive operation. + /// Disabling it will mean a big speed-up in syncing speed, but storage proofs will be + /// unavailable, and producing blocks will fail to compute the state root. + #[clap(env = "MADARA_DISABLE_TRIES", long)] + pub disable_tries: bool, /// Gateway api key to avoid rate limiting (optional). #[clap(env = "MADARA_GATEWAY_KEY", long, value_name = "API KEY")] @@ -51,108 +50,72 @@ pub struct L2SyncParams { #[arg(env = "MADARA_WARP_UPDATE_SHUTDOWN_RECEIVER", long, default_value_t = false)] pub warp_update_shutdown_receiver: bool, - /// Polling interval, in seconds. This only affects the sync service once it has caught up with the blockchain tip. 
- #[clap( - env = "MADARA_SYNC_POLLING_INTERVAL", - long, - value_parser = parse_duration, - default_value = "4s", - value_name = "SYNC POLLING INTERVAL", - help = "Set the sync polling interval (e.g., '4s', '100ms', '1min')" - )] - pub sync_polling_interval: Duration, - - /// Pending block polling interval, in seconds. This only affects the sync service once it has caught up with the blockchain tip. - #[clap( - env = "MADARA_PENDING_BLOCK_POLL_INTERVAL", - long, - value_parser = parse_duration, - default_value = "2s", - value_name = "PENDING BLOCK POLL INTERVAL", - help = "Set the pending block poll interval (e.g., '2s', '500ms', '30s')" - )] - pub pending_block_poll_interval: Duration, - - /// Disable sync polling. This currently means that the sync process will not import any more block once it has caught up with the - /// blockchain tip. - #[clap(env = "MADARA_NO_SYNC_POLLING", long)] - pub no_sync_polling: bool, - - /// Number of blocks to sync. May be useful for benchmarking the sync service. - #[clap(env = "MADARA_N_BLOCKS_TO_SYNC", long, value_name = "NUMBER OF BLOCKS")] - pub n_blocks_to_sync: Option, + /// Stop sync at a specific block_n. May be useful for benchmarking the sync service. + #[clap(env = "MADARA_N_BLOCKS_TO_SYNC", long, value_name = "BLOCK NUMBER")] + pub sync_stop_at: Option, /// Gracefully shutdown Madara once it has finished synchronizing all /// blocks. This can either be once the node has caught up with the head of - /// the chain or when it has synced as many blocks as specified by - /// --n-blocks-to-sync. + /// the chain or when it has synced to the target height by using + /// `--sync-stop-at `. #[clap(env = "MADARA_STOP_ON_SYNC", long, default_value_t = false)] pub stop_on_sync: bool, - /// Periodically create a backup, for debugging purposes. Use it with `--backup-dir `. 
- #[clap(env = "MADARA_BACKUP_EVERY_N_BLOCKS", long, value_name = "NUMBER OF BLOCKS")] - pub backup_every_n_blocks: Option, - - /// Periodically flushes the database from ram to disk based on the number - /// of blocks synchronized since the last flush. You can set this to a - /// higher number depending on how fast your machine is at synchronizing - /// blocks and how much ram it has available. - /// - /// Be aware that blocks might still be flushed to db earlier based on the - /// value of --flush-every-n-seconds. - /// - /// Note that keeping this value high could lead to blocks being stored in - /// ram for longer periods of time before they are written to disk. This - /// might be an issue for chains which synchronize slowly. - #[clap( - env = "MADARA_FLUSH_EVERY_N_BLOCKS", - value_name = "FLUSH EVERY N BLOCKS", - long, - value_parser = clap::value_parser!(u64).range(..=10_000), - default_value_t = 1_000 - )] - pub flush_every_n_blocks: u64, - - /// Periodically flushes the database from ram to disk based on the elapsed - /// time since the last flush. You can set this to a higher number - /// depending on how fast your machine is at synchronizing blocks and how - /// much ram it has available. - /// - /// Be aware that blocks might still be flushed to db earlier based on the - /// value of --flush-every-n-blocks. - /// - /// Note that keeping this value high could lead to blocks being stored in - /// ram for longer periods of time before they are written to disk. This - /// might be an issue for chains which synchronize slowly. - #[clap( - env = "MADARA_FLUSH_EVERY_N_BLOCKS", - value_name = "FLUSH EVERY N BLOCKS", - long, - value_parser = clap::value_parser!(u64).range(..=3_600), - default_value_t = 5 - )] - pub flush_every_n_seconds: u64, - - /// Number of blocks to fetch in parallel. This only affects sync time, and - /// does not affect the node once it has reached the tip of the chain. 
- /// Increasing this can lead to lower sync times at the cost of higher cpu - /// and ram utilization. - #[clap( - env = "MADARA_SYNC_PARALLELISM", - long, value_name = "SYNC PARALLELISM", - default_value_t = 10, - value_parser = clap::value_parser!(u8).range(1..) - )] - pub sync_parallelism: u8, + // /// Periodically create a backup, for debugging purposes. Use it with `--backup-dir `. + // #[clap(env = "MADARA_BACKUP_EVERY_N_BLOCKS", long, value_name = "NUMBER OF BLOCKS")] + // pub backup_every_n_blocks: Option, + + // /// Periodically flushes the database from ram to disk based on the number + // /// of blocks synchronized since the last flush. You can set this to a + // /// higher number depending on how fast your machine is at synchronizing + // /// blocks and how much ram it has available. + // /// + // /// Be aware that blocks might still be flushed to db earlier based on the + // /// value of --flush-every-n-seconds. + // /// + // /// Note that keeping this value high could lead to blocks being stored in + // /// ram for longer periods of time before they are written to disk. This + // /// might be an issue for chains which synchronize slowly. + // #[clap( + // env = "MADARA_FLUSH_EVERY_N_BLOCKS", + // value_name = "FLUSH EVERY N BLOCKS", + // long, + // value_parser = clap::value_parser!(u64).range(..=10_000), + // default_value_t = 1_000 + // )] + // pub flush_every_n_blocks: u64, + + // /// Periodically flushes the database from ram to disk based on the elapsed + // /// time since the last flush. You can set this to a higher number + // /// depending on how fast your machine is at synchronizing blocks and how + // /// much ram it has available. + // /// + // /// Be aware that blocks might still be flushed to db earlier based on the + // /// value of --flush-every-n-blocks. + // /// + // /// Note that keeping this value high could lead to blocks being stored in + // /// ram for longer periods of time before they are written to disk. 
This + // /// might be an issue for chains which synchronize slowly. + // #[clap( + // env = "MADARA_FLUSH_EVERY_N_BLOCKS", + // value_name = "FLUSH EVERY N BLOCKS", + // long, + // value_parser = clap::value_parser!(u64).range(..=3_600), + // default_value_t = 5 + // )] + // pub flush_every_n_seconds: u64, + #[clap(env = "MADARA_P2P_SYNC", long)] + pub p2p_sync: bool, + // // Documentation needs to be kept in sync with [`mp_block_import::BlockValidationContext::compute_v0_13_2_hashes`]. + // /// UNSTABLE: Used for experimental p2p support. When p2p sync will be fully implemented, this field will go away, + // /// and we will always compute v0.13.2 hashes. However, we can't verify the old pre-v0.13.2 blocks yet during sync, + // /// so this field bridges the gap. When set, we will always trust the integrity of pre-v0.13.2 blocks during sync. + // #[clap(long)] + // pub compute_v0_13_2_hashes: bool, } impl L2SyncParams { - pub fn block_fetch_config( - &self, - chain_id: ChainId, - chain_config: Arc, - warp_update: Option, - ) -> FetchConfig { + pub fn create_feeder_client(&self, chain_config: Arc) -> anyhow::Result> { let (gateway, feeder_gateway) = match &self.gateway_url { Some(url) => ( url.join("/gateway/").expect("Error parsing url"), @@ -161,21 +124,15 @@ impl L2SyncParams { None => (chain_config.gateway_url.clone(), chain_config.feeder_gateway_url.clone()), }; - let polling = if self.no_sync_polling { None } else { Some(self.sync_polling_interval) }; - - FetchConfig { - gateway, - feeder_gateway, - chain_id, - verify: !self.disable_root, - api_key: self.gateway_key.clone(), - sync_polling_interval: polling, - n_blocks_to_sync: self.n_blocks_to_sync, - flush_every_n_blocks: self.flush_every_n_blocks, - flush_every_n_seconds: self.flush_every_n_seconds, - stop_on_sync: self.stop_on_sync, - sync_parallelism: self.sync_parallelism, - warp_update, + let mut client = GatewayProvider::new(gateway, feeder_gateway); + + if let Some(api_key) = &self.gateway_key { + 
client.add_header( + HeaderName::from_static("x-throttling-bypass"), + HeaderValue::from_str(api_key).with_context(|| "Invalid API key format")?, + ) } + + Ok(Arc::new(client)) } } diff --git a/crates/madara/node/src/cli/mod.rs b/crates/madara/node/src/cli/mod.rs index 6e0d46f4b..9f6b56825 100644 --- a/crates/madara/node/src/cli/mod.rs +++ b/crates/madara/node/src/cli/mod.rs @@ -1,3 +1,11 @@ +use anyhow::Context; +use clap::ArgGroup; +use l2::L2SyncParams; +use mp_chain_config::ChainConfig; +use std::path::PathBuf; +use std::str::FromStr; +use std::sync::Arc; + pub mod analytics; pub mod block_production; pub mod chain_config_overrides; @@ -5,25 +13,20 @@ pub mod db; pub mod gateway; pub mod l1; pub mod l2; +pub mod p2p; pub mod rpc; pub mod telemetry; -use crate::cli::l1::L1SyncParams; -use analytics::AnalyticsParams; -use anyhow::Context; + +pub use analytics::*; pub use block_production::*; pub use chain_config_overrides::*; pub use db::*; pub use gateway::*; -pub use l2::*; +pub use l1::*; +pub use p2p::*; pub use rpc::*; -use std::str::FromStr; pub use telemetry::*; -use clap::ArgGroup; -use mp_chain_config::ChainConfig; -use std::path::PathBuf; -use std::sync::Arc; - /// Combines multiple cli args into a single easy to use preset /// /// Some args configurations are getting pretty lengthy and easy to get wrong. @@ -171,6 +174,10 @@ pub struct RunCmd { #[clap(flatten)] pub rpc_params: RpcParams, + #[allow(missing_docs)] + #[clap(flatten)] + pub p2p_params: P2pParams, + #[allow(missing_docs)] #[clap(flatten)] pub block_production_params: BlockProductionParams, diff --git a/crates/madara/node/src/cli/p2p.rs b/crates/madara/node/src/cli/p2p.rs new file mode 100644 index 000000000..05bad447c --- /dev/null +++ b/crates/madara/node/src/cli/p2p.rs @@ -0,0 +1,26 @@ +use std::path::PathBuf; + +#[derive(Clone, Debug, clap::Args)] +pub struct P2pParams { + /// Enable the p2p service. + #[arg(env = "MADARA_P2P", long)] + pub p2p: bool, + + /// Port for peer-to-peer. 
By default, it will ask the os for an unused port. + #[arg(env = "MADARA_P2P_PORT", long)] + pub p2p_port: Option, + + /// Peer-to-peer identity file. By default, we generate a new one everytime the node starts. + /// + /// Use `--p2p-save-identity` with this argument to generate and save the identity file + /// if it is not present. If the `--p2p-save-identity` argument is not set and the identity file + /// does not exist, the node will exit with an error. + /// + /// Usage example: `--p2p-identity-file identity.json --p2p-save-identity`. + #[arg(env = "MADARA_P2P_IDENTITY_FILE", long)] + pub p2p_identity_file: Option, + + /// Use with `--p2p-identity-file`. + #[arg(env = "MADARA_P2P_SAVE_IDENTITY", long)] + pub p2p_save_identity: bool, +} diff --git a/crates/madara/node/src/cli/rpc.rs b/crates/madara/node/src/cli/rpc.rs index 07c7ecbe3..75d975373 100644 --- a/crates/madara/node/src/cli/rpc.rs +++ b/crates/madara/node/src/cli/rpc.rs @@ -57,7 +57,7 @@ impl FromStr for Cors { pub struct RpcParams { /// Disables the user RPC endpoint. This includes all methods which are part /// of the official starknet specs. - #[arg(env = "MADARA_RPC_DISABLE", long, default_value_t = false)] + #[arg(env = "MADARA_RPC_DISABLE", long, default_value_t = false, alias = "no-rpc")] pub rpc_disable: bool, /// Exposes the user RPC endpoint on address 0.0.0.0. This generally means diff --git a/crates/madara/node/src/cli/telemetry.rs b/crates/madara/node/src/cli/telemetry.rs index aa1927d06..42d790582 100644 --- a/crates/madara/node/src/cli/telemetry.rs +++ b/crates/madara/node/src/cli/telemetry.rs @@ -4,7 +4,7 @@ use clap::Args; #[derive(Debug, Clone, Args)] pub struct TelemetryParams { /// Enable connecting to the Madara telemetry server. - #[arg(env = "MADARA_TELEMETRY", long, alias = "telemetry")] + #[arg(env = "MADARA_TELEMETRY", long)] pub telemetry: bool, /// The URL of the telemetry server. 
diff --git a/crates/madara/node/src/main.rs b/crates/madara/node/src/main.rs index d71b89073..0302056c8 100644 --- a/crates/madara/node/src/main.rs +++ b/crates/madara/node/src/main.rs @@ -10,16 +10,14 @@ use clap::Parser; use cli::RunCmd; use http::{HeaderName, HeaderValue}; use mc_analytics::Analytics; -use mc_block_import::BlockImporter; use mc_db::{DatabaseService, TrieLogConfig}; use mc_gateway_client::GatewayProvider; use mc_mempool::{GasPriceProvider, L1DataProvider, Mempool, MempoolLimits}; use mc_rpc::providers::{AddTransactionProvider, ForwardToProvider, MempoolAddTxProvider}; -use mc_sync::fetch::fetchers::WarpUpdateConfig; use mc_telemetry::{SysInfo, TelemetryService}; use mp_oracle::pragma::PragmaOracleBuilder; use mp_utils::service::{MadaraServiceId, ServiceMonitor}; -use service::{BlockProductionService, GatewayService, L1SyncService, L2SyncService, RpcService}; +use service::{BlockProductionService, GatewayService, L1SyncService, P2pService, RpcService, SyncService}; use starknet_api::core::ChainId; use std::sync::Arc; @@ -163,6 +161,8 @@ async fn main() -> anyhow::Result<()> { mempool.load_txs_from_db().context("Loading mempool transactions")?; let mempool = Arc::new(mempool); + let (l1_head_snd, l1_head_recv) = tokio::sync::watch::channel(None); + let service_l1_sync = L1SyncService::new( &run_cmd.l1_sync_params, &service_db, @@ -172,61 +172,62 @@ async fn main() -> anyhow::Result<()> { run_cmd.is_sequencer(), run_cmd.is_devnet(), Arc::clone(&mempool), + l1_head_snd, ) .await .context("Initializing the l1 sync service")?; - // L2 Sync - - let importer = Arc::new( - BlockImporter::new(Arc::clone(service_db.backend()), run_cmd.l2_sync_params.unsafe_starting_block) - .context("Initializing importer service")?, - ); - - let warp_update = if run_cmd.args_preset.warp_update_receiver { - let mut deferred_service_start = vec![]; - let mut deferred_service_stop = vec![]; - - if !run_cmd.rpc_params.rpc_disable { - 
deferred_service_start.push(MadaraServiceId::RpcUser); - } - - if run_cmd.rpc_params.rpc_admin { - deferred_service_start.push(MadaraServiceId::RpcAdmin); - } - - if run_cmd.gateway_params.feeder_gateway_enable { - deferred_service_start.push(MadaraServiceId::Gateway); - } - - if run_cmd.telemetry_params.telemetry { - deferred_service_start.push(MadaraServiceId::Telemetry); - } + // P2p - if run_cmd.is_sequencer() { - deferred_service_start.push(MadaraServiceId::BlockProduction); - deferred_service_stop.push(MadaraServiceId::L2Sync); - } + let mut service_p2p = + P2pService::new(run_cmd.p2p_params.clone(), &service_db).await.context("Initializing p2p service")?; - Some(WarpUpdateConfig { - warp_update_port_rpc: run_cmd.l2_sync_params.warp_update_port_rpc, - warp_update_port_fgw: run_cmd.l2_sync_params.warp_update_port_fgw, - warp_update_shutdown_sender: run_cmd.l2_sync_params.warp_update_shutdown_sender, - warp_update_shutdown_receiver: run_cmd.l2_sync_params.warp_update_shutdown_receiver, - deferred_service_start, - deferred_service_stop, - }) - } else { - None - }; + // L2 Sync - let service_l2_sync = L2SyncService::new( + // let _warp_update = if run_cmd.args_preset.warp_update_receiver { + // let mut deferred_service_start = vec![]; + // let mut deferred_service_stop = vec![]; + + // if !run_cmd.rpc_params.rpc_disable { + // deferred_service_start.push(MadaraServiceId::RpcUser); + // } + + // if run_cmd.rpc_params.rpc_admin { + // deferred_service_start.push(MadaraServiceId::RpcAdmin); + // } + + // if run_cmd.gateway_params.feeder_gateway_enable { + // deferred_service_start.push(MadaraServiceId::Gateway); + // } + + // if run_cmd.telemetry_params.telemetry { + // deferred_service_start.push(MadaraServiceId::Telemetry); + // } + + // if run_cmd.is_sequencer() { + // deferred_service_start.push(MadaraServiceId::BlockProduction); + // deferred_service_stop.push(MadaraServiceId::L2Sync); + // } + + // Some(WarpUpdateConfig { + // warp_update_port_rpc: 
run_cmd.l2_sync_params.warp_update_port_rpc, + // warp_update_port_fgw: run_cmd.l2_sync_params.warp_update_port_fgw, + // warp_update_shutdown_sender: run_cmd.l2_sync_params.warp_update_shutdown_sender, + // warp_update_shutdown_receiver: run_cmd.l2_sync_params.warp_update_shutdown_receiver, + // deferred_service_start, + // deferred_service_stop, + // }) + // } else { + // None + // }; + + let service_l2_sync = SyncService::new( &run_cmd.l2_sync_params, - Arc::clone(&chain_config), - &service_db, - importer, - service_telemetry.new_handle(), - warp_update, + service_db.backend(), + service_p2p.commands(), + l1_head_recv, + // service_telemetry.new_handle(), + // warp_update, ) .await .context("Initializing sync service")?; @@ -243,15 +244,10 @@ async fn main() -> anyhow::Result<()> { // Block production - let importer = Arc::new( - BlockImporter::new(Arc::clone(service_db.backend()), run_cmd.l2_sync_params.unsafe_starting_block) - .context("Initializing importer service")?, - ); let service_block_production = BlockProductionService::new( &run_cmd.block_production_params, &service_db, Arc::clone(&mempool), - importer, Arc::clone(&l1_data_provider), )?; @@ -301,6 +297,7 @@ async fn main() -> anyhow::Result<()> { let app = ServiceMonitor::default() .with(service_db)? .with(service_l1_sync)? + .with(service_p2p)? .with(service_l2_sync)? .with(service_block_production)? .with(service_rpc_user)? 
@@ -321,6 +318,10 @@ async fn main() -> anyhow::Result<()> { app.activate(MadaraServiceId::L1Sync); } + if run_cmd.p2p_params.p2p { + app.activate(MadaraServiceId::P2p); + } + if warp_update_receiver { app.activate(MadaraServiceId::L2Sync); } else if run_cmd.is_sequencer() { diff --git a/crates/madara/node/src/service/block_production.rs b/crates/madara/node/src/service/block_production.rs index 5f6146e64..786db89ed 100644 --- a/crates/madara/node/src/service/block_production.rs +++ b/crates/madara/node/src/service/block_production.rs @@ -1,6 +1,5 @@ use crate::cli::block_production::BlockProductionParams; use anyhow::Context; -use mc_block_import::{BlockImporter, BlockValidationContext}; use mc_block_production::{metrics::BlockProductionMetrics, BlockProductionTask}; use mc_db::{DatabaseService, MadaraBackend}; use mc_devnet::{ChainGenesisDescription, DevnetKeys}; @@ -10,7 +9,6 @@ use std::{io::Write, sync::Arc}; pub struct BlockProductionService { backend: Arc, - block_import: Arc, mempool: Arc, metrics: Arc, l1_data_provider: Arc, @@ -23,7 +21,6 @@ impl BlockProductionService { config: &BlockProductionParams, db_service: &DatabaseService, mempool: Arc, - block_import: Arc, l1_data_provider: Arc, ) -> anyhow::Result { let metrics = Arc::new(BlockProductionMetrics::register()); @@ -33,7 +30,6 @@ impl BlockProductionService { l1_data_provider, mempool, metrics, - block_import, n_devnet_contracts: config.devnet_contracts, }) } @@ -44,16 +40,14 @@ impl Service for BlockProductionService { // TODO(cchudant,2024-07-30): special threading requirements for the block production task #[tracing::instrument(skip(self, runner), fields(module = "BlockProductionService"))] async fn start<'a>(&mut self, runner: ServiceRunner<'a>) -> anyhow::Result<()> { - let Self { backend, l1_data_provider, mempool, metrics, block_import, .. } = self; + let Self { backend, l1_data_provider, mempool, metrics, .. 
} = self; let block_production_task = BlockProductionTask::new( Arc::clone(backend), - Arc::clone(block_import), Arc::clone(mempool), Arc::clone(metrics), Arc::clone(l1_data_provider), - ) - .await?; + )?; runner.service_loop(move |ctx| block_production_task.block_production_task(ctx)); @@ -75,7 +69,7 @@ impl BlockProductionService { /// called on node startup even if sequencer block production is not yet /// enabled. This happens during warp updates on a local sequencer. pub async fn setup_devnet(&self) -> anyhow::Result<()> { - let Self { backend, n_devnet_contracts, block_import, .. } = self; + let Self { backend, n_devnet_contracts, .. } = self; let keys = if backend.get_latest_block_n().context("Getting the latest block number in db")?.is_none() { // deploy devnet genesis @@ -86,18 +80,8 @@ impl BlockProductionService { let contracts = genesis_config.add_devnet_contracts(*n_devnet_contracts).context("Failed to add devnet contracts")?; - let genesis_block = - genesis_config.build(backend.chain_config()).context("Building genesis block from devnet config")?; - - block_import - .add_block( - genesis_block, - BlockValidationContext::new(backend.chain_config().chain_id.clone()).trust_class_hashes(true), - ) - .await - .context("Importing devnet genesis block")?; - - contracts.save_to_db(backend).context("Saving predeployed devnet contract keys to database")?; + // Deploy genesis block + genesis_config.build_and_store(backend).context("Building and storing genesis block")?; contracts } else { diff --git a/crates/madara/node/src/service/l1.rs b/crates/madara/node/src/service/l1.rs index e305d741c..f345d5adf 100644 --- a/crates/madara/node/src/service/l1.rs +++ b/crates/madara/node/src/service/l1.rs @@ -3,6 +3,7 @@ use alloy::primitives::Address; use anyhow::Context; use mc_db::{DatabaseService, MadaraBackend}; use mc_eth::client::{EthereumClient, L1BlockMetrics}; +use mc_eth::state_update::L1HeadSender; use mc_mempool::{GasPriceProvider, Mempool}; use mp_block::H160; 
use mp_utils::service::{MadaraServiceId, PowerOfTwo, Service, ServiceId, ServiceRunner}; @@ -13,6 +14,7 @@ use std::time::Duration; #[derive(Clone)] pub struct L1SyncService { db_backend: Arc, + l1_head_snd: Option, eth_client: Option>, l1_gas_provider: GasPriceProvider, chain_id: ChainId, @@ -32,6 +34,7 @@ impl L1SyncService { authority: bool, devnet: bool, mempool: Arc, + l1_head_snd: L1HeadSender, ) -> anyhow::Result { let eth_client = if !config.l1_sync_disabled && (config.l1_endpoint.is_some() || !devnet) { if let Some(l1_rpc_url) = &config.l1_endpoint { @@ -60,7 +63,7 @@ impl L1SyncService { if gas_price_sync_enabled { let eth_client = eth_client .clone() - .context("L1 gas prices require the ethereum service to be enabled. Either disable gas prices syncing using `--gas-price 0`, or disable L1 sync using the `--no-l1-sync` argument.")?; + .context("L1 gas prices require the ethereum service to be enabled. Either disable gas prices syncing using `--gas-price 0` and `--blob-gas-price 0`, or enable L1 sync by removing the `--no-l1-sync` argument.")?; // running at-least once before the block production service tracing::info!("⏳ Getting initial L1 gas prices"); mc_eth::l1_gas_price::gas_price_worker_once(ð_client, &l1_gas_provider, gas_price_poll) @@ -76,6 +79,7 @@ impl L1SyncService { gas_price_sync_disabled: !gas_price_sync_enabled, gas_price_poll, mempool, + l1_head_snd: Some(l1_head_snd), }) } } @@ -96,6 +100,7 @@ impl Service for L1SyncService { if let Some(eth_client) = &self.eth_client { // enabled + let l1_head_snd = self.l1_head_snd.take().context("Service already started")?; let eth_client = Arc::clone(eth_client); runner.service_loop(move |ctx| { mc_eth::sync::l1_sync_worker( @@ -106,6 +111,7 @@ impl Service for L1SyncService { gas_price_sync_disabled, gas_price_poll, mempool, + l1_head_snd, ctx, ) }); diff --git a/crates/madara/node/src/service/l2.rs b/crates/madara/node/src/service/l2.rs index 2c843729c..dad792b80 100644 --- 
a/crates/madara/node/src/service/l2.rs +++ b/crates/madara/node/src/service/l2.rs @@ -1,84 +1,96 @@ -use crate::cli::L2SyncParams; -use mc_block_import::BlockImporter; -use mc_db::{DatabaseService, MadaraBackend}; -use mc_sync::fetch::fetchers::{FetchConfig, WarpUpdateConfig}; -use mc_sync::SyncConfig; -use mc_telemetry::TelemetryHandle; -use mp_chain_config::ChainConfig; +use mc_db::MadaraBackend; +use mc_eth::state_update::L1HeadReceiver; +use mc_p2p::P2pCommands; +use mc_sync2::{ + import::{BlockImporter, BlockValidationConfig}, + SyncControllerConfig, +}; use mp_utils::service::{MadaraServiceId, PowerOfTwo, Service, ServiceId, ServiceRunner}; use std::sync::Arc; -use std::time::Duration; + +use crate::cli::l2::L2SyncParams; #[derive(Clone)] -pub struct L2SyncService { +struct StartArgs { + p2p_commands: Option, + l1_head_recv: L1HeadReceiver, db_backend: Arc, - block_importer: Arc, - fetch_config: FetchConfig, - backup_every_n_blocks: Option, - starting_block: Option, - telemetry: Arc, - pending_block_poll_interval: Duration, + params: L2SyncParams, +} + +#[derive(Clone)] +pub struct SyncService { + start_args: Option, + disabled: bool, } -impl L2SyncService { +impl SyncService { pub async fn new( config: &L2SyncParams, - chain_config: Arc, - db: &DatabaseService, - block_importer: Arc, - telemetry: TelemetryHandle, - warp_update: Option, + db: &Arc, + mut p2p_commands: Option, + l1_head_recv: L1HeadReceiver, ) -> anyhow::Result { - let fetch_config = config.block_fetch_config(chain_config.chain_id.clone(), chain_config.clone(), warp_update); - - tracing::info!("🛰️ Using feeder gateway URL: {}", fetch_config.feeder_gateway.as_str()); - + if !config.p2p_sync { + p2p_commands = None; + } Ok(Self { - db_backend: Arc::clone(db.backend()), - fetch_config, - starting_block: config.unsafe_starting_block, - backup_every_n_blocks: config.backup_every_n_blocks, - block_importer, - telemetry: Arc::new(telemetry), - pending_block_poll_interval: 
config.pending_block_poll_interval, + start_args: (!config.l2_sync_disabled).then_some(StartArgs { + p2p_commands, + l1_head_recv, + db_backend: db.clone(), + params: config.clone(), + }), + disabled: config.l2_sync_disabled, }) } } #[async_trait::async_trait] -impl Service for L2SyncService { +impl Service for SyncService { async fn start<'a>(&mut self, runner: ServiceRunner<'a>) -> anyhow::Result<()> { - let L2SyncService { - db_backend, - fetch_config, - backup_every_n_blocks, - starting_block, - pending_block_poll_interval, - block_importer, - telemetry, - } = self.clone(); - let telemetry = Arc::clone(&telemetry); + if self.disabled { + return Ok(()); + } + let this = self.start_args.take().expect("Service already started"); + let importer = Arc::new(BlockImporter::new(this.db_backend.clone(), BlockValidationConfig::default())); + + let config = SyncControllerConfig { + l1_head_recv: this.l1_head_recv, + stop_at_block_n: this.params.sync_stop_at, + stop_on_sync: this.params.stop_on_sync, + }; + + runner.service_loop(move |ctx| async move { + if this.params.p2p_sync { + use mc_sync2::p2p::{forward_sync, ForwardSyncConfig, P2pPipelineArguments}; - runner.service_loop(move |ctx| { - mc_sync::l2_sync_worker( - db_backend, - ctx, - fetch_config, - SyncConfig { - block_importer, - starting_block, - backup_every_n_blocks, - telemetry, - pending_block_poll_interval, - }, - ) + let Some(p2p_commands) = this.p2p_commands else { + anyhow::bail!("Cannot enable --p2p-sync without starting the peer-to-peer service using --p2p.") + }; + let args = P2pPipelineArguments::new(this.db_backend, p2p_commands, importer); + forward_sync(args, config, ForwardSyncConfig::default().disable_tries(this.params.disable_tries)) + .run(ctx) + .await + } else { + let gateway = this.params.create_feeder_client(this.db_backend.chain_config().clone())?; + mc_sync2::gateway::forward_sync( + this.db_backend, + importer, + gateway, + config, + 
mc_sync2::gateway::ForwardSyncConfig::default().disable_tries(this.params.disable_tries), + ) + .run(ctx) + .await + } }); Ok(()) } } -impl ServiceId for L2SyncService { +impl ServiceId for SyncService { #[inline(always)] fn svc_id(&self) -> PowerOfTwo { MadaraServiceId::L2Sync.svc_id() diff --git a/crates/madara/node/src/service/mod.rs b/crates/madara/node/src/service/mod.rs index 84a36057b..9fa97111c 100644 --- a/crates/madara/node/src/service/mod.rs +++ b/crates/madara/node/src/service/mod.rs @@ -2,10 +2,12 @@ mod block_production; mod gateway; mod l1; mod l2; +mod p2p; mod rpc; pub use block_production::BlockProductionService; pub use gateway::GatewayService; pub use l1::L1SyncService; -pub use l2::L2SyncService; +pub use l2::SyncService; +pub use p2p::P2pService; pub use rpc::RpcService; diff --git a/crates/madara/node/src/service/p2p.rs b/crates/madara/node/src/service/p2p.rs new file mode 100644 index 000000000..a6b487000 --- /dev/null +++ b/crates/madara/node/src/service/p2p.rs @@ -0,0 +1,59 @@ +use crate::cli::P2pParams; +use anyhow::Context; +use mc_db::DatabaseService; +use mc_p2p::P2pCommands; +use mp_utils::service::{MadaraServiceId, PowerOfTwo, Service, ServiceId, ServiceRunner}; +use std::time::Duration; + +pub struct P2pService { + enabled: bool, + // add_transaction_provider: Arc, + p2p: Option, +} + +impl P2pService { + pub async fn new( + config: P2pParams, + db: &DatabaseService, + // add_transaction_provider: Arc, + ) -> anyhow::Result { + let p2p = if config.p2p { + let p2p_config = mc_p2p::P2pConfig { + bootstrap_nodes: db.backend().chain_config().p2p_bootstrap_nodes.clone(), + port: config.p2p_port, + status_interval: Duration::from_secs(3), + identity_file: config.p2p_identity_file, + save_identity: config.p2p_save_identity, + }; + let p2p = mc_p2p::MadaraP2pBuilder::new(p2p_config, db.backend().clone() /*add_transaction_provider*/) + .context("Building p2p service")?; + Some(p2p) + } else { + None + }; + + Ok(Self { p2p, enabled: 
config.p2p }) + } + + pub fn commands(&mut self) -> Option { + self.p2p.as_ref().map(|p2p| p2p.commands()) + } +} + +#[async_trait::async_trait] +impl Service for P2pService { + async fn start<'a>(&mut self, runner: ServiceRunner<'a>) -> anyhow::Result<()> { + if self.enabled { + let p2p = self.p2p.take().expect("Service already started"); + runner.service_loop(move |ctx| async move { p2p.build().context("Building p2p service")?.run(ctx).await }); + } + Ok(()) + } +} + +impl ServiceId for P2pService { + #[inline(always)] + fn svc_id(&self) -> PowerOfTwo { + MadaraServiceId::P2p.svc_id() + } +} diff --git a/crates/madara/primitives/block/Cargo.toml b/crates/madara/primitives/block/Cargo.toml index df04f3807..3eb1d8d1c 100644 --- a/crates/madara/primitives/block/Cargo.toml +++ b/crates/madara/primitives/block/Cargo.toml @@ -14,20 +14,30 @@ workspace = true [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] +[features] +default = [] +testing = [] + [dependencies] # Madara +bitvec = { workspace = true } +bonsai-trie = { workspace = true } mp-chain-config = { workspace = true } +mp-class = { workspace = true } mp-receipt = { workspace = true } +mp-state-update = { workspace = true } mp-transactions = { workspace = true } # Starknet blockifier = { workspace = true } starknet-types-core = { workspace = true } starknet-types-rpc = { workspace = true } +malachite-core-types.workspace = true # Other primitive-types.workspace = true +rayon = { workspace = true } serde = { workspace = true, features = ["derive"] } thiserror = { workspace = true } diff --git a/crates/madara/primitives/block/src/commitments.rs b/crates/madara/primitives/block/src/commitments.rs new file mode 100644 index 000000000..a4b3b339e --- /dev/null +++ b/crates/madara/primitives/block/src/commitments.rs @@ -0,0 +1,187 @@ +use crate::{header::PendingHeader, Header, TransactionWithReceipt}; +use bitvec::vec::BitVec; +use mp_chain_config::StarknetVersion; +use 
mp_receipt::EventWithTransactionHash; +use mp_state_update::StateDiff; +use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator}; +use starknet_types_core::{ + felt::Felt, + hash::{Pedersen, Poseidon, StarkHash}, +}; + +pub struct CommitmentComputationContext { + pub protocol_version: StarknetVersion, + pub chain_id: Felt, +} + +pub struct TransactionAndReceiptCommitment { + pub transaction_commitment: Felt, + pub receipt_commitment: Felt, + pub transaction_count: u64, +} + +impl TransactionAndReceiptCommitment { + /// Uses the rayon pool. + pub fn compute(ctx: &CommitmentComputationContext, transactions: &[TransactionWithReceipt]) -> Self { + // Override pre-v0.13.2 transaction hash computation + let starknet_version = StarknetVersion::max(ctx.protocol_version, StarknetVersion::V0_13_2); + + // Verify transaction hashes. Also compute the (hash with signature, receipt hash). + let tx_hashes_with_signature_and_receipt_hashes: Vec<_> = transactions + .par_iter() + .enumerate() + .map(|(_index, tx)| { + let got = tx.transaction.compute_hash(ctx.chain_id, starknet_version, /* is_query */ false); + (tx.transaction.compute_hash_with_signature(got, starknet_version), tx.receipt.compute_hash()) + }) + .collect(); + + let transaction_commitment = compute_transaction_commitment( + tx_hashes_with_signature_and_receipt_hashes.iter().map(|(fst, _)| *fst), + ctx.protocol_version, + ); + + let receipt_commitment = compute_receipt_commitment( + tx_hashes_with_signature_and_receipt_hashes.iter().map(|(_, snd)| *snd), + starknet_version, + ); + + Self { transaction_commitment, receipt_commitment, transaction_count: transactions.len() as u64 } + } +} + +pub struct StateDiffCommitment { + pub state_diff_commitment: Felt, + pub state_diff_length: u64, +} + +impl StateDiffCommitment { + pub fn compute(_ctx: &CommitmentComputationContext, state_diff: &StateDiff) -> Self { + Self { state_diff_length: state_diff.len() as u64, state_diff_commitment: 
state_diff.compute_hash() } + } +} + +pub struct EventsCommitment { + pub events_commitment: Felt, + pub events_count: u64, +} + +impl EventsCommitment { + pub fn compute(ctx: &CommitmentComputationContext, events: &[EventWithTransactionHash]) -> Self { + // Override pre-v0.13.2 transaction hash computation + let starknet_version = StarknetVersion::max(ctx.protocol_version, StarknetVersion::V0_13_2); + + let event_hashes: Vec<_> = + events.par_iter().map(|ev| ev.event.compute_hash(ev.transaction_hash, starknet_version)).collect(); + + let events_commitment = compute_event_commitment(event_hashes, starknet_version); + + Self { events_commitment, events_count: events.len() as u64 } + } +} + +pub struct BlockCommitments { + pub transaction: TransactionAndReceiptCommitment, + pub state_diff: StateDiffCommitment, + /// A commitment to the events produced in this block + pub event: EventsCommitment, +} + +impl BlockCommitments { + /// Uses the rayon pool. + pub fn compute( + ctx: &CommitmentComputationContext, + transactions: &[TransactionWithReceipt], + state_diff: &StateDiff, + events: &[EventWithTransactionHash], + ) -> Self { + let (transaction, (state_diff, event)) = rayon::join( + || TransactionAndReceiptCommitment::compute(ctx, transactions), + || rayon::join(|| StateDiffCommitment::compute(ctx, state_diff), || EventsCommitment::compute(ctx, events)), + ); + Self { transaction, state_diff, event } + } +} + +impl PendingHeader { + pub fn to_closed_header(self, commitments: BlockCommitments, global_state_root: Felt, block_number: u64) -> Header { + Header { + parent_block_hash: self.parent_block_hash, + block_number, + sequencer_address: self.sequencer_address, + block_timestamp: self.block_timestamp, + protocol_version: self.protocol_version, + l1_gas_price: self.l1_gas_price, + l1_da_mode: self.l1_da_mode, + global_state_root, + transaction_count: commitments.transaction.transaction_count, + transaction_commitment: commitments.transaction.transaction_commitment, + 
event_count: commitments.event.events_count, + event_commitment: commitments.event.events_commitment, + state_diff_length: Some(commitments.state_diff.state_diff_length), + state_diff_commitment: Some(commitments.state_diff.state_diff_commitment), + receipt_commitment: Some(commitments.transaction.receipt_commitment), + } + } +} + +pub fn compute_event_commitment( + events_hashes: impl IntoIterator, + starknet_version: StarknetVersion, +) -> Felt { + let mut peekable = events_hashes.into_iter().peekable(); + if peekable.peek().is_none() { + return Felt::ZERO; + } + if starknet_version < StarknetVersion::V0_13_2 { + compute_merkle_root::(peekable) + } else { + compute_merkle_root::(peekable) + } +} + +pub fn compute_transaction_commitment( + tx_hashes_with_signature: impl IntoIterator, + starknet_version: StarknetVersion, +) -> Felt { + if starknet_version < StarknetVersion::V0_13_2 { + compute_merkle_root::(tx_hashes_with_signature) + } else { + compute_merkle_root::(tx_hashes_with_signature) + } +} + +pub fn compute_receipt_commitment( + receipt_hashes: impl IntoIterator, + _starknet_version: StarknetVersion, +) -> Felt { + compute_merkle_root::(receipt_hashes) +} + +/// Compute the root hash of a list of values. +// The `HashMapDb` can't fail, so we can safely unwrap the results. +// +// perf: Note that committing changes still has the greatest performance hit +// as this is where the root hash is calculated. Due to the Merkle structure +// of Bonsai Tries, this results in a trie size that grows very rapidly with +// each new insertion. It seems that the only vector of optimization here +// would be to parallelize the tree traversal on insertion and optimize hash computation. +// It seems lambdaclass' crypto lib does not do simd hashing, we may want to look into that. 
+pub fn compute_merkle_root(values: impl IntoIterator) -> Felt { + //TODO: replace the identifier by an empty slice when bonsai supports it + const IDENTIFIER: &[u8] = b"0xinmemory"; + let config = bonsai_trie::BonsaiStorageConfig::default(); + let bonsai_db = bonsai_trie::databases::HashMapDb::::default(); + let mut bonsai_storage = + bonsai_trie::BonsaiStorage::<_, _, H>::new(bonsai_db, config, /* max tree height */ 64); + + values.into_iter().enumerate().for_each(|(index, value)| { + let key = BitVec::from_vec(index.to_be_bytes().to_vec()); // TODO: remove this useless allocation + bonsai_storage.insert(IDENTIFIER, key.as_bitslice(), &value).expect("Failed to insert into bonsai storage"); + }); + + let id = bonsai_trie::id::BasicIdBuilder::new().new_id(); + + bonsai_storage.commit(id).expect("Failed to commit to bonsai storage"); + bonsai_storage.root_hash(IDENTIFIER).expect("Failed to get root hash") +} diff --git a/crates/madara/primitives/block/src/header.rs b/crates/madara/primitives/block/src/header.rs index 2a1682bd3..1b1952647 100644 --- a/crates/madara/primitives/block/src/header.rs +++ b/crates/madara/primitives/block/src/header.rs @@ -206,10 +206,16 @@ impl Header { } /// Compute the hash of the header. 
- pub fn compute_hash(&self, chain_id: Felt) -> Felt { - if self.protocol_version.is_pre_v0_7() { + pub fn compute_hash(&self, chain_id: Felt, pre_v0_13_2_override: bool) -> Felt { + let hash_version = if self.protocol_version < StarknetVersion::V0_13_2 && pre_v0_13_2_override { + StarknetVersion::V0_13_2 + } else { + self.protocol_version + }; + + if hash_version.is_pre_v0_7() { self.compute_hash_inner_pre_v0_7(chain_id) - } else if self.protocol_version < StarknetVersion::V0_13_2 { + } else if hash_version < StarknetVersion::V0_13_2 { Pedersen::hash_array(&[ Felt::from(self.block_number), self.global_state_root, @@ -336,7 +342,7 @@ mod tests { #[test] fn test_header_hash_v0_13_2() { let header = dummy_header(StarknetVersion::V0_13_2); - let hash = header.compute_hash(Felt::from_bytes_be_slice(b"CHAIN_ID")); + let hash = header.compute_hash(Felt::from_bytes_be_slice(b"CHAIN_ID"), false); let expected_hash = Felt::from_hex_unchecked("0x545dd9ef652b07cebb3c8b6d43b6c477998f124e75df970dfee300fb32a698b"); assert_eq!(hash, expected_hash); @@ -345,7 +351,7 @@ mod tests { #[test] fn test_header_hash_v0_11_1() { let header = dummy_header(StarknetVersion::V0_11_1); - let hash = header.compute_hash(Felt::from_bytes_be_slice(b"CHAIN_ID")); + let hash = header.compute_hash(Felt::from_bytes_be_slice(b"CHAIN_ID"), false); let expected_hash = Felt::from_hex_unchecked("0x42ec5792c165e0235d7576dc9b4a56140b217faba0b2f57c0a48b850ea5999c"); assert_eq!(hash, expected_hash); @@ -354,7 +360,7 @@ mod tests { #[test] fn test_header_hash_pre_v0_7() { let header = dummy_header(StarknetVersion::V_0_0_0); - let hash = header.compute_hash(Felt::from_bytes_be_slice(b"SN_MAIN")); + let hash = header.compute_hash(Felt::from_bytes_be_slice(b"SN_MAIN"), false); let expected_hash = Felt::from_hex_unchecked("0x6028bf0975e1d4c95713e021a0f0217e74d5a748a20691d881c86d9d62d1432"); assert_eq!(hash, expected_hash); diff --git a/crates/madara/primitives/block/src/lib.rs 
b/crates/madara/primitives/block/src/lib.rs index 6e1c3fb58..269f5ab36 100644 --- a/crates/madara/primitives/block/src/lib.rs +++ b/crates/madara/primitives/block/src/lib.rs @@ -1,18 +1,43 @@ //! Starknet block primitives. use crate::header::GasPrices; +use commitments::{BlockCommitments, CommitmentComputationContext}; use header::{BlockTimestamp, L1DataAvailabilityMode, PendingHeader}; use mp_chain_config::StarknetVersion; -use mp_receipt::TransactionReceipt; +use mp_receipt::{EventWithTransactionHash, TransactionReceipt}; +use mp_state_update::StateDiff; use mp_transactions::Transaction; use starknet_types_core::felt::Felt; +pub mod commitments; pub mod header; + pub use header::Header; pub use primitive_types::{H160, U256}; + pub type BlockId = starknet_types_rpc::BlockId; pub type BlockTag = starknet_types_rpc::BlockTag; +// TODO: where should we put that? +#[derive(Debug, Clone)] +pub struct TransactionWithReceipt { + pub transaction: Transaction, + pub receipt: TransactionReceipt, +} + +#[derive(Debug, Clone)] +pub struct ConsensusSignature { + pub r: Felt, + pub s: Felt, +} + +#[derive(Debug, Clone)] +pub struct BlockHeaderWithSignatures { + pub header: Header, + pub block_hash: Felt, + pub consensus_signatures: Vec, +} + #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] #[allow(clippy::large_enum_variant)] pub enum MadaraMaybePendingBlockInfo { @@ -202,6 +227,7 @@ impl MadaraMaybePendingBlock { } /// Starknet block definition. +#[cfg_attr(any(test, feature = "testing"), derive(PartialEq))] #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] pub struct MadaraBlock { pub info: MadaraBlockInfo, @@ -219,6 +245,37 @@ impl MadaraBlock { } } +/// For testing we use a more rigorous impl of [PartialEq] which doesn't just +/// check the block hash. 
+#[cfg(not(any(test, feature = "testing")))] +impl PartialEq for MadaraBlock { + fn eq(&self, other: &Self) -> bool { + self.info.block_hash == other.info.block_hash + } +} + +impl Eq for MadaraBlock {} + +impl Ord for MadaraBlock { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.info.header.block_number.cmp(&other.info.header.block_number) + } +} + +impl PartialOrd for MadaraBlock { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl malachite_core_types::Value for MadaraBlock { + type Id = Felt; + + fn id(&self) -> Self::Id { + self.info.block_hash + } +} + /// Starknet block definition. #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] pub struct MadaraPendingBlock { @@ -294,6 +351,43 @@ pub struct VisitedSegmentEntry { pub segments: Vec, } +pub struct FullBlock { + pub block_hash: Felt, + pub header: Header, + pub state_diff: StateDiff, + pub transactions: Vec, + pub events: Vec, +} + +/// A pending block is a block that has not yet been closed. +pub struct PendingFullBlock { + pub header: PendingHeader, + pub state_diff: StateDiff, + pub transactions: Vec, + pub events: Vec, +} + +impl PendingFullBlock { + /// Uses the rayon thread pool. 
+ pub fn close_block( + self, + ctx: &CommitmentComputationContext, + block_number: u64, + new_global_state_root: Felt, + pre_v0_13_2_override: bool, + ) -> FullBlock { + let commitments = BlockCommitments::compute(ctx, &self.transactions, &self.state_diff, &self.events); + let header = self.header.to_closed_header(commitments, new_global_state_root, block_number); + FullBlock { + block_hash: header.compute_hash(ctx.chain_id, pre_v0_13_2_override), + header, + state_diff: self.state_diff, + transactions: self.transactions, + events: self.events, + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/madara/primitives/chain_config/Cargo.toml b/crates/madara/primitives/chain_config/Cargo.toml index 961d7f5a1..2037648b0 100644 --- a/crates/madara/primitives/chain_config/Cargo.toml +++ b/crates/madara/primitives/chain_config/Cargo.toml @@ -26,6 +26,7 @@ mp-utils.workspace = true # Other anyhow.workspace = true lazy_static.workspace = true +multiaddr.workspace = true primitive-types.workspace = true serde = { workspace = true, features = ["derive"] } serde_json.workspace = true diff --git a/crates/madara/primitives/chain_config/src/chain_config.rs b/crates/madara/primitives/chain_config/src/chain_config.rs index 4d37e1d3d..209414f9b 100644 --- a/crates/madara/primitives/chain_config/src/chain_config.rs +++ b/crates/madara/primitives/chain_config/src/chain_config.rs @@ -116,7 +116,7 @@ pub struct ChainConfig { /// The Starknet core contract address for the L1 watcher. pub eth_core_contract_address: H160, - /// The Starknet SHARP verifier La address. Check out the [docs](https://docs.starknet.io/architecture-and-concepts/solidity-verifier/) + /// The Starknet SHARP verifier L1 address. 
Check out the [docs](https://docs.starknet.io/architecture-and-concepts/solidity-verifier/) /// for more information pub eth_gps_statement_verifier: H160, @@ -131,6 +131,9 @@ pub struct ChainConfig { #[serde(deserialize_with = "deserialize_private_key")] pub private_key: ZeroingPrivateKey, + #[serde(default)] + pub p2p_bootstrap_nodes: Vec, + /// Transaction limit in the mempool. pub mempool_tx_limit: usize, /// Transaction limit in the mempool, we have an additional limit for declare transactions. @@ -245,6 +248,8 @@ impl ChainConfig { private_key: ZeroingPrivateKey::default(), + p2p_bootstrap_nodes: vec![], + mempool_tx_limit: 10_000, mempool_declare_tx_limit: 20, mempool_tx_max_age: Some(Duration::from_secs(60 * 60)), // an hour? diff --git a/crates/madara/primitives/class/src/compile.rs b/crates/madara/primitives/class/src/compile.rs index 056109f3a..6a7b1690b 100644 --- a/crates/madara/primitives/class/src/compile.rs +++ b/crates/madara/primitives/class/src/compile.rs @@ -25,25 +25,21 @@ pub enum ClassCompilationError { ParsingSierraVersion(Cow<'static, str>), #[error("Failed to construct a blockifier class: {0}")] BlockifierClassConstructionFailed(#[from] cairo_vm::types::errors::program_errors::ProgramError), + #[error("Compiled class hash mismatch, expected {expected:#x} got {got:#x}")] + CompiledClassHashMismatch { expected: Felt, got: Felt }, } impl CompressedLegacyContractClass { - pub fn serialize_to_json(&self) -> Result { - let mut program: serde_json::Value = - serde_json::from_reader(crate::convert::gz_decompress_stream(self.program.as_slice()))?; - - let program_object = program.as_object_mut().ok_or(ClassCompilationError::ProgramIsNotAnObject)?; - - if !program_object.contains_key("debug_info") { - program_object.insert("debug_info".to_owned(), serde_json::json!("")); - } - + // Returns `impl serde::Serialize` because the fact that it returns a serde_json::Value is an impl detail + // we should actually change that, it would be better to have a 
concrete type here. + pub fn abi(&self) -> Result { // This convoluted JSON serialization is a way to get around bincode's // lack of support for #[serde(tag = "type")]. Abi entries should be // serialized as typed JSON structs, so we have to do this manually. // // NOTE: that the `type` field is already present in each ABI entry // struct so we do not need to add it manually. + let abi = self .abi .as_ref() @@ -64,10 +60,23 @@ impl CompressedLegacyContractClass { }) .transpose()?; + Ok(abi) + } + + pub fn serialize_to_json(&self) -> Result { + let mut program: serde_json::Value = + serde_json::from_reader(crate::convert::gz_decompress_stream(self.program.as_slice()))?; + + let program_object = program.as_object_mut().ok_or(ClassCompilationError::ProgramIsNotAnObject)?; + + if !program_object.contains_key("debug_info") { + program_object.insert("debug_info".to_owned(), serde_json::json!("")); + } + let json = serde_json::json!({ "program": program, "entry_points_by_type": self.entry_points_by_type, - "abi": abi + "abi": self.abi }); Ok(serde_json::to_string(&json)?) 
diff --git a/crates/madara/primitives/class/src/lib.rs b/crates/madara/primitives/class/src/lib.rs index efb3bd955..cc1ee81cd 100644 --- a/crates/madara/primitives/class/src/lib.rs +++ b/crates/madara/primitives/class/src/lib.rs @@ -1,7 +1,8 @@ use blockifier::execution::contract_class::{ClassInfo as BClassInfo, ContractClass as BContractClass}; +use class_hash::ComputeClassHashError; use compile::ClassCompilationError; use starknet_types_core::felt::Felt; -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap, fmt, sync::Arc}; pub mod class_hash; pub mod class_update; @@ -63,6 +64,47 @@ pub struct SierraConvertedClass { pub compiled: Arc, } +#[derive(Copy, Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub enum ClassType { + Sierra, + Legacy, +} + +impl fmt::Display for ClassType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Sierra => write!(f, "Sierra"), + Self::Legacy => write!(f, "Legacy"), + } + } +} + +#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub struct ClassInfoWithHash { + pub class_info: ClassInfo, + pub class_hash: Felt, +} + +impl ClassInfoWithHash { + /// Does class compilation. Does check the resulting compiled class hash. 
+ pub fn convert(self) -> Result { + match self.class_info { + ClassInfo::Sierra(sierra_class_info) => { + let compiled = sierra_class_info.compile()?; + Ok(ConvertedClass::Sierra(SierraConvertedClass { + class_hash: self.class_hash, + info: sierra_class_info, + compiled: Arc::new(compiled), + })) + } + ClassInfo::Legacy(legacy_class_info) => Ok(ConvertedClass::Legacy(LegacyConvertedClass { + class_hash: self.class_hash, + info: legacy_class_info, + })), + } + } +} + #[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub enum ClassInfo { Sierra(SierraClassInfo), @@ -95,6 +137,17 @@ impl ClassInfo { ClassInfo::Legacy(_) => None, } } + + pub fn compute_hash(&self) -> Result { + match self { + ClassInfo::Sierra(sierra_class_info) => sierra_class_info.contract_class.compute_class_hash(), + ClassInfo::Legacy(legacy_class_info) => legacy_class_info.contract_class.compute_class_hash(), + } + } + + pub fn with_computed_hash(self) -> Result { + Ok(ClassInfoWithHash { class_hash: self.compute_hash()?, class_info: self }) + } } #[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] @@ -108,6 +161,19 @@ pub struct SierraClassInfo { pub compiled_class_hash: Felt, } +impl SierraClassInfo { + pub fn compile(&self) -> Result { + let (compiled_class_hash, compiled) = self.contract_class.compile_to_casm()?; + if self.compiled_class_hash != compiled_class_hash { + return Err(ClassCompilationError::CompiledClassHashMismatch { + expected: self.compiled_class_hash, + got: compiled_class_hash, + }); + } + Ok(compiled) + } +} + #[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub enum ContractClass { Sierra(Arc), diff --git a/crates/madara/primitives/convert/src/felt.rs b/crates/madara/primitives/convert/src/felt.rs index 29c85782f..af0774625 100644 --- a/crates/madara/primitives/convert/src/felt.rs +++ b/crates/madara/primitives/convert/src/felt.rs @@ -1,11 +1,76 @@ use primitive_types::H160; use 
starknet_types_core::felt::Felt; +use std::cmp::Ordering; + +#[derive(Debug, thiserror::Error)] +#[error("Malformated field element.")] +pub struct MalformatedFelt; + +pub trait FeltExt { + fn from_slice_be_checked(slice: &[u8]) -> Result; + fn from_bytes_checked(slice: &[u8; 32]) -> Result; + + fn slice_be_len(&self) -> usize; + fn to_h160(&self) -> Result; +} + +impl FeltExt for Felt { + fn from_slice_be_checked(slice: &[u8]) -> Result { + if slice.len() > 32 { + return Err(MalformatedFelt); + } + + let mut unpacked = [0; 32]; + for i in 0..slice.len() { + unpacked[32 - slice.len() + i] = slice[i] + } + + Felt::from_bytes_checked(&unpacked) + } + + fn from_bytes_checked(b: &[u8; 32]) -> Result { + let limbs = [ + u64::from_be_bytes([b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]]), + u64::from_be_bytes([b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15]]), + u64::from_be_bytes([b[16], b[17], b[18], b[19], b[20], b[21], b[22], b[23]]), + u64::from_be_bytes([b[24], b[25], b[26], b[27], b[28], b[29], b[30], b[31]]), + ]; + // Check if it overflows the modulus. 
+ + // p=2^251 + 17*2^192 + 1 + const MODULUS_U64: [u64; 4] = [576460752303423505u64, 0, 0, 1]; + + for i in 0..4 { + match u64::cmp(&limbs[i], &MODULUS_U64[i]) { + Ordering::Less => break, + Ordering::Equal if i == 3 => return Err(MalformatedFelt), + Ordering::Equal => continue, + Ordering::Greater => return Err(MalformatedFelt), + } + } + + Ok(Felt::from_bytes_be(b)) + } + + fn slice_be_len(&self) -> usize { + let bytes = self.to_bytes_be(); + let mut len = 32; + while len > 0 && bytes[32 - len] == 0 { + len -= 1; + } + len + } + + fn to_h160(&self) -> Result { + felt_to_h160(self) + } +} #[derive(Debug, thiserror::Error)] #[error("Felt is too big to convert to H160.")] pub struct FeltToH160Error; -pub fn felt_to_h160(felt: &Felt) -> Result { +fn felt_to_h160(felt: &Felt) -> Result { const MAX_H160: Felt = Felt::from_hex_unchecked("0xffffffffffffffffffffffffffffffffffffffff"); if felt > &MAX_H160 { diff --git a/crates/madara/primitives/convert/src/hash256_serde.rs b/crates/madara/primitives/convert/src/hash256_serde.rs new file mode 100644 index 000000000..be2782df3 --- /dev/null +++ b/crates/madara/primitives/convert/src/hash256_serde.rs @@ -0,0 +1,33 @@ +use serde::{de, Deserializer, Serializer}; +use serde_with::{DeserializeAs, SerializeAs}; +use starknet_core::types::Hash256; +use std::fmt; + +/// Bincode-friendly [`SerializeAs`] and [`DeserializeAs`] implementation for [`Hash256`] +pub struct Hash256Serde; + +impl SerializeAs for Hash256Serde { + fn serialize_as(value: &Hash256, serializer: S) -> Result { + serializer.serialize_bytes(value.as_bytes()) + } +} + +impl<'de> DeserializeAs<'de, Hash256> for Hash256Serde { + fn deserialize_as>(deserializer: D) -> Result { + struct Hash256Visitor; + impl de::Visitor<'_> for Hash256Visitor { + type Value = Hash256; + fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "a byte array of size 32 representing a Hash256") + } + fn visit_bytes(self, v: &[u8]) -> Result { + if v.len() != 32 { + return 
Err(de::Error::invalid_length(v.len(), &self)); + } + let bytes: [u8; 32] = v.try_into().expect("condition checked just before"); + Ok(Hash256::from_bytes(bytes)) + } + } + deserializer.deserialize_bytes(Hash256Visitor) + } +} diff --git a/crates/madara/primitives/convert/src/lib.rs b/crates/madara/primitives/convert/src/lib.rs index b118dcafe..51217b7c6 100644 --- a/crates/madara/primitives/convert/src/lib.rs +++ b/crates/madara/primitives/convert/src/lib.rs @@ -1,8 +1,11 @@ mod felt; -pub mod hex_serde; mod to_felt; -pub use felt::felt_to_h160; +pub mod hash256_serde; +pub mod hex_serde; + +pub use felt::FeltExt; +pub use primitive_types::{H160, H256}; pub use to_felt::{DisplayFeltAsHex, FeltHexDisplay, ToFelt}; pub mod test { diff --git a/crates/madara/primitives/gateway/Cargo.toml b/crates/madara/primitives/gateway/Cargo.toml index 2d1e072b7..aa5613b6e 100644 --- a/crates/madara/primitives/gateway/Cargo.toml +++ b/crates/madara/primitives/gateway/Cargo.toml @@ -17,7 +17,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] # Deoxys -mc-block-import.workspace = true mp-block.workspace = true mp-chain-config.workspace = true mp-class.workspace = true diff --git a/crates/madara/primitives/gateway/src/block.rs b/crates/madara/primitives/gateway/src/block.rs index cf682fc59..04a52fce5 100644 --- a/crates/madara/primitives/gateway/src/block.rs +++ b/crates/madara/primitives/gateway/src/block.rs @@ -1,15 +1,28 @@ +use super::{ + receipt::{ConfirmedReceipt, MsgToL2}, + transaction::Transaction, +}; use anyhow::Context; -use mp_block::header::{BlockTimestamp, L1DataAvailabilityMode}; +use mp_block::{ + header::{BlockTimestamp, L1DataAvailabilityMode, PendingHeader}, + FullBlock, PendingFullBlock, TransactionWithReceipt, +}; use mp_chain_config::StarknetVersion; use mp_convert::hex_serde::U128AsHex; +use mp_receipt::EventWithTransactionHash; +use mp_state_update::StateDiff; use serde::{Deserialize, Serialize}; use serde_with::serde_as; use 
starknet_types_core::felt::Felt; +use std::mem; -use super::{ - receipt::{ConfirmedReceipt, MsgToL2}, - transaction::Transaction, -}; +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +#[cfg_attr(test, derive(Eq))] +pub struct ProviderBlockHeader { + pub block_number: u64, + pub block_hash: Felt, +} #[derive(Debug, Clone, PartialEq, Serialize)] // no Deserialize because it's untagged #[serde(untagged)] @@ -110,6 +123,8 @@ impl ProviderBlock { Some(block.info.header.sequencer_address) }; + // TODO(compute_v0_13_2_hashes): once `compute_v0_13_2_hashes` becomes the default, we should show all post-v0.13.2 commitments + // in the block including receipt and state_diff commitments. let (receipt_commitment, state_diff_commitment) = if block.info.header.protocol_version >= StarknetVersion::V0_13_2 { (block.info.header.receipt_commitment, block.info.header.state_diff_commitment) @@ -145,11 +160,18 @@ impl ProviderBlock { } } - pub fn header(&self) -> anyhow::Result { - Ok(mc_block_import::UnverifiedHeader { - parent_block_hash: Some(self.parent_block_hash), + pub fn into_full_block(self, state_diff: StateDiff) -> anyhow::Result { + let header = self.header()?; + let TransactionsReceiptsAndEvents { transactions, events } = + convert_txs(self.transactions, self.transaction_receipts); + Ok(FullBlock { block_hash: self.block_hash, header, transactions, events, state_diff }) + } + + pub fn header(&self) -> anyhow::Result { + Ok(mp_block::Header { + parent_block_hash: self.parent_block_hash, sequencer_address: self.sequencer_address.unwrap_or_default(), - block_timestamp: BlockTimestamp(self.timestamp), + block_timestamp: mp_block::header::BlockTimestamp(self.timestamp), protocol_version: self .starknet_version .as_deref() @@ -165,6 +187,15 @@ impl ProviderBlock { strk_l1_data_gas_price: self.l1_data_gas_price.price_in_fri, }, l1_da_mode: self.l1_da_mode, + block_number: self.block_number, + global_state_root: self.state_root, + 
transaction_count: self.transactions.len() as u64, + transaction_commitment: self.transaction_commitment, + event_count: self.transaction_receipts.iter().map(|tx| tx.events.len() as u64).sum(), + event_commitment: self.event_commitment, + state_diff_length: self.state_diff_length, + state_diff_commitment: self.state_diff_commitment, + receipt_commitment: self.receipt_commitment, }) } } @@ -224,9 +255,9 @@ impl ProviderBlockPending { } } - pub fn header(&self) -> anyhow::Result { - Ok(mc_block_import::UnverifiedHeader { - parent_block_hash: Some(self.parent_block_hash), + pub fn header(&self) -> anyhow::Result { + Ok(PendingHeader { + parent_block_hash: self.parent_block_hash, sequencer_address: self.sequencer_address, block_timestamp: BlockTimestamp(self.timestamp), protocol_version: self @@ -247,6 +278,13 @@ impl ProviderBlockPending { l1_da_mode: self.l1_da_mode, }) } + + pub fn into_full_block(self, state_diff: StateDiff) -> anyhow::Result { + let header = self.header()?; + let TransactionsReceiptsAndEvents { transactions, events } = + convert_txs(self.transactions, self.transaction_receipts); + Ok(PendingFullBlock { header, transactions, events, state_diff }) + } } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -300,3 +338,29 @@ fn receipts(receipts: Vec, transaction: &[Transa }) .collect() } + +struct TransactionsReceiptsAndEvents { + transactions: Vec, + events: Vec, +} + +fn convert_txs(transactions: Vec, mut receipts: Vec) -> TransactionsReceiptsAndEvents { + TransactionsReceiptsAndEvents { + events: receipts + .iter_mut() + .flat_map(|receipt| { + mem::take(&mut receipt.events) + .into_iter() + .map(|event| EventWithTransactionHash { transaction_hash: receipt.transaction_hash, event }) + }) + .collect(), + transactions: transactions + .into_iter() + .zip(receipts) + .map(|(transaction, receipt)| TransactionWithReceipt { + receipt: receipt.into_mp(&transaction), + transaction: transaction.into(), + }) + .collect(), + } +} diff --git 
a/crates/madara/primitives/gateway/src/receipt.rs b/crates/madara/primitives/gateway/src/receipt.rs index e0f220c1e..2ec85c4e3 100644 --- a/crates/madara/primitives/gateway/src/receipt.rs +++ b/crates/madara/primitives/gateway/src/receipt.rs @@ -1,5 +1,5 @@ use mp_block::H160; -use mp_convert::felt_to_h160; +use mp_convert::FeltExt; use mp_receipt::{Event, L1Gas, MsgToL1}; use primitive_types::H256; use serde::{Deserialize, Serialize}; @@ -89,7 +89,7 @@ impl ConfirmedReceipt { let message_hash = message_to_l2.hash(); mp_receipt::L1HandlerTransactionReceipt { - message_hash: H256::from_slice(message_hash.as_bytes()), + message_hash, transaction_hash: self.transaction_hash, actual_fee: fee_payment(self.actual_fee, tx.version()), messages_sent: self.l2_to_l1_messages, @@ -286,7 +286,7 @@ impl TryFrom<&L1HandlerTransaction> for MsgToL2 { fn try_from(l1_handler: &L1HandlerTransaction) -> Result { let (l1_address, payload) = l1_handler.calldata.split_first().ok_or(())?; Ok(Self { - from_address: felt_to_h160(l1_address).map_err(|_| ())?, + from_address: l1_address.to_h160().map_err(drop)?, to_address: l1_handler.contract_address, selector: l1_handler.entry_point_selector, payload: payload.to_vec(), diff --git a/crates/madara/primitives/gateway/src/state_update.rs b/crates/madara/primitives/gateway/src/state_update.rs index b370fb7ab..7761f45b5 100644 --- a/crates/madara/primitives/gateway/src/state_update.rs +++ b/crates/madara/primitives/gateway/src/state_update.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; +use mp_block::{FullBlock, PendingFullBlock}; use mp_state_update::{DeclaredClassItem, DeployedContractItem, StorageEntry}; use serde::{Deserialize, Serialize}; use serde_with::serde_as; @@ -194,9 +195,21 @@ pub struct ProviderStateUpdateWithBlock { pub block: ProviderBlock, } +impl ProviderStateUpdateWithBlock { + pub fn into_full_block(self) -> anyhow::Result { + self.block.into_full_block(self.state_update.state_diff.into()) + } +} + #[derive(Clone, Debug, 
Deserialize, Serialize, PartialEq)] #[cfg_attr(test, derive(Eq))] pub struct ProviderStateUpdateWithBlockPending { pub state_update: ProviderStateUpdatePending, pub block: ProviderBlockPending, } + +impl ProviderStateUpdateWithBlockPending { + pub fn into_full_block(self) -> anyhow::Result { + self.block.into_full_block(self.state_update.state_diff.into()) + } +} diff --git a/crates/madara/primitives/malachite/Cargo.toml b/crates/madara/primitives/malachite/Cargo.toml new file mode 100644 index 000000000..896ad17e7 --- /dev/null +++ b/crates/madara/primitives/malachite/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "malachite" +authors.workspace = true +homepage.workspace = true +edition.workspace = true +repository.workspace = true +version.workspace = true +license.workspace = true + +[dependencies] + +# Madara +mp-block.workspace = true +mp-transactions.workspace = true + +# Starknet +malachite-core-types.workspace = true +starknet-types-core.workspace = true + +[lints] +workspace = true diff --git a/crates/madara/primitives/malachite/src/context.rs b/crates/madara/primitives/malachite/src/context.rs new file mode 100644 index 000000000..18e1ab20b --- /dev/null +++ b/crates/madara/primitives/malachite/src/context.rs @@ -0,0 +1,65 @@ +use mp_block::MadaraBlock; + +use crate::{ + proposal::{ProposalPartStub, ProposalStub}, + types::{Address, Height}, + validators::{ValidatorSet, ValidatorStub}, + vote::{SigningProviderStub, SigningSchemeStub, VoteStub}, +}; + +#[derive(Clone, Debug)] +pub struct MadaraContext; + +impl malachite_core_types::Context for MadaraContext { + type Address = Address; + type Height = Height; + type ProposalPart = ProposalPartStub; + type Proposal = ProposalStub; + type Validator = ValidatorStub; + type ValidatorSet = ValidatorSet; + type Value = MadaraBlock; + type Vote = VoteStub; + type SigningScheme = SigningSchemeStub; + type SigningProvider = SigningProviderStub; + + fn select_proposer<'a>( + &self, + validator_set: &'a 
Self::ValidatorSet, + height: Self::Height, + round: malachite_core_types::Round, + ) -> &'a Self::Validator { + todo!() + } + + fn signing_provider(&self) -> &Self::SigningProvider { + todo!() + } + + fn new_proposal( + height: Self::Height, + round: malachite_core_types::Round, + value: Self::Value, + pol_round: malachite_core_types::Round, + address: Self::Address, + ) -> Self::Proposal { + todo!() + } + + fn new_prevote( + height: Self::Height, + round: malachite_core_types::Round, + value_id: malachite_core_types::NilOrVal>, + address: Self::Address, + ) -> Self::Vote { + todo!() + } + + fn new_precommit( + height: Self::Height, + round: malachite_core_types::Round, + value_id: malachite_core_types::NilOrVal>, + address: Self::Address, + ) -> Self::Vote { + todo!() + } +} diff --git a/crates/madara/primitives/malachite/src/lib.rs b/crates/madara/primitives/malachite/src/lib.rs new file mode 100644 index 000000000..c5aae042c --- /dev/null +++ b/crates/madara/primitives/malachite/src/lib.rs @@ -0,0 +1,5 @@ +mod context; +mod proposal; +mod types; +mod validators; +mod vote; diff --git a/crates/madara/primitives/malachite/src/proposal.rs b/crates/madara/primitives/malachite/src/proposal.rs new file mode 100644 index 000000000..bb351cb9c --- /dev/null +++ b/crates/madara/primitives/malachite/src/proposal.rs @@ -0,0 +1,48 @@ +use mp_block::MadaraBlock; + +use crate::{ + context::MadaraContext, + types::{Address, Height}, +}; + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct ProposalPartStub; + +impl malachite_core_types::ProposalPart for ProposalPartStub { + fn is_first(&self) -> bool { + unimplemented!() + } + + fn is_last(&self) -> bool { + unimplemented!() + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct ProposalStub; + +impl malachite_core_types::Proposal for ProposalStub { + fn height(&self) -> Height { + todo!() + } + + fn round(&self) -> malachite_core_types::Round { + todo!() + } + + fn value(&self) -> 
&MadaraBlock { + todo!() + } + + fn take_value(self) -> MadaraBlock { + todo!() + } + + fn pol_round(&self) -> malachite_core_types::Round { + todo!() + } + + fn validator_address(&self) -> &Address { + todo!() + } +} diff --git a/crates/madara/primitives/malachite/src/types.rs b/crates/madara/primitives/malachite/src/types.rs new file mode 100644 index 000000000..a3c5292ac --- /dev/null +++ b/crates/madara/primitives/malachite/src/types.rs @@ -0,0 +1,37 @@ +use starknet_types_core::felt::Felt; + +#[repr(transparent)] +#[derive(Clone, Copy, Default, Debug, Eq, PartialEq, Ord, PartialOrd)] +pub struct Height(u64); + +impl std::fmt::Display for Height { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Height").field("at", &self.0).finish() + } +} + +impl malachite_core_types::Height for Height { + fn increment_by(&self, n: u64) -> Self { + Self(self.0.saturating_add(n)) + } + + fn decrement_by(&self, n: u64) -> Option { + self.0.checked_sub(n).map(Self) + } + + fn as_u64(&self) -> u64 { + self.0 + } +} + +#[repr(transparent)] +#[derive(Clone, Copy, Default, Debug, Eq, PartialEq, Ord, PartialOrd)] +pub struct Address(Felt); + +impl std::fmt::Display for Address { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Address").field("at", &self.0).finish() + } +} + +impl malachite_core_types::Address for Address {} diff --git a/crates/madara/primitives/malachite/src/validators.rs b/crates/madara/primitives/malachite/src/validators.rs new file mode 100644 index 000000000..f7307eb35 --- /dev/null +++ b/crates/madara/primitives/malachite/src/validators.rs @@ -0,0 +1,44 @@ +use malachite_core_types::Validator as _; +use std::collections::BTreeMap; + +use crate::{context::MadaraContext, types::Address}; + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct ValidatorStub; + +impl malachite_core_types::Validator for ValidatorStub { + fn address(&self) -> &Address { + todo!() + } + + fn 
public_key(&self) -> &malachite_core_types::PublicKey<MadaraContext> {
+        todo!()
+    }
+
+    fn voting_power(&self) -> malachite_core_types::VotingPower {
+        todo!()
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct ValidatorSet {
+    validators: BTreeMap<Address, ValidatorStub>,
+}
+
+impl malachite_core_types::ValidatorSet<MadaraContext> for ValidatorSet {
+    fn count(&self) -> usize {
+        self.validators.len()
+    }
+
+    fn total_voting_power(&self) -> malachite_core_types::VotingPower {
+        self.validators.values().fold(0, |acc, v| acc + v.voting_power())
+    }
+
+    fn get_by_address(&self, address: &Address) -> Option<&ValidatorStub> {
+        self.validators.get(address)
+    }
+
+    fn get_by_index(&self, index: usize) -> Option<&ValidatorStub> {
+        self.validators.values().nth(index) // nth(i) = i-th validator in BTreeMap order; take(i).last() was off by one (None at 0)
+    }
+}
diff --git a/crates/madara/primitives/malachite/src/vote.rs b/crates/madara/primitives/malachite/src/vote.rs
new file mode 100644
index 000000000..ad8083cba
--- /dev/null
+++ b/crates/madara/primitives/malachite/src/vote.rs
@@ -0,0 +1,127 @@
+//! These are temporary types while I wait to integrate @cchudant's p2p pr into
+//! this one.
+ +use starknet_types_core::felt::Felt; + +use crate::{ + context::MadaraContext, + types::{Address, Height}, +}; + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct VoteStub; + +impl malachite_core_types::Vote for VoteStub { + fn height(&self) -> Height { + unimplemented!() + } + + fn round(&self) -> malachite_core_types::Round { + unimplemented!() + } + + fn value(&self) -> &malachite_core_types::NilOrVal { + unimplemented!() + } + + fn take_value(self) -> malachite_core_types::NilOrVal { + unimplemented!() + } + + fn vote_type(&self) -> malachite_core_types::VoteType { + unimplemented!() + } + + fn validator_address(&self) -> &Address { + unimplemented!() + } + + fn extension(&self) -> Option<&malachite_core_types::SignedExtension> { + unimplemented!() + } + + fn extend(self, extension: malachite_core_types::SignedExtension) -> Self { + unimplemented!() + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct SigningSchemeStub; + +impl malachite_core_types::SigningScheme for SigningSchemeStub { + type DecodingError = String; + type Signature = (); + type PublicKey = (); + type PrivateKey = (); + + fn decode_signature(bytes: &[u8]) -> Result { + unimplemented!() + } + + fn encode_signature(signature: &Self::Signature) -> Vec { + unimplemented!() + } +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct SigningProviderStub; + +impl malachite_core_types::SigningProvider for SigningProviderStub { + fn sign_vote(&self, vote: VoteStub) -> malachite_core_types::SignedMessage { + todo!() + } + + fn verify_signed_vote( + &self, + vote: &VoteStub, + signature: &malachite_core_types::Signature, + public_key: &malachite_core_types::PublicKey, + ) -> bool { + todo!() + } + + fn sign_proposal( + &self, + proposal: ::Proposal, + ) -> malachite_core_types::SignedMessage::Proposal> + { + todo!() + } + + fn verify_signed_proposal( + &self, + proposal: &::Proposal, + signature: &malachite_core_types::Signature, + 
public_key: &malachite_core_types::PublicKey, + ) -> bool { + todo!() + } + + fn sign_proposal_part( + &self, + proposal_part: ::ProposalPart, + ) -> malachite_core_types::SignedMessage< + MadaraContext, + ::ProposalPart, + > { + todo!() + } + + fn verify_signed_proposal_part( + &self, + proposal_part: &::ProposalPart, + signature: &malachite_core_types::Signature, + public_key: &malachite_core_types::PublicKey, + ) -> bool { + todo!() + } + + fn verify_commit_signature( + &self, + certificate: &malachite_core_types::CommitCertificate, + commit_sig: &malachite_core_types::CommitSignature, + validator: &::Validator, + ) -> Result> { + todo!() + } +} diff --git a/crates/madara/primitives/proto/Cargo.toml b/crates/madara/primitives/proto/Cargo.toml new file mode 100644 index 000000000..771096bce --- /dev/null +++ b/crates/madara/primitives/proto/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "mp-proto" +authors.workspace = true +homepage.workspace = true +edition.workspace = true +repository.workspace = true +version.workspace = true +license.workspace = true + +[dependencies] +# Madara +mc-db.workspace = true +mp-block.workspace = true +mp-class.workspace = true +mp-convert.workspace = true +mp-receipt.workspace = true +mp-state-update.workspace = true +mp-transactions.workspace = true +mp-utils.workspace = true + +m-proc-macros.workspace = true + +# Starknet +starknet-core.workspace = true +starknet-types-core.workspace = true + +# Error handling +anyhow.workspace = true +thiserror.workspace = true + +# Protobuf +base64.workspace = true +bytes.workspace = true +prost.workspace = true +serde.workspace = true +serde_json.workspace = true +unsigned-varint.workspace = true + +# Debug and testing +tracing.workspace = true +rstest.workspace = true +assert_matches.workspace = true +rand.workspace = true +proptest.workspace = true +proptest-derive.workspace = true +proptest-state-machine.workspace = true + +[lints] +workspace = true + +[build-dependencies] 
+prost-build.workspace = true
diff --git a/crates/madara/primitives/proto/build.rs b/crates/madara/primitives/proto/build.rs
new file mode 100644
index 000000000..3faf4ae4b
--- /dev/null
+++ b/crates/madara/primitives/proto/build.rs
@@ -0,0 +1,34 @@
+fn main() -> std::io::Result<()> {
+    let files: Vec<_> = collect_files("starknet-p2p-specs/p2p/proto/", "proto")?;
+
+    prost_build::Config::new()
+        .extern_path(".Felt252", "crate::model_primitives::Felt252")
+        .extern_path(".Hash", "crate::model_primitives::Hash")
+        .extern_path(".EthereumAddress", "crate::model_primitives::EthereumAddress")
+        .extern_path(".Hash256", "crate::model_primitives::Hash256")
+        .extern_path(".Address", "crate::model_primitives::Address")
+        .protoc_arg("--experimental_allow_proto3_optional")
+        .compile_protos(&files, &["starknet-p2p-specs/"])?;
+    Ok(())
+}
+
+/// Recursively collects all files in a directory.
+///
+/// This function will keep exploring sub-directories until it has reached the
+/// bottom of the file tree.
+fn collect_files(path: impl AsRef<std::path::Path>, ext: &str) -> std::io::Result<Vec<std::path::PathBuf>> {
+    let mut files = vec![];
+
+    for entry in std::fs::read_dir(path)? {
+        let entry = entry?;
+        let path = entry.path();
+
+        if path.is_dir() {
+            files.extend(collect_files(path, ext)?);
+        } else if path.extension().map_or(false, |e| e == ext) { // skip extension-less files (LICENSE, README) instead of panicking
+            files.push(path)
+        }
+    }
+
+    std::io::Result::Ok(files)
+}
diff --git a/crates/madara/primitives/proto/proptest-regressions/proposal.txt b/crates/madara/primitives/proto/proptest-regressions/proposal.txt
new file mode 100644
index 000000000..8839f8de0
--- /dev/null
+++ b/crates/madara/primitives/proto/proptest-regressions/proposal.txt
@@ -0,0 +1,7 @@
+# Seeds for failure cases proptest has generated in the past. It is
+# automatically read and these particular cases re-run before any
+# novel cases are generated.
+#
+# It is recommended to check this file in to source control so that
+# everyone who runs the test benefits from these saved cases.
+ diff --git a/crates/madara/primitives/proto/src/classes.rs b/crates/madara/primitives/proto/src/classes.rs new file mode 100644 index 000000000..dd188e59b --- /dev/null +++ b/crates/madara/primitives/proto/src/classes.rs @@ -0,0 +1,151 @@ +use crate::{model, FromModelError}; +use base64::prelude::*; +use mp_class::{ + ClassInfo, CompressedLegacyContractClass, EntryPointsByType, FlattenedSierraClass, LegacyClassInfo, + LegacyContractEntryPoint, LegacyEntryPointsByType, SierraClassInfo, SierraEntryPoint, +}; +use mp_state_update::DeclaredClassCompiledClass; +use starknet_core::types::Felt; +use std::sync::Arc; + +impl model::class::Class { + pub fn parse_model(self, compiled_class_hash: DeclaredClassCompiledClass) -> Result { + Ok(match self { + Self::Cairo0(tx) => { + if compiled_class_hash != DeclaredClassCompiledClass::Legacy { + return Err(FromModelError::invalid_field("Expected Sierra class, got Legacy")); + } + ClassInfo::Legacy(tx.try_into()?) + } + Self::Cairo1(tx) => { + let DeclaredClassCompiledClass::Sierra(compiled_class_hash) = compiled_class_hash else { + return Err(FromModelError::invalid_field("Expected Legacy class, got Sierra")); + }; + ClassInfo::Sierra(tx.parse_model(compiled_class_hash)?) 
+ } + }) + } +} + +impl TryFrom for LegacyClassInfo { + type Error = FromModelError; + fn try_from(value: model::Cairo0Class) -> Result { + let abi: Vec = + serde_json::from_str(&value.abi).map_err(FromModelError::LegacyClassJsonError)?; + Ok(Self { + contract_class: Arc::new(CompressedLegacyContractClass { + program: BASE64_STANDARD.decode(&value.program).map_err(FromModelError::LegacyClassBase64Decode)?, + entry_points_by_type: LegacyEntryPointsByType { + constructor: value.constructors.into_iter().map(TryInto::try_into).collect::>()?, + external: value.externals.into_iter().map(TryInto::try_into).collect::>()?, + l1_handler: value.l1_handlers.into_iter().map(TryInto::try_into).collect::>()?, + }, + abi: Some(abi.into_iter().map(Into::into).collect()), + }), + }) + } +} + +impl TryFrom for LegacyContractEntryPoint { + type Error = FromModelError; + fn try_from(value: model::EntryPoint) -> Result { + Ok(Self { + offset: value.offset, + selector: value.selector.ok_or(FromModelError::missing_field("EntryPoint::selector"))?.into(), + }) + } +} + +impl model::Cairo1Class { + pub fn parse_model(self, compiled_class_hash: Felt) -> Result { + Ok(SierraClassInfo { + contract_class: Arc::new(FlattenedSierraClass { + sierra_program: self.program.into_iter().map(Into::into).collect(), + contract_class_version: self.contract_class_version, + entry_points_by_type: self.entry_points.unwrap_or_default().try_into()?, + abi: self.abi, + }), + compiled_class_hash, + }) + } +} + +impl TryFrom for EntryPointsByType { + type Error = FromModelError; + fn try_from(value: model::Cairo1EntryPoints) -> Result { + Ok(Self { + constructor: value.constructors.into_iter().map(TryInto::try_into).collect::>()?, + external: value.externals.into_iter().map(TryInto::try_into).collect::>()?, + l1_handler: value.l1_handlers.into_iter().map(TryInto::try_into).collect::>()?, + }) + } +} + +impl TryFrom for SierraEntryPoint { + type Error = FromModelError; + fn try_from(value: model::SierraEntryPoint) 
-> Result { + Ok(Self { + selector: value.selector.ok_or(FromModelError::missing_field("SierraEntryPoint::selector"))?.into(), + function_idx: value.index, + }) + } +} + +impl From for model::class::Class { + fn from(value: ClassInfo) -> Self { + match value { + ClassInfo::Sierra(info) => Self::Cairo1(info.into()), + ClassInfo::Legacy(info) => Self::Cairo0(info.into()), + } + } +} + +impl From for model::Cairo1Class { + fn from(value: SierraClassInfo) -> Self { + let contract_class = Arc::unwrap_or_clone(value.contract_class); + Self { + // TODO(p2p-perf): we should replace the generated DTO with a hand-written one to avoid the copy of the whole class into memory. + abi: contract_class.abi, + entry_points: Some(contract_class.entry_points_by_type.into()), + program: contract_class.sierra_program.into_iter().map(Into::into).collect(), + contract_class_version: contract_class.contract_class_version, + } + } +} + +impl From for model::Cairo1EntryPoints { + fn from(value: EntryPointsByType) -> Self { + Self { + externals: value.external.into_iter().map(Into::into).collect(), + l1_handlers: value.l1_handler.into_iter().map(Into::into).collect(), + constructors: value.constructor.into_iter().map(Into::into).collect(), + } + } +} + +impl From for model::SierraEntryPoint { + fn from(value: SierraEntryPoint) -> Self { + Self { index: value.function_idx, selector: Some(value.selector.into()) } + } +} + +impl From for model::Cairo0Class { + fn from(value: LegacyClassInfo) -> Self { + let contract_class = Arc::unwrap_or_clone(value.contract_class); + Self { + // TODO(dto-faillible-conversion) + abi: serde_json::to_string(&contract_class.abi().expect("Serializing contract class ABI")) + .expect("Serializing contract class ABI to json"), + externals: contract_class.entry_points_by_type.external.into_iter().map(Into::into).collect(), + l1_handlers: contract_class.entry_points_by_type.l1_handler.into_iter().map(Into::into).collect(), + constructors: 
contract_class.entry_points_by_type.constructor.into_iter().map(Into::into).collect(), + program: BASE64_STANDARD.encode(&contract_class.program), + } + } +} + +impl From for model::EntryPoint { + fn from(value: LegacyContractEntryPoint) -> Self { + Self { selector: Some(value.selector.into()), offset: value.offset } + } +} diff --git a/crates/madara/primitives/proto/src/events.rs b/crates/madara/primitives/proto/src/events.rs new file mode 100644 index 000000000..551e29123 --- /dev/null +++ b/crates/madara/primitives/proto/src/events.rs @@ -0,0 +1,30 @@ +use crate::{model, model_field, CollectInto, FromModelError}; +use m_proc_macros::model_describe; +use mp_receipt::{Event, EventWithTransactionHash}; + +impl From for model::Event { + fn from(value: EventWithTransactionHash) -> Self { + Self { + transaction_hash: Some(value.transaction_hash.into()), + from_address: Some(value.event.from_address.into()), + keys: value.event.keys.collect_into(), + data: value.event.data.collect_into(), + } + } +} + +impl TryFrom for EventWithTransactionHash { + type Error = FromModelError; + + #[model_describe(model::Event)] + fn try_from(value: model::Event) -> Result { + Ok(Self { + transaction_hash: model_field!(value => transaction_hash).into(), + event: Event { + from_address: model_field!(value => from_address).into(), + keys: value.keys.collect_into(), + data: value.data.collect_into(), + }, + }) + } +} diff --git a/crates/madara/primitives/proto/src/headers.rs b/crates/madara/primitives/proto/src/headers.rs new file mode 100644 index 000000000..318ef9b66 --- /dev/null +++ b/crates/madara/primitives/proto/src/headers.rs @@ -0,0 +1,120 @@ +use crate::{ + model::{self}, + model_field, model_field_variant, CollectInto, FromModelError, +}; +use m_proc_macros::model_describe; +use mp_block::{ + header::{GasPrices, L1DataAvailabilityMode}, + BlockHeaderWithSignatures, ConsensusSignature, Header, +}; + +impl TryFrom for BlockHeaderWithSignatures { + type Error = FromModelError; + + 
#[model_describe(model::SignedBlockHeader)] + fn try_from(value: model::SignedBlockHeader) -> Result { + let transactions = model_field!(value => transactions); + let events = model_field!(value => events); + let state_diff_commitment = model_field!(value => state_diff_commitment); + Ok(Self { + header: Header { + parent_block_hash: model_field!(value => parent_hash).into(), + block_number: value.number, + global_state_root: model_field!(value => state_root).into(), + sequencer_address: model_field!(value => sequencer_address).into(), + block_timestamp: mp_block::header::BlockTimestamp(value.time), + transaction_count: transactions.n_leaves, + transaction_commitment: model_field!(transactions => root).into(), + event_count: events.n_leaves, + event_commitment: model_field!(events => root).into(), + state_diff_length: Some(state_diff_commitment.state_diff_length), + state_diff_commitment: Some(model_field!(state_diff_commitment => root).into()), + receipt_commitment: Some(model_field!(value => receipts).into()), + protocol_version: value + .protocol_version + .parse() + .map_err(|_| FromModelError::invalid_field("protocol_version"))?, + l1_gas_price: GasPrices { + eth_l1_gas_price: model_field!(value => l1_gas_price_wei).into(), + strk_l1_gas_price: model_field!(value => l1_gas_price_fri).into(), + eth_l1_data_gas_price: model_field!(value => l1_data_gas_price_wei).into(), + strk_l1_data_gas_price: model_field!(value => l1_data_gas_price_fri).into(), + }, + l1_da_mode: model_field_variant!(model::L1DataAvailabilityMode => value.l1_data_availability_mode) + .into(), + }, + block_hash: model_field!(value => block_hash).into(), + consensus_signatures: value.signatures.into_iter().map(TryInto::try_into).collect::>()?, + }) + } +} + +impl TryFrom for ConsensusSignature { + type Error = FromModelError; + + #[model_describe(model::ConsensusSignature)] + fn try_from(value: model::ConsensusSignature) -> Result { + Ok(Self { r: model_field!(value => r).into(), s: 
model_field!(value => s).into() }) + } +} + +impl From for model::SignedBlockHeader { + fn from(val: BlockHeaderWithSignatures) -> Self { + model::SignedBlockHeader { + block_hash: Some(val.block_hash.into()), + parent_hash: Some(val.header.parent_block_hash.into()), + number: val.header.block_number, + time: val.header.block_timestamp.0, + sequencer_address: Some(val.header.sequencer_address.into()), + state_root: Some(val.header.global_state_root.into()), + state_diff_commitment: val.header.state_diff_commitment.zip(val.header.state_diff_length).map( + |(commitment, state_diff_length)| model::StateDiffCommitment { + state_diff_length, + root: Some(commitment.into()), + }, + ), + transactions: Some(model::Patricia { + n_leaves: val.header.transaction_count, + root: Some(val.header.transaction_commitment.into()), + }), + events: Some(model::Patricia { + n_leaves: val.header.event_count, + root: Some(val.header.event_commitment.into()), + }), + receipts: val.header.receipt_commitment.map(Into::into), + protocol_version: val.header.protocol_version.to_string(), + l1_gas_price_fri: Some(val.header.l1_gas_price.strk_l1_gas_price.into()), + l1_gas_price_wei: Some(val.header.l1_gas_price.eth_l1_gas_price.into()), + l1_data_gas_price_fri: Some(val.header.l1_gas_price.strk_l1_data_gas_price.into()), + l1_data_gas_price_wei: Some(val.header.l1_gas_price.eth_l1_data_gas_price.into()), + l1_data_availability_mode: model::L1DataAvailabilityMode::from(val.header.l1_da_mode).into(), + l2_gas_price_fri: None, // TODO: update blockifier + l2_gas_price_wei: None, + signatures: val.consensus_signatures.collect_into(), + } + } +} + +impl From for model::ConsensusSignature { + fn from(value: ConsensusSignature) -> Self { + Self { r: Some(value.r.into()), s: Some(value.s.into()) } + } +} + +impl From for model::L1DataAvailabilityMode { + fn from(value: L1DataAvailabilityMode) -> Self { + match value { + L1DataAvailabilityMode::Calldata => Self::Calldata, + L1DataAvailabilityMode::Blob 
=> Self::Blob, + } + } +} + +impl From for L1DataAvailabilityMode { + fn from(value: model::L1DataAvailabilityMode) -> Self { + match value { + model::L1DataAvailabilityMode::Calldata => Self::Calldata, + model::L1DataAvailabilityMode::Blob => Self::Blob, + } + } +} diff --git a/crates/madara/primitives/proto/src/lib.rs b/crates/madara/primitives/proto/src/lib.rs new file mode 100644 index 000000000..71717db34 --- /dev/null +++ b/crates/madara/primitives/proto/src/lib.rs @@ -0,0 +1,98 @@ +use mc_db::stream::{BlockStreamConfig, Direction}; +use std::borrow::Cow; + +#[allow(clippy::all)] +pub mod model { + pub use crate::model_primitives::*; + include!(concat!(env!("OUT_DIR"), "/_.rs")); +} + +mod classes; +mod events; +mod headers; +mod model_primitives; +mod proposal; +mod transactions; + +#[derive(thiserror::Error, Debug)] +pub enum FromModelError { + #[error("Missing field: {0}")] + MissingField(Cow<'static, str>), + #[error("Invalid field: {0}")] + InvalidField(Cow<'static, str>), + #[error("Invalid enum variant for {ty}: {value}")] + InvalidEnumVariant { ty: Cow<'static, str>, value: i32 }, + #[error("Legacy class conversion json error: {0:#}")] + LegacyClassJsonError(serde_json::Error), + #[error("Legacy class base64 decode error: {0:#}")] + LegacyClassBase64Decode(base64::DecodeError), +} + +impl FromModelError { + pub fn missing_field(s: impl Into>) -> Self { + Self::MissingField(s.into()) + } + pub fn invalid_field(s: impl Into>) -> Self { + Self::InvalidField(s.into()) + } + pub fn invalid_enum_variant(ty: impl Into>, value: i32) -> Self { + Self::InvalidEnumVariant { ty: ty.into(), value } + } +} + +#[macro_export] +macro_rules! model_field { + ($struct:expr => $value:ident) => { + $struct.$value.ok_or($crate::FromModelError::missing_field(format!("{}::{}", __MODEL, stringify!($value))))? + }; +} + +#[macro_export] +macro_rules! 
model_field_variant { + ($model:ty => $value:expr) => { + <$model>::try_from($value).map_err(|_| { + FromModelError::invalid_enum_variant(concat!(stringify!($model), "::", stringify!($value)), $value) + })? + }; +} + +pub(crate) trait TryIntoField { + fn try_into_field(self, repr: &'static str) -> Result; +} + +impl TryIntoField for S +where + S: TryInto, +{ + fn try_into_field(self, repr: &'static str) -> Result { + self.try_into().map_err(|_| FromModelError::invalid_field(repr)) + } +} + +pub(crate) trait CollectInto { + fn collect_into(self) -> Vec; +} + +impl CollectInto for Vec +where + V: Into, +{ + fn collect_into(self) -> Vec { + self.into_iter().map(Into::into).collect() + } +} + +impl From for model::Iteration { + fn from(value: BlockStreamConfig) -> Self { + Self { + direction: match value.direction { + Direction::Forward => model::iteration::Direction::Forward, + Direction::Backward => model::iteration::Direction::Backward, + } + .into(), + limit: value.limit.unwrap_or_default(), + step: value.step.get(), + start: Some(model::iteration::Start::BlockNumber(value.start)), + } + } +} diff --git a/crates/madara/primitives/proto/src/model_primitives.rs b/crates/madara/primitives/proto/src/model_primitives.rs new file mode 100644 index 000000000..ca16bec32 --- /dev/null +++ b/crates/madara/primitives/proto/src/model_primitives.rs @@ -0,0 +1,257 @@ +//! We override some of the codegen definitions to avoid useless allocations, in particular, +//! [`Felt252`], [`Hash`], [`Hash256`]. [`Uint128`], [`Address`] (felt251), [`EthereumAddress`]. 
TODO: [`PeerId`] + +use crate::model; +use bytes::Buf; +use bytes::BufMut; +use bytes::Bytes; +use mp_convert::FeltExt; +use prost::DecodeError; +use starknet_core::types::Felt as SnFelt; +use starknet_core::types::Hash256 as SnHash256; +use std::ops::{Deref, DerefMut}; + +trait WrappedBytesPrimitive: Default { + fn to_bytes(&self, dest: &mut impl BufMut); + fn bytes_len(&self) -> usize; + fn from_bytes(b: Bytes) -> Result; +} + +macro_rules! impl_message_for_wrappeed_bytes_primitive { + ($type_name:ident) => { + impl prost::Message for $type_name { + fn encode_raw(&self, buf: &mut impl prost::bytes::BufMut) { + prost::encoding::encode_key(1u32, prost::encoding::wire_type::WireType::LengthDelimited, buf); + prost::encoding::encode_varint(self.bytes_len() as u64, buf); + self.to_bytes(buf) + } + fn merge_field( + &mut self, + tag: u32, + wire_type: prost::encoding::wire_type::WireType, + buf: &mut impl prost::bytes::Buf, + ctx: prost::encoding::DecodeContext, + ) -> Result<(), DecodeError> { + if tag == 1u32 { + let len = prost::encoding::decode_varint(buf)?; + if len > buf.remaining() as u64 { + return Err(DecodeError::new("Message size error")); + } + let len = len as usize; + + *self = Self::from_bytes(buf.copy_to_bytes(len))?; + Ok(()) + } else { + prost::encoding::skip_field(wire_type, tag, buf, ctx) + } + } + fn encoded_len(&self) -> usize { + let len = self.bytes_len(); + prost::encoding::key_len(1u32) + prost::encoding::encoded_len_varint(len as u64) + len + } + fn clear(&mut self) { + *self = Default::default(); + } + } + }; +} + +macro_rules! 
impl_wrapper_traits { + ($type_name:ident => $inner:ident) => { + impl From<$inner> for $type_name { + fn from(value: $inner) -> Self { + Self(value) + } + } + impl From<$type_name> for $inner { + fn from(value: $type_name) -> Self { + value.0 + } + } + impl Deref for $type_name { + type Target = $inner; + fn deref(&self) -> &Self::Target { + &self.0 + } + } + impl DerefMut for $type_name { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } + } + }; +} + +#[derive(Clone, Default, PartialEq, Debug)] +pub struct Felt252(pub SnFelt); +impl_wrapper_traits!(Felt252 => SnFelt); + +impl WrappedBytesPrimitive for Felt252 { + fn to_bytes(&self, dest: &mut impl BufMut) { + let bytes = self.0.to_bytes_be(); + let bytes = &bytes[32 - self.0.slice_be_len()..32]; + dest.put(bytes); + } + fn bytes_len(&self) -> usize { + self.0.slice_be_len() + } + fn from_bytes(b: Bytes) -> Result { + SnFelt::from_slice_be_checked(&b).map(Self).map_err(|_| DecodeError::new("Malformated felt")) + } +} +impl_message_for_wrappeed_bytes_primitive!(Felt252); + +/// A hash value representable as a [`Felt252`]. 
+#[derive(Clone, Default, PartialEq, Debug)] +pub struct Hash(pub SnFelt); +impl_wrapper_traits!(Hash => SnFelt); + +impl WrappedBytesPrimitive for Hash { + fn to_bytes(&self, dest: &mut impl BufMut) { + let bytes = self.0.to_bytes_be(); + let bytes = &bytes[32 - self.0.slice_be_len()..32]; + dest.put(bytes); + } + fn bytes_len(&self) -> usize { + self.0.slice_be_len() + } + fn from_bytes(b: Bytes) -> Result { + SnFelt::from_slice_be_checked(&b).map(Self).map_err(|_| DecodeError::new("Malformated felt")) + } +} +impl_message_for_wrappeed_bytes_primitive!(Hash); + +/// 2**251 +pub const ADDRESS_UPPER_BOUND: SnFelt = + SnFelt::from_hex_unchecked("0x800000000000000000000000000000000000000000000000000000000000000"); + +#[derive(Clone, Default, PartialEq, Debug)] +pub struct Address(pub SnFelt); +impl_wrapper_traits!(Address => SnFelt); // TODO: we need a proper felt251 primitive + +impl WrappedBytesPrimitive for Address { + fn to_bytes(&self, dest: &mut impl BufMut) { + let bytes = self.0.to_bytes_be(); + let bytes = &bytes[32 - self.0.slice_be_len()..32]; + dest.put(bytes); + } + fn bytes_len(&self) -> usize { + self.0.slice_be_len() + } + fn from_bytes(b: Bytes) -> Result { + let felt = SnFelt::from_slice_be_checked(&b).map_err(|_| DecodeError::new("Malformated felt"))?; + if felt < ADDRESS_UPPER_BOUND { + Ok(Self(felt)) + } else { + Err(DecodeError::new("Address out of range")) + } + } +} +impl_message_for_wrappeed_bytes_primitive!(Address); + +#[derive(Clone, Default, PartialEq, Debug)] +pub struct EthereumAddress(pub [u8; 20]); +impl From for SnFelt { + fn from(value: EthereumAddress) -> Self { + SnFelt::from_bytes_be_slice(&value.0) + } +} +impl From for EthereumAddress { + fn from(value: SnFelt) -> Self { + // TODO: replace the primitive type in mp_receipt + Self(value.to_bytes_be()[32 - 20..].try_into().unwrap()) + } +} + +impl WrappedBytesPrimitive for EthereumAddress { + fn to_bytes(&self, dest: &mut impl BufMut) { + dest.put(&self.0[..]); + } + fn 
bytes_len(&self) -> usize { + self.0.len() // always 20 bytes + } + fn from_bytes(b: Bytes) -> Result { + b[..].try_into().map(Self).map_err(|_| DecodeError::new("Expected exactly 32 bytes for Hash256 value")) + } +} +impl_message_for_wrappeed_bytes_primitive!(EthereumAddress); + +#[derive(Clone, PartialEq, Debug)] +pub struct Hash256(pub SnHash256); +impl_wrapper_traits!(Hash256 => SnHash256); + +impl WrappedBytesPrimitive for Hash256 { + fn to_bytes(&self, dest: &mut impl BufMut) { + dest.put(&self.0.as_bytes()[..]); + } + fn bytes_len(&self) -> usize { + self.0.as_bytes().len() // always 32 bytes + } + fn from_bytes(b: Bytes) -> Result { + b[..] + .try_into() + .map(SnHash256::from_bytes) + .map(Self) + .map_err(|_| DecodeError::new("Expected exactly 32 bytes for Hash256 value")) + } +} +impl_message_for_wrappeed_bytes_primitive!(Hash256); + +impl Default for Hash256 { + fn default() -> Self { + Self(SnHash256::from_bytes(Default::default())) + } +} + +// We don't need to redefined Uint128 as it doesnt allocate anything. 
+ +pub use model::Uint128; +impl From for Uint128 { + fn from(value: u128) -> Self { + let b = value.to_le_bytes(); + let low = u64::from_le_bytes([b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]]); + let high = u64::from_le_bytes([b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15]]); + Self { low, high } + } +} +impl From for u128 { + fn from(value: Uint128) -> Self { + let l = value.low.to_le_bytes(); + let h = value.high.to_le_bytes(); + u128::from_le_bytes([ + l[0], l[1], l[2], l[3], l[4], l[5], l[6], l[7], h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7], + ]) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn felt_252() { + let test_one = |felt, serialized| { + let mut dest = vec![]; + Felt252::from(felt).to_bytes(&mut dest); + assert_eq!(&dest[..], serialized); + let back = Felt252::from_bytes(dest.into()).unwrap(); + assert_eq!(SnFelt::from(back), felt); + }; + test_one(SnFelt::from_hex_unchecked("0x0"), &[0u8; 0] as &[u8]); + test_one(SnFelt::from_hex_unchecked("0x1"), &[1]); + test_one( + SnFelt::from_hex_unchecked("0x40000000000001100000000000012100000000000000000000000000000000"), + &[64, 0, 0, 0, 0, 0, 1, 16, 0, 0, 0, 0, 0, 1, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + ); + test_one( + SnFelt::from_hex_unchecked("0x800000000000011000000000000000000000000000000000000000000000000"), + &[8, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + ); + assert!(Felt252::from_bytes( + (&[8u8, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1] + as &[_]) + .into() + ) + .is_err()); + } +} diff --git a/crates/madara/primitives/proto/src/proposal.rs b/crates/madara/primitives/proto/src/proposal.rs new file mode 100644 index 000000000..51ae2fecc --- /dev/null +++ b/crates/madara/primitives/proto/src/proposal.rs @@ -0,0 +1,924 @@ +use std::collections::BTreeSet; + +use m_proc_macros::model_describe; +use prost::Message; + +use crate::{ + model::{self, 
stream_message}, + model_field, +}; + +#[derive(thiserror::Error, Debug)] +pub enum AccumulateError { + #[error("Invalid stream id: expected {0:?}, got {1:?}")] + InvalidStreamId(model::ConsensusStreamId, model::ConsensusStreamId), + #[error("{0} is more than the max amount of bytes which can be received ({1})")] + MaxBounds(usize, usize), + #[error("New Fin with id {1} but already received Fin at message id {0}")] + DoubleFin(u64, u64), + #[error("Failed to decode model: {0:?}")] + DecodeError(#[from] prost::DecodeError), + #[error(transparent)] + ModelError(#[from] crate::FromModelError), +} + +#[derive(Debug)] +#[cfg_attr(test, derive(Clone))] +pub enum OrderedStreamAccumulator +where + T: prost::Message, + T: Default, +{ + Accumulate(OrderedStreamAccumulatorInner), + Done(T), +} + +#[derive(Debug)] +#[cfg_attr(test, derive(Clone))] +pub struct OrderedStreamAccumulatorInner +where + T: prost::Message, + T: Default, +{ + stream_id: Option>, + messages: BTreeSet, + limits: OrderedStreamLimits, + fin: Option, + _phantom: std::marker::PhantomData, +} + +#[derive(Debug)] +#[cfg_attr(test, derive(Clone))] +struct OrderedStreamItem { + content: Vec, + message_id: u64, +} + +impl PartialEq for OrderedStreamItem { + fn eq(&self, other: &Self) -> bool { + self.message_id == other.message_id + } +} + +impl Eq for OrderedStreamItem {} + +impl Ord for OrderedStreamItem { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.message_id.cmp(&other.message_id) + } +} + +impl PartialOrd for OrderedStreamItem { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +#[derive(Debug)] +#[cfg_attr(test, derive(Clone))] +struct OrderedStreamLimits { + max: usize, + current: usize, +} + +impl Default for OrderedStreamLimits { + fn default() -> Self { + Self { max: usize::MAX, current: 0 } + } +} + +impl OrderedStreamLimits { + fn new(max: usize) -> Self { + Self { max, current: 0 } + } + + fn update(&mut self, increment: usize) -> Result<(), 
AccumulateError> { + if self.current + increment > self.max { + Err(AccumulateError::MaxBounds(self.current + increment, self.max)) + } else { + self.current += increment; + Ok(()) + } + } +} + +impl OrderedStreamAccumulator +where + T: prost::Message, + T: Default, +{ + pub fn new() -> Self { + Self::Accumulate(OrderedStreamAccumulatorInner:: { + stream_id: None, + messages: Default::default(), + limits: Default::default(), + fin: None, + _phantom: std::marker::PhantomData, + }) + } + + pub fn new_with_limits(max: usize) -> Self { + Self::Accumulate(OrderedStreamAccumulatorInner:: { + stream_id: None, + messages: Default::default(), + limits: OrderedStreamLimits::new(max), + fin: None, + _phantom: std::marker::PhantomData, + }) + } + + #[model_describe(model::StreamMessage)] + pub fn accumulate(self, stream_message: model::StreamMessage) -> Result { + match self { + Self::Accumulate(mut inner) => { + let stream_id = inner.stream_id.get_or_insert_with(|| stream_message.stream_id.clone()); + let message_id = stream_message.message_id; + + Self::check_stream_id(&stream_message.stream_id, stream_id)?; + + match model_field!(stream_message => message) { + stream_message::Message::Content(bytes) => Self::update_content(inner, message_id, &bytes), + stream_message::Message::Fin(_) => Self::update_fin(inner, message_id), + } + } + Self::Done(_) => Ok(self), + } + } + + pub fn is_done(&self) -> bool { + matches!(self, Self::Done(_)) + } + + pub fn consume(self) -> Option { + match self { + Self::Accumulate(_) => None, + Self::Done(res) => Some(res), + } + } + + fn check_stream_id(actual: &[u8], expected: &[u8]) -> Result<(), AccumulateError> { + if actual != expected { + let actual = model::ConsensusStreamId::decode(actual)?; + let expected = model::ConsensusStreamId::decode(expected)?; + Err(AccumulateError::InvalidStreamId(actual, expected)) + } else { + Ok(()) + } + } + + fn update_content( + mut inner: OrderedStreamAccumulatorInner, + message_id: u64, + bytes: &[u8], + 
) -> Result { + inner.limits.update(bytes.len())?; + + let item = OrderedStreamItem { content: bytes.to_vec(), message_id }; + inner.messages.insert(item); + + match inner.fin { + Some(id) => Self::handle_fin(inner, id), + None => Ok(Self::Accumulate(inner)), + } + } + + fn update_fin(mut inner: OrderedStreamAccumulatorInner, message_id: u64) -> Result { + inner.fin = match inner.fin { + Some(id) => return Err(AccumulateError::DoubleFin(id, message_id)), + None => Some(message_id), + }; + + Self::handle_fin(inner, message_id) + } + + fn handle_fin(inner: OrderedStreamAccumulatorInner, message_id: u64) -> Result { + if inner.messages.len() == message_id as usize { + let bytes = inner.messages.into_iter().flat_map(|m| m.content).collect::>(); + Ok(Self::Done(T::decode(bytes.as_slice())?)) + } else { + Ok(Self::Accumulate(inner)) + } + } +} + +#[cfg(test)] +mod test { + use prost::Message; + use rand::{seq::SliceRandom, SeedableRng}; + use starknet_core::types::Felt; + + use crate::{ + model::{self}, + proposal::{AccumulateError, OrderedStreamAccumulator}, + }; + + #[rstest::fixture] + fn proposal_part() -> model::ProposalPart { + model::ProposalPart { + messages: Some(model::proposal_part::Messages::Init(model::ProposalInit { + height: 1, + round: 2, + valid_round: Some(3), + proposer: Some(model::Address(Felt::ONE)), + })), + } + } + + #[rstest::fixture] + fn stream_proposal_part( + proposal_part: model::ProposalPart, + ) -> impl Iterator { + let mut buffer = Vec::new(); + proposal_part.encode(&mut buffer).expect("Failed to encode proposal part"); + + buffer + .chunks(buffer.len() / 10) + .map(Vec::from) + .map(model::stream_message::Message::Content) + .chain(std::iter::once(model::stream_message::Message::Fin(model::Fin {}))) + .collect::>() + .into_iter() + } + + #[rstest::fixture] + fn stream_proposal_part_shuffled( + stream_proposal_part: impl Iterator, + ) -> impl Iterator { + let mut rng = rand::rngs::SmallRng::seed_from_u64(42); + let mut messages = 
stream_proposal_part.collect::>(); + messages.shuffle(&mut rng); + return messages.into_iter(); + } + + #[rstest::fixture] + fn stream_id(#[default(0)] seed: u64) -> Vec { + let stream_id = model::ConsensusStreamId { height: seed, round: (seed + 1) as u32 }; + let mut stream_id_buffer = Vec::new(); + stream_id.encode(&mut stream_id_buffer).expect("Failed to encode stream id"); + + stream_id_buffer + } + + #[rstest::fixture] + fn stream_message( + stream_proposal_part: impl Iterator, + #[with(1)] stream_id: Vec, + ) -> impl Iterator { + stream_proposal_part.enumerate().map(move |(i, message)| model::StreamMessage { + message: Some(message), + stream_id: stream_id.clone(), + message_id: i as u64, + }) + } + + #[rstest::fixture] + fn stream_message_shuffled( + stream_proposal_part: impl Iterator, + #[with(1)] stream_id: Vec, + ) -> impl Iterator { + let mut rng = rand::rngs::SmallRng::seed_from_u64(42); + let mut stream_messages = stream_proposal_part + .enumerate() + .map(move |(i, message)| model::StreamMessage { + message: Some(message), + stream_id: stream_id.clone(), + message_id: i as u64, + }) + .collect::>(); + stream_messages.shuffle(&mut rng); + + stream_messages.into_iter() + } + + #[rstest::fixture] + fn stream_message_invalid_stream_id( + mut stream_proposal_part: impl Iterator, + #[from(stream_id)] + #[with(1)] + stream_id_1: Vec, + #[from(stream_id)] + #[with(2)] + stream_id_2: Vec, + ) -> impl Iterator { + vec![ + stream_proposal_part + .next() + .map(|message| model::StreamMessage { message: Some(message), stream_id: stream_id_1, message_id: 0 }) + .expect("Failed to generate stream message"), + stream_proposal_part + .next() + .map(|message| model::StreamMessage { message: Some(message), stream_id: stream_id_2, message_id: 0 }) + .expect("Failed to generate stream message"), + ] + .into_iter() + } + + #[rstest::fixture] + fn stream_message_double_fin( + stream_message: impl Iterator, + #[with(1)] stream_id: Vec, + ) -> impl Iterator { + 
std::iter::once(model::StreamMessage { + message: Some(model::stream_message::Message::Fin(model::Fin {})), + stream_id: stream_id.clone(), + message_id: u64::MAX, + }) + .chain(stream_message) + .map(move |mut stream_message| { + stream_message.stream_id = stream_id.clone(); + stream_message + }) + } + + #[rstest::fixture] + fn stream_message_decode_error( + mut stream_proposal_part: impl Iterator, + #[with(1)] stream_id: Vec, + ) -> impl Iterator { + std::iter::once(model::StreamMessage { + message: Some(stream_proposal_part.next().unwrap()), + stream_id: stream_id.clone(), + message_id: 0, + }) + .chain(std::iter::once(model::StreamMessage { + message: Some(model::stream_message::Message::Fin(model::Fin {})), + stream_id: stream_id.clone(), + message_id: 0, + })) + } + + #[rstest::fixture] + fn stream_message_model_error(#[with(1)] stream_id: Vec) -> impl Iterator { + std::iter::once(model::StreamMessage { message: None, stream_id, message_id: 0 }) + } + + /// Receives a proposal part in a single, ordered stream. All should work as + /// expected + #[rstest::rstest] + #[timeout(std::time::Duration::from_secs(1))] + fn ordered_stream_simple( + proposal_part: model::ProposalPart, + stream_message: impl Iterator, + ) { + let mut accumulator = OrderedStreamAccumulator::::new(); + let mut i = 0; + + for message in stream_message { + accumulator = accumulator.accumulate(message).expect("Failed to accumulate message stream"); + i += 1; + } + + assert!(i > 1, "Proposal part was streamed over a single message"); + assert!(accumulator.is_done()); + + let proposal_part_actual = accumulator.consume(); + assert_eq!( + proposal_part_actual, + Some(proposal_part), + "Failed to reconstruct proposal part from message stream" + ); + } + + /// Receives a proposal part with a bound to the number of bytes which can + /// be received. 
+ #[rstest::rstest] + #[timeout(std::time::Duration::from_secs(1))] + fn ordered_stream_bounded( + proposal_part: model::ProposalPart, + stream_message: impl Iterator, + ) { + let limit = proposal_part.encode_to_vec().len(); + let mut accumulator = OrderedStreamAccumulator::::new_with_limits(limit); + let mut i = 0; + + for message in stream_message { + accumulator = accumulator.accumulate(message).expect("Failed to accumulate message stream"); + i += 1; + } + + assert!(i > 1, "Proposal part was streamed over a single message"); + assert!(accumulator.is_done()); + + let proposal_part_actual = accumulator.consume(); + assert_eq!( + proposal_part_actual, + Some(proposal_part), + "Failed to reconstruct proposal part from message stream" + ); + } + + /// Receives a proposal part in an _unordered_ stream. The + /// [OrderedStreamAccumulator] has to sort the inputs and decode them + /// correctly. + #[rstest::rstest] + #[timeout(std::time::Duration::from_secs(1))] + fn ordered_stream_shuffled( + proposal_part: model::ProposalPart, + stream_message_shuffled: impl Iterator, + ) { + let mut accumulator = OrderedStreamAccumulator::::new(); + let mut i = 0; + + for message in stream_message_shuffled { + accumulator = accumulator.accumulate(message).expect("Failed to accumulate message stream"); + i += 1; + } + + assert!(i > 1, "Proposal part was streamed over a single message"); + assert!(accumulator.is_done()); + + let proposal_part_actual = accumulator.consume(); + assert_eq!( + proposal_part_actual, + Some(proposal_part), + "Failed to reconstruct proposal part from message stream" + ); + } + + /// Receives a proposal part with different stream ids. This is indicative + /// of multiple streams overlapping and should not happen if the sender is + /// not malfunctioning. 
+ #[rstest::rstest] + #[timeout(std::time::Duration::from_secs(1))] + fn ordered_stream_fail_invalid_stream_id( + mut stream_message_invalid_stream_id: impl Iterator, + ) { + let mut accumulator = OrderedStreamAccumulator::::new(); + + accumulator = accumulator + .accumulate(stream_message_invalid_stream_id.next().unwrap()) + .expect("Failed on first message reception: this should not happen"); + + assert_matches::assert_matches!( + accumulator.accumulate(stream_message_invalid_stream_id.next().unwrap()), + Err(AccumulateError::InvalidStreamId(..)) + ); + } + + /// Receives a proposal part in a stream with more bytes than is allowed in + /// the stream limits. + #[rstest::rstest] + #[timeout(std::time::Duration::from_secs(1))] + fn ordered_stream_fail_max_bounds( + proposal_part: model::ProposalPart, + stream_message: impl Iterator, + ) { + let limit = proposal_part.encode_to_vec().len(); + let mut accumulator = OrderedStreamAccumulator::::new_with_limits(limit - 1); + + for message in stream_message { + accumulator = match accumulator.accumulate(message) { + Ok(accumulator) => accumulator, + Err(e) => { + assert_matches::assert_matches!(e, AccumulateError::MaxBounds(..)); + break; + } + }; + } + } + + /// Receives a proposal part in a stream with multiple FIN messages. This is + /// considered malicious. + #[rstest::rstest] + #[timeout(std::time::Duration::from_secs(1))] + fn ordered_stream_fail_double_fin(stream_message_double_fin: impl Iterator) { + let mut accumulator = OrderedStreamAccumulator::::new(); + + for message in stream_message_double_fin { + accumulator = match accumulator.accumulate(message) { + Ok(accumulator) => accumulator, + Err(e) => { + assert_matches::assert_matches!(e, AccumulateError::DoubleFin(..)); + break; + } + }; + } + } + + /// Receives a proposal part in a stream. The proposal part is only + /// partially sent before the FIN, so this should result in a decode error. 
+ #[rstest::rstest] + #[timeout(std::time::Duration::from_secs(1))] + fn ordered_stream_fail_decode_error(stream_message_decode_error: impl Iterator) { + let mut accumulator = OrderedStreamAccumulator::::new(); + + for message in stream_message_decode_error { + accumulator = match accumulator.accumulate(message) { + Ok(accumulator) => accumulator, + Err(e) => { + assert_matches::assert_matches!(e, AccumulateError::DecodeError(..)); + break; + } + }; + } + } + + /// Receives a proposal part in a stream. Protobuf allows for all message + /// fields to be optional. In our case, we consider any missing field which + /// is not explicitly marked as `optional` to be required, and return an + /// error if this is the case. + #[rstest::rstest] + #[timeout(std::time::Duration::from_secs(1))] + fn ordered_stream_fail_model_error(stream_message_model_error: impl Iterator) { + let mut accumulator = OrderedStreamAccumulator::::new(); + + for message in stream_message_model_error { + accumulator = match accumulator.accumulate(message) { + Ok(accumulator) => accumulator, + Err(e) => { + assert_matches::assert_matches!(e, AccumulateError::ModelError(..)); + break; + } + }; + } + } +} + +#[cfg(test)] +mod proptest { + use std::collections::VecDeque; + + use proptest::prelude::*; + use proptest::prop_compose; + use proptest_state_machine::ReferenceStateMachine; + use proptest_state_machine::StateMachineTest; + use prost::Message; + use starknet_core::types::Felt; + + use crate::model; + + use super::AccumulateError; + use super::OrderedStreamAccumulator; + + type SUT = OrderedStreamAccumulator; + + proptest_state_machine::prop_state_machine! { + #![proptest_config(proptest::prelude::ProptestConfig { + // Enable verbose mode to make the state machine test print the + // transitions for each case. + verbose: 1, + // The number of tests which need to be valid for this to pass. + cases: 64, + // Max duration (in milliseconds) for each generated case. 
+ timeout: 1_000, + ..Default::default() + })] + + #[test] + fn ordered_stream_proptest(sequential 1..256 => SUT); + } + + prop_compose! { + fn stream_id()(seed in 0..100u64) -> Vec { + let stream_id = model::ConsensusStreamId { height: seed, round: seed as u32 }; + let mut buffer = Vec::new(); + stream_id.encode(&mut buffer).expect("Failed to encode stream id"); + + buffer + } + } + + prop_compose! { + fn proposal_part()(len in 10..100usize) -> model::ProposalPart { + let tx = model::ConsensusTransaction { + transaction_hash: Some(model::Hash(Felt::ONE)), + txn: Some(model::consensus_transaction::Txn::L1Handler(model::L1HandlerV0 { + nonce: Some(model::Felt252(Felt::ZERO)), + address: Some(model::Address(Felt::ONE)), + entry_point_selector: Some(model::Felt252(Felt::TWO)), + calldata: vec![model::Felt252(Felt::THREE); 12] + })) + }; + + model::ProposalPart { + messages: Some(model::proposal_part::Messages::Transactions(model::TransactionBatch { + transactions: vec![tx; len] + })) + } + } + } + + prop_compose! { + fn stream_messages(stream_id: Vec, proposal_part: model::ProposalPart)( + split_into in 1..256usize + ) -> VecDeque { + let mut buffer = Vec::new(); + proposal_part.encode(&mut buffer).expect("Failed to encode proposal part"); + + buffer + .chunks(buffer.len() / split_into) + .map(Vec::from) + .map(model::stream_message::Message::Content) + .chain(std::iter::once(model::stream_message::Message::Fin(model::Fin {}))) + .enumerate() + .map(|(i, message)| model::StreamMessage { + message: Some(message), + stream_id: stream_id.clone(), + message_id: i as u64 + }) + .collect() + } + } + + prop_compose! 
{ + fn reference_state_machine()( + stream_id in stream_id(), + proposal_part in proposal_part() + )( + stream_messages in stream_messages(stream_id.clone(), proposal_part.clone()), + stream_id in Just(stream_id), + proposal_part in Just(proposal_part), + delta in 0..10_000usize + ) -> OrderedStreamAccumulatorStateMachine { + let size = proposal_part.encoded_len(); + let limit = if delta > 5_000 { + size.saturating_sub(delta) + } else { + size.saturating_add(delta) + }; + + OrderedStreamAccumulatorStateMachine { + proposal_part, + stream_messages, + stream_id, + message_id: 0, + size, + limit, + } + } + } + + #[derive(Clone)] + pub enum ProptestTransition { + Accumulate(model::StreamMessage), + ActMalicious(ProptestMaliciousTransition), + Collect, + } + + #[derive(Clone)] + pub enum ProptestMaliciousTransition { + InvalidStreamId(model::StreamMessage), + InsertGarbageData(model::StreamMessage), + DoubleFin(model::StreamMessage), + InvalidModel(model::StreamMessage), + } + + impl std::fmt::Debug for ProptestTransition { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Accumulate(_) => f.debug_tuple("Accumulate").finish(), + Self::ActMalicious(transition) => f.debug_tuple("ActMalicious").field(&transition).finish(), + Self::Collect => f.debug_tuple("Collect").finish(), + } + } + } + + impl std::fmt::Debug for ProptestMaliciousTransition { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::InvalidStreamId(_) => f.debug_tuple("InvalidStreamId").finish(), + Self::InsertGarbageData(_) => f.debug_tuple("InsertGarbageData").finish(), + Self::DoubleFin(_) => f.debug_tuple("DoubleFin").finish(), + Self::InvalidModel(_) => f.debug_tuple("InvalidModel").finish(), + } + } + } + + #[derive(Clone)] + pub struct OrderedStreamAccumulatorStateMachine { + proposal_part: model::ProposalPart, + stream_messages: VecDeque, + stream_id: Vec, + message_id: u64, + size: usize, + limit: usize, + } + + 
impl std::fmt::Debug for OrderedStreamAccumulatorStateMachine { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let stream_messages = self + .stream_messages + .iter() + .map(|m| { + m.message.clone().map(|m| match m { + model::stream_message::Message::Content(_) => "Content(...)", + model::stream_message::Message::Fin(_) => "Fin", + }) + }) + .take(5) + .collect::>(); + + let stream_messages = if stream_messages.len() < self.stream_messages.len() { + format!("{stream_messages:?}... ({} items)", self.stream_messages.len()) + } else { + format!("{stream_messages:?} ({} items)", self.stream_messages.len()) + }; + + f.debug_struct("OrderedStreamAccumulatorStateMachine") + .field("stream_messages", &stream_messages) + .field("stream_id", &model::ConsensusStreamId::decode(self.stream_id.as_slice()).unwrap()) + .field("message_id", &self.message_id) + .field("size", &self.size) + .field("limit", &self.limit) + .finish() + } + } + + impl ReferenceStateMachine for OrderedStreamAccumulatorStateMachine { + type State = OrderedStreamAccumulatorStateMachine; + type Transition = ProptestTransition; + + fn init_state() -> BoxedStrategy { + reference_state_machine().boxed() + } + + fn transitions(state: &Self::State) -> BoxedStrategy { + if let Some(stream_message) = state.stream_messages.front() { + prop_oneof![ + 4 => Just(ProptestTransition::Accumulate(stream_message.clone())), + // 1 => Self::act_malicious(state) + ] + .boxed() + } else { + prop_oneof! 
[ + 1 => Just(ProptestTransition::Collect), + // 1 => Self::act_malicious(state) + ] + .boxed() + } + } + + fn apply(mut state: Self::State, transition: &Self::Transition) -> Self::State { + match transition { + ProptestTransition::Accumulate(_) => { + state.stream_messages.pop_front(); + state.message_id += 1; + } + ProptestTransition::ActMalicious(transition) => match transition { + ProptestMaliciousTransition::InvalidStreamId(_) => { + state.stream_messages.pop_front(); + } + ProptestMaliciousTransition::InsertGarbageData(_) => { + state.stream_messages.pop_front(); + state.size += 1; + } + _ => (), + }, + ProptestTransition::Collect => (), + } + + state + } + } + + impl OrderedStreamAccumulatorStateMachine { + fn act_malicious(state: &Self) -> impl Strategy { + let invalid_stream_id = || { + let mut stream_id = state.stream_id.clone(); + stream_id[0] += 1; + + let mut stream_message = state.stream_messages.front().cloned().unwrap_or_default(); + stream_message.stream_id = stream_id; + + ProptestMaliciousTransition::InvalidStreamId(stream_message) + }; + + let insert_garbage_data = || { + let content = state + .stream_messages + .front() + .cloned() + .unwrap_or_default() + .message + .unwrap_or(model::stream_message::Message::Content(vec![])); + + let content = if let model::stream_message::Message::Content(mut content) = content { + content.push(42); + content + } else { + vec![42] + }; + + let stream_message = model::StreamMessage { + message: Some(model::stream_message::Message::Content(content)), + stream_id: state.stream_id.clone(), + message_id: state.message_id, + }; + + ProptestMaliciousTransition::InsertGarbageData(stream_message) + }; + + let double_fin = || { + let stream_message = model::StreamMessage { + message: Some(model::stream_message::Message::Fin(model::Fin {})), + stream_id: state.stream_id.clone(), + message_id: u64::MAX - state.message_id, + }; + + ProptestMaliciousTransition::DoubleFin(stream_message) + }; + + let invalid_model = || { + 
let stream_mesage = model::StreamMessage { + message: None, + stream_id: state.stream_id.clone(), + message_id: u64::MAX / 2 - state.message_id, + }; + + ProptestMaliciousTransition::InvalidModel(stream_mesage) + }; + + prop_oneof![ + Just(ProptestTransition::ActMalicious(invalid_stream_id())), + Just(ProptestTransition::ActMalicious(insert_garbage_data())), + Just(ProptestTransition::ActMalicious(double_fin())), + Just(ProptestTransition::ActMalicious(invalid_model())) + ] + } + } + + impl StateMachineTest for OrderedStreamAccumulator { + type SystemUnderTest = Self; + type Reference = OrderedStreamAccumulatorStateMachine; + + fn init_test(ref_state: &::State) -> Self::SystemUnderTest { + Self::new_with_limits(ref_state.limit) + } + + fn apply( + state: Self::SystemUnderTest, + ref_state: &::State, + transition: ::Transition, + ) -> Self::SystemUnderTest { + match transition { + ProptestTransition::Accumulate(stream_message) => { + let res = state.clone().accumulate(stream_message.clone()); + Self::check_limits(&res, stream_message.clone(), &state, ref_state); + + if matches!(stream_message.message.unwrap(), model::stream_message::Message::Fin(_)) + && ref_state.limit >= ref_state.size + { + if let Ok(res) = &res { + assert_matches::assert_matches!(res, OrderedStreamAccumulator::Done(_)); + } + } + + res.unwrap_or(state) + } + ProptestTransition::ActMalicious(transition) => match transition { + ProptestMaliciousTransition::InvalidStreamId(stream_message) => { + let res = state.clone().accumulate(stream_message.clone()); + Self::check_limits(&res, stream_message, &state, ref_state); + assert_matches::assert_matches!(res, Err(AccumulateError::InvalidStreamId(..))); + res.unwrap_or(state) + } + ProptestMaliciousTransition::InsertGarbageData(stream_message) => { + let res = state.clone().accumulate(stream_message.clone()); + Self::check_limits(&res, stream_message, &state, ref_state); + res.unwrap_or(state) + } + ProptestMaliciousTransition::DoubleFin(stream_message) 
=> { + let res = state.clone().accumulate(stream_message.clone()); + Self::check_limits(&res, stream_message, &state, ref_state); + assert_matches::assert_matches!(res, Err(AccumulateError::DoubleFin(..))); + res.unwrap_or(state) + } + ProptestMaliciousTransition::InvalidModel(stream_message) => { + let res = state.clone().accumulate(stream_message.clone()); + Self::check_limits(&res, stream_message, &state, ref_state); + assert_matches::assert_matches!(res, Err(AccumulateError::ModelError(..))); + res.unwrap_or(state) + } + }, + ProptestTransition::Collect => { + if ref_state.limit >= ref_state.size { + let res = state.clone().consume(); + assert!(state.is_done(), "Called collect on incomplete stream"); + assert!(res.is_some(), "Complete stream returned none"); + assert_eq!(res, Some(ref_state.proposal_part.clone()), "Collected stream does not match"); + } + state + } + } + } + } + + impl OrderedStreamAccumulator { + fn check_limits( + res: &Result, + stream_message: model::StreamMessage, + state: &Self, + ref_state: &OrderedStreamAccumulatorStateMachine, + ) { + if let OrderedStreamAccumulator::Accumulate(inner) = &state { + if let model::stream_message::Message::Content(bytes) = stream_message.message.clone().unwrap() { + if ref_state.limit < ref_state.size && inner.limits.clone().update(bytes.len()).is_err() { + assert_matches::assert_matches!(res, Err(AccumulateError::MaxBounds(..))); + } else { + assert_matches::assert_matches!(res, Ok(..)); + } + } + } + } + } +} diff --git a/crates/madara/primitives/proto/src/transactions.rs b/crates/madara/primitives/proto/src/transactions.rs new file mode 100644 index 000000000..a324408b4 --- /dev/null +++ b/crates/madara/primitives/proto/src/transactions.rs @@ -0,0 +1,811 @@ +use super::FromModelError; +use crate::{ + model::{self, receipt::execution_resources::BuiltinCounter}, + model_field, model_field_variant, CollectInto, TryIntoField, +}; +use m_proc_macros::model_describe; +use mp_block::TransactionWithReceipt; 
+use mp_receipt::{ + DeclareTransactionReceipt, DeployAccountTransactionReceipt, DeployTransactionReceipt, ExecutionResources, + ExecutionResult, FeePayment, InvokeTransactionReceipt, L1Gas, L1HandlerTransactionReceipt, MsgToL1, PriceUnit, + TransactionReceipt, +}; +use mp_transactions::{ + DataAvailabilityMode, DeclareTransaction, DeclareTransactionV0, DeclareTransactionV1, DeclareTransactionV2, + DeclareTransactionV3, DeployAccountTransaction, DeployAccountTransactionV1, DeployAccountTransactionV3, + DeployTransaction, InvokeTransaction, InvokeTransactionV0, InvokeTransactionV1, InvokeTransactionV3, + L1HandlerTransaction, ResourceBounds, ResourceBoundsMapping, Transaction, TransactionWithHash, +}; +use starknet_core::types::Felt; + +impl From for model::TransactionWithReceipt { + fn from(value: TransactionWithReceipt) -> Self { + Self { + transaction: Some(model::TransactionInBlock { + transaction_hash: Some(value.receipt.transaction_hash().into()), + txn: Some(value.transaction.into()), + }), + receipt: Some(value.receipt.into()), + } + } +} + +impl TryFrom for TransactionWithReceipt { + type Error = FromModelError; + + #[model_describe(model::TransactionWithReceipt)] + fn try_from(value: model::TransactionWithReceipt) -> Result { + let transaction = model_field!(value => transaction); + let TransactionWithHash { transaction, hash } = TransactionWithHash::try_from(transaction)?; + let receipt = model_field!(value => receipt).parse_model(hash)?; + Ok(Self { transaction, receipt }) + } +} + +impl TryFrom for TransactionWithHash { + type Error = FromModelError; + + #[model_describe(model::TransactionInBlock)] + fn try_from(value: model::TransactionInBlock) -> Result { + Ok(Self { + transaction: model_field!(value => txn).try_into()?, + hash: model_field!(value => transaction_hash).into(), + }) + } +} + +impl TryFrom for Transaction { + type Error = FromModelError; + fn try_from(value: model::transaction_in_block::Txn) -> Result { + use 
model::transaction_in_block::Txn; + Ok(match value { + Txn::DeclareV0(tx) => Self::Declare(DeclareTransaction::V0(tx.try_into()?)), + Txn::DeclareV1(tx) => Self::Declare(DeclareTransaction::V1(tx.try_into()?)), + Txn::DeclareV2(tx) => Self::Declare(DeclareTransaction::V2(tx.try_into()?)), + Txn::DeclareV3(tx) => Self::Declare(DeclareTransaction::V3(tx.try_into()?)), + Txn::Deploy(tx) => Self::Deploy(tx.try_into()?), + Txn::DeployAccountV1(tx) => Self::DeployAccount(DeployAccountTransaction::V1(tx.try_into()?)), + Txn::DeployAccountV3(tx) => Self::DeployAccount(DeployAccountTransaction::V3(tx.try_into()?)), + Txn::InvokeV0(tx) => Self::Invoke(InvokeTransaction::V0(tx.try_into()?)), + Txn::InvokeV1(tx) => Self::Invoke(InvokeTransaction::V1(tx.try_into()?)), + Txn::InvokeV3(tx) => Self::Invoke(InvokeTransaction::V3(tx.try_into()?)), + Txn::L1Handler(tx) => Self::L1Handler(tx.try_into()?), + }) + } +} + +impl TryFrom for DeclareTransactionV0 { + type Error = FromModelError; + + #[model_describe(model::transaction_in_block::DeclareV0WithoutClass)] + fn try_from(value: model::transaction_in_block::DeclareV0WithoutClass) -> Result { + Ok(Self { + sender_address: model_field!(value => sender).into(), + max_fee: model_field!(value => max_fee).into(), + signature: model_field!(value => signature).parts.collect_into(), + class_hash: model_field!(value => class_hash).into(), + }) + } +} + +impl TryFrom for DeclareTransactionV1 { + type Error = FromModelError; + + #[model_describe(model::transaction_in_block::DeclareV1WithoutClass)] + fn try_from(value: model::transaction_in_block::DeclareV1WithoutClass) -> Result { + Ok(Self { + sender_address: model_field!(value => sender).into(), + max_fee: model_field!(value => max_fee).into(), + signature: model_field!(value => signature).parts.collect_into(), + nonce: model_field!(value => nonce).into(), + class_hash: model_field!(value => class_hash).into(), + }) + } +} + +impl TryFrom for DeclareTransactionV2 { + type Error = 
FromModelError; + + #[model_describe(model::transaction_in_block::DeclareV2WithoutClass)] + fn try_from(value: model::transaction_in_block::DeclareV2WithoutClass) -> Result { + Ok(Self { + sender_address: model_field!(value => sender).into(), + compiled_class_hash: model_field!(value => compiled_class_hash).into(), + max_fee: model_field!(value => max_fee).into(), + signature: model_field!(value => signature).parts.collect_into(), + nonce: model_field!(value => nonce).into(), + class_hash: model_field!(value => class_hash).into(), + }) + } +} + +impl TryFrom for DeclareTransactionV3 { + type Error = FromModelError; + + #[model_describe(model::transaction_in_block::DeclareV3WithoutClass)] + fn try_from(value: model::transaction_in_block::DeclareV3WithoutClass) -> Result { + let common = model_field!(value => common); + Ok(Self { + sender_address: model_field!(common => sender).into(), + compiled_class_hash: model_field!(common => compiled_class_hash).into(), + signature: model_field!(common => signature).parts.collect_into(), + nonce: model_field!(common => nonce).into(), + class_hash: model_field!(value => class_hash).into(), + resource_bounds: model_field!(common => resource_bounds).try_into()?, + tip: common.tip, + paymaster_data: common.paymaster_data.collect_into(), + account_deployment_data: common.account_deployment_data.collect_into(), + nonce_data_availability_mode: + model_field_variant!(model::VolitionDomain => common.nonce_data_availability_mode).into(), + fee_data_availability_mode: + model_field_variant!(model::VolitionDomain => common.fee_data_availability_mode).into(), + }) + } +} + +impl TryFrom for DeployTransaction { + type Error = FromModelError; + + #[model_describe(model::transaction_in_block::Deploy)] + fn try_from(value: model::transaction_in_block::Deploy) -> Result { + Ok(Self { + version: value.version.into(), + contract_address_salt: model_field!(value => address_salt).into(), + constructor_calldata: value.calldata.collect_into(), + 
class_hash: model_field!(value => class_hash).into(), + }) + } +} + +impl TryFrom for DeployAccountTransactionV1 { + type Error = FromModelError; + + #[model_describe(model::transaction_in_block::DeployAccountV1)] + fn try_from(value: model::transaction_in_block::DeployAccountV1) -> Result { + Ok(Self { + max_fee: model_field!(value => max_fee).into(), + signature: model_field!(value => signature).parts.collect_into(), + nonce: model_field!(value => nonce).into(), + contract_address_salt: model_field!(value => address_salt).into(), + constructor_calldata: value.calldata.collect_into(), + class_hash: model_field!(value => class_hash).into(), + }) + } +} + +impl TryFrom for DeployAccountTransactionV3 { + type Error = FromModelError; + + #[model_describe(model::DeployAccountV3)] + fn try_from(value: model::DeployAccountV3) -> Result { + Ok(Self { + signature: model_field!(value => signature).parts.collect_into(), + nonce: model_field!(value => nonce).into(), + contract_address_salt: model_field!(value => address_salt).into(), + constructor_calldata: value.calldata.collect_into(), + class_hash: model_field!(value => class_hash).into(), + resource_bounds: model_field!(value => resource_bounds).try_into()?, + tip: value.tip, + paymaster_data: value.paymaster_data.collect_into(), + nonce_data_availability_mode: + model_field_variant!(model::VolitionDomain => value.nonce_data_availability_mode).into(), + fee_data_availability_mode: model_field_variant!(model::VolitionDomain => value.fee_data_availability_mode) + .into(), + }) + } +} + +impl TryFrom for InvokeTransactionV0 { + type Error = FromModelError; + + #[model_describe(model::transaction_in_block::InvokeV0)] + fn try_from(value: model::transaction_in_block::InvokeV0) -> Result { + Ok(Self { + max_fee: model_field!(value => max_fee).into(), + signature: model_field!(value => signature).parts.collect_into(), + contract_address: model_field!(value => address).into(), + entry_point_selector: model_field!(value => 
entry_point_selector).into(), + calldata: value.calldata.collect_into(), + }) + } +} + +impl TryFrom for InvokeTransactionV1 { + type Error = FromModelError; + + #[model_describe(model::transaction_in_block::InvokeV1)] + fn try_from(value: model::transaction_in_block::InvokeV1) -> Result { + Ok(Self { + sender_address: model_field!(value => sender).into(), + calldata: value.calldata.collect_into(), + max_fee: model_field!(value => max_fee).into(), + signature: model_field!(value => signature).parts.collect_into(), + nonce: model_field!(value => nonce).into(), + }) + } +} + +impl TryFrom for InvokeTransactionV3 { + type Error = FromModelError; + + #[model_describe(model::InvokeV3)] + fn try_from(value: model::InvokeV3) -> Result { + Ok(Self { + sender_address: model_field!(value => sender).into(), + calldata: value.calldata.collect_into(), + signature: model_field!(value => signature).parts.collect_into(), + nonce: model_field!(value => nonce).into(), + resource_bounds: model_field!(value => resource_bounds).try_into()?, + tip: value.tip, + paymaster_data: value.paymaster_data.collect_into(), + account_deployment_data: value.account_deployment_data.collect_into(), + nonce_data_availability_mode: + model_field_variant!(model::VolitionDomain => value.nonce_data_availability_mode).into(), + fee_data_availability_mode: model_field_variant!(model::VolitionDomain => value.fee_data_availability_mode) + .into(), + }) + } +} + +impl TryFrom for L1HandlerTransaction { + type Error = FromModelError; + + #[model_describe(model::L1HandlerV0)] + fn try_from(value: model::L1HandlerV0) -> Result { + Ok(Self { + version: Felt::ZERO, + nonce: model_field!(value => nonce).0.try_into_field("L1HandlerV0::nonce")?, + contract_address: model_field!(value => address).into(), + entry_point_selector: model_field!(value => entry_point_selector).into(), + calldata: value.calldata.collect_into(), + }) + } +} + +impl TryFrom for ResourceBoundsMapping { + type Error = FromModelError; + + 
#[model_describe(model::ResourceBounds)] + fn try_from(value: model::ResourceBounds) -> Result { + Ok(Self { + l1_gas: model_field!(value => l1_gas).try_into()?, + l2_gas: model_field!(value => l2_gas).try_into()?, + }) + } +} + +impl TryFrom for ResourceBounds { + type Error = FromModelError; + + #[model_describe(model::ResourceLimits)] + fn try_from(value: model::ResourceLimits) -> Result { + Ok(Self { + max_amount: model_field!(value => max_amount).0.try_into_field("ResourceLimits::max_amount")?, + max_price_per_unit: model_field!(value => max_price_per_unit) + .0 + .try_into_field("ResourceLimits::max_price_per_unit")?, + }) + } +} + +impl From for DataAvailabilityMode { + fn from(value: model::VolitionDomain) -> Self { + use model::VolitionDomain; + match value { + VolitionDomain::L1 => DataAvailabilityMode::L1, + VolitionDomain::L2 => DataAvailabilityMode::L2, + } + } +} + +fn execution_result(revert_reason: Option) -> ExecutionResult { + match revert_reason { + Some(reason) => ExecutionResult::Reverted { reason }, + None => ExecutionResult::Succeeded, + } +} + +impl model::Receipt { + #[model_describe(model::Receipt)] + pub fn parse_model(self, transaction_hash: Felt) -> Result { + use model::receipt::Type; + + Ok(match model_field!(self => r#type) { + Type::Invoke(tx) => TransactionReceipt::Invoke(tx.parse_model(transaction_hash)?), + Type::L1Handler(tx) => TransactionReceipt::L1Handler(tx.parse_model(transaction_hash)?), + Type::Declare(tx) => TransactionReceipt::Declare(tx.parse_model(transaction_hash)?), + Type::DeprecatedDeploy(tx) => TransactionReceipt::Deploy(tx.parse_model(transaction_hash)?), + Type::DeployAccount(tx) => TransactionReceipt::DeployAccount(tx.parse_model(transaction_hash)?), + }) + } +} + +impl model::receipt::Invoke { + #[model_describe(model::receipt::Invoke)] + pub fn parse_model(self, transaction_hash: Felt) -> Result { + let common = model_field!(self => common); + Ok(InvokeTransactionReceipt { + transaction_hash, + actual_fee: 
FeePayment { + unit: common.price_unit().into(), + amount: model_field!(common => actual_fee).into(), + }, + messages_sent: common.messages_sent.into_iter().map(TryInto::try_into).collect::>()?, + events: vec![], + execution_resources: model_field!(common => execution_resources).try_into()?, + execution_result: execution_result(common.revert_reason), + }) + } +} + +impl model::receipt::L1Handler { + #[model_describe(model::receipt::L1Handler)] + pub fn parse_model(self, transaction_hash: Felt) -> Result { + let common = model_field!(self => common); + Ok(L1HandlerTransactionReceipt { + transaction_hash, + actual_fee: FeePayment { + unit: common.price_unit().into(), + amount: model_field!(common => actual_fee).into(), + }, + messages_sent: common.messages_sent.into_iter().map(TryInto::try_into).collect::>()?, + events: vec![], + execution_resources: model_field!(common => execution_resources).try_into()?, + execution_result: execution_result(common.revert_reason), + message_hash: model_field!(self => msg_hash).into(), + }) + } +} + +impl model::receipt::Declare { + #[model_describe(model::receipt::Declare)] + pub fn parse_model(self, transaction_hash: Felt) -> Result { + let common = model_field!(self => common); + Ok(DeclareTransactionReceipt { + transaction_hash, + actual_fee: FeePayment { + unit: common.price_unit().into(), + amount: model_field!(common => actual_fee).into(), + }, + messages_sent: common.messages_sent.into_iter().map(TryInto::try_into).collect::>()?, + events: vec![], + execution_resources: model_field!(common => execution_resources).try_into()?, + execution_result: execution_result(common.revert_reason), + }) + } +} + +impl model::receipt::Deploy { + #[model_describe(model::receipt::Deploy)] + pub fn parse_model(self, transaction_hash: Felt) -> Result { + let common = model_field!(self => common); + Ok(DeployTransactionReceipt { + transaction_hash, + actual_fee: FeePayment { + unit: common.price_unit().into(), + amount: model_field!(common => 
actual_fee).into(), + }, + messages_sent: common.messages_sent.into_iter().map(TryInto::try_into).collect::>()?, + events: vec![], + execution_resources: model_field!(common => execution_resources).try_into()?, + execution_result: execution_result(common.revert_reason), + contract_address: model_field!(self => contract_address).into(), + }) + } +} + +impl model::receipt::DeployAccount { + #[model_describe(model::receipt::DeployAccount)] + pub fn parse_model(self, transaction_hash: Felt) -> Result { + let common = model_field!(self => common); + Ok(DeployAccountTransactionReceipt { + transaction_hash, + actual_fee: FeePayment { + unit: common.price_unit().into(), + amount: model_field!(common => actual_fee).into(), + }, + messages_sent: common.messages_sent.into_iter().map(TryInto::try_into).collect::>()?, + events: vec![], + execution_resources: model_field!(common => execution_resources).try_into()?, + execution_result: execution_result(common.revert_reason), + contract_address: model_field!(self => contract_address).into(), + }) + } +} + +impl TryFrom for MsgToL1 { + type Error = FromModelError; + + #[model_describe(model::MessageToL1)] + fn try_from(value: model::MessageToL1) -> Result { + Ok(Self { + from_address: model_field!(value => from_address).into(), + to_address: model_field!(value => to_address).into(), + payload: value.payload.collect_into(), + }) + } +} + +impl TryFrom for ExecutionResources { + type Error = FromModelError; + + #[model_describe(model::receipt::ExecutionResources)] + fn try_from(value: model::receipt::ExecutionResources) -> Result { + let builtins = model_field!(value => builtins); + Ok(Self { + steps: value.steps.into(), + memory_holes: value.memory_holes.into(), + range_check_builtin_applications: builtins.range_check.into(), + pedersen_builtin_applications: builtins.pedersen.into(), + poseidon_builtin_applications: builtins.poseidon.into(), + ec_op_builtin_applications: builtins.ec_op.into(), + ecdsa_builtin_applications: 
builtins.ecdsa.into(), + bitwise_builtin_applications: builtins.bitwise.into(), + keccak_builtin_applications: builtins.keccak.into(), + // TODO: missing builtins (blockifier update needed) + // TODO: what's that again? why is the naming convention different and why don't we have the field for it + // segment_arena_builtin: builtins., + // segment_arena_builtin: builtins.segment_arena, + segment_arena_builtin: 0, + data_availability: L1Gas { + l1_gas: model_field!(value => l1_gas).0.try_into_field("ExecutionResources::l1_gas")?, + l1_data_gas: model_field!(value => l1_data_gas).0.try_into_field("ExecutionResources::l1_data_gas")?, + }, + // TODO: wrong, update blockifier + total_gas_consumed: L1Gas::default(), + // l1_gas: .. + // l1_data_gas: .. + // total_l1_gas: .. + }) + } +} + +impl From for model::TransactionInBlock { + fn from(value: TransactionWithHash) -> Self { + Self { transaction_hash: Some(value.hash.into()), txn: Some(value.transaction.into()) } + } +} + +impl From for model::transaction_in_block::Txn { + fn from(value: Transaction) -> Self { + match value { + Transaction::Invoke(tx) => match tx { + InvokeTransaction::V0(tx) => Self::InvokeV0(tx.into()), + InvokeTransaction::V1(tx) => Self::InvokeV1(tx.into()), + InvokeTransaction::V3(tx) => Self::InvokeV3(tx.into()), + }, + Transaction::L1Handler(tx) => Self::L1Handler(tx.into()), + Transaction::Declare(tx) => match tx { + DeclareTransaction::V0(tx) => Self::DeclareV0(tx.into()), + DeclareTransaction::V1(tx) => Self::DeclareV1(tx.into()), + DeclareTransaction::V2(tx) => Self::DeclareV2(tx.into()), + DeclareTransaction::V3(tx) => Self::DeclareV3(tx.into()), + }, + Transaction::Deploy(tx) => Self::Deploy(tx.into()), + Transaction::DeployAccount(tx) => match tx { + DeployAccountTransaction::V1(tx) => Self::DeployAccountV1(tx.into()), + DeployAccountTransaction::V3(tx) => Self::DeployAccountV3(tx.into()), + }, + } + } +} + +impl From for model::transaction_in_block::InvokeV0 { + fn from(value: 
InvokeTransactionV0) -> Self { + Self { + max_fee: Some(value.max_fee.into()), + signature: Some(model::AccountSignature { parts: value.signature.collect_into() }), + address: Some(value.contract_address.into()), + entry_point_selector: Some(value.entry_point_selector.into()), + calldata: value.calldata.collect_into(), + } + } +} + +impl From for model::transaction_in_block::InvokeV1 { + fn from(value: InvokeTransactionV1) -> Self { + Self { + sender: Some(value.sender_address.into()), + max_fee: Some(value.max_fee.into()), + signature: Some(model::AccountSignature { parts: value.signature.collect_into() }), + calldata: value.calldata.collect_into(), + nonce: Some(value.nonce.into()), + } + } +} + +impl From for model::InvokeV3 { + fn from(value: InvokeTransactionV3) -> Self { + Self { + sender: Some(value.sender_address.into()), + signature: Some(model::AccountSignature { parts: value.signature.collect_into() }), + calldata: value.calldata.collect_into(), + resource_bounds: Some(value.resource_bounds.into()), + tip: value.tip, + paymaster_data: value.paymaster_data.collect_into(), + account_deployment_data: value.account_deployment_data.collect_into(), + nonce_data_availability_mode: model::VolitionDomain::from(value.nonce_data_availability_mode).into(), + fee_data_availability_mode: model::VolitionDomain::from(value.fee_data_availability_mode).into(), + nonce: Some(value.nonce.into()), + } + } +} + +impl From for model::L1HandlerV0 { + fn from(value: L1HandlerTransaction) -> Self { + Self { + nonce: Some(Felt::from(value.nonce).into()), + address: Some(value.contract_address.into()), + entry_point_selector: Some(value.entry_point_selector.into()), + calldata: value.calldata.collect_into(), + } + } +} + +impl From for model::transaction_in_block::DeclareV0WithoutClass { + fn from(value: DeclareTransactionV0) -> Self { + Self { + sender: Some(value.sender_address.into()), + max_fee: Some(value.max_fee.into()), + signature: Some(model::AccountSignature { parts: 
value.signature.collect_into() }), + class_hash: Some(value.class_hash.into()), + } + } +} + +impl From for model::transaction_in_block::DeclareV1WithoutClass { + fn from(value: DeclareTransactionV1) -> Self { + Self { + sender: Some(value.sender_address.into()), + max_fee: Some(value.max_fee.into()), + signature: Some(model::AccountSignature { parts: value.signature.collect_into() }), + class_hash: Some(value.class_hash.into()), + nonce: Some(value.nonce.into()), + } + } +} + +impl From for model::transaction_in_block::DeclareV2WithoutClass { + fn from(value: DeclareTransactionV2) -> Self { + Self { + sender: Some(value.sender_address.into()), + max_fee: Some(value.max_fee.into()), + signature: Some(model::AccountSignature { parts: value.signature.collect_into() }), + class_hash: Some(value.class_hash.into()), + nonce: Some(value.nonce.into()), + compiled_class_hash: Some(value.compiled_class_hash.into()), + } + } +} + +impl From for model::transaction_in_block::DeclareV3WithoutClass { + fn from(value: DeclareTransactionV3) -> Self { + Self { + class_hash: Some(value.class_hash.into()), + common: Some(model::DeclareV3Common { + sender: Some(value.sender_address.into()), + signature: Some(model::AccountSignature { parts: value.signature.collect_into() }), + nonce: Some(value.nonce.into()), + compiled_class_hash: Some(value.compiled_class_hash.into()), + resource_bounds: Some(value.resource_bounds.into()), + tip: value.tip, + paymaster_data: value.paymaster_data.collect_into(), + account_deployment_data: value.account_deployment_data.collect_into(), + nonce_data_availability_mode: model::VolitionDomain::from(value.nonce_data_availability_mode).into(), + fee_data_availability_mode: model::VolitionDomain::from(value.fee_data_availability_mode).into(), + }), + } + } +} + +impl From for model::transaction_in_block::Deploy { + fn from(value: DeployTransaction) -> Self { + Self { + class_hash: Some(value.class_hash.into()), + address_salt: 
Some(value.contract_address_salt.into()), + calldata: value.constructor_calldata.collect_into(), + // TODO(dto-faillible-conversion) + version: value.version.try_into().expect("DeployTransaction version is not an u32"), + } + } +} + +impl From for model::transaction_in_block::DeployAccountV1 { + fn from(value: DeployAccountTransactionV1) -> Self { + Self { + max_fee: Some(value.max_fee.into()), + signature: Some(model::AccountSignature { parts: value.signature.collect_into() }), + class_hash: Some(value.class_hash.into()), + nonce: Some(value.nonce.into()), + address_salt: Some(value.contract_address_salt.into()), + calldata: value.constructor_calldata.collect_into(), + } + } +} + +impl From for model::DeployAccountV3 { + fn from(value: DeployAccountTransactionV3) -> Self { + Self { + signature: Some(model::AccountSignature { parts: value.signature.collect_into() }), + class_hash: Some(value.class_hash.into()), + nonce: Some(value.nonce.into()), + address_salt: Some(value.contract_address_salt.into()), + calldata: value.constructor_calldata.collect_into(), + resource_bounds: Some(value.resource_bounds.into()), + tip: value.tip, + paymaster_data: value.paymaster_data.collect_into(), + nonce_data_availability_mode: model::VolitionDomain::from(value.nonce_data_availability_mode).into(), + fee_data_availability_mode: model::VolitionDomain::from(value.fee_data_availability_mode).into(), + } + } +} + +impl From for model::ResourceBounds { + fn from(value: ResourceBoundsMapping) -> Self { + Self { + l1_gas: Some(value.l1_gas.into()), + l2_gas: Some(value.l2_gas.into()), + l1_data_gas: None, // TODO: Update blockifier + } + } +} + +impl From for model::ResourceLimits { + fn from(value: ResourceBounds) -> Self { + Self { + max_amount: Some(Felt::from(value.max_amount).into()), + max_price_per_unit: Some(Felt::from(value.max_price_per_unit).into()), + } + } +} + +impl From for model::VolitionDomain { + fn from(value: DataAvailabilityMode) -> Self { + match value { + 
DataAvailabilityMode::L1 => model::VolitionDomain::L1, + DataAvailabilityMode::L2 => model::VolitionDomain::L2, + } + } +} + +impl From for model::Receipt { + fn from(value: TransactionReceipt) -> Self { + use model::receipt::Type; + Self { + r#type: Some(match value { + TransactionReceipt::Invoke(receipt) => Type::Invoke(receipt.into()), + TransactionReceipt::L1Handler(receipt) => Type::L1Handler(receipt.into()), + TransactionReceipt::Declare(receipt) => Type::Declare(receipt.into()), + TransactionReceipt::Deploy(receipt) => Type::DeprecatedDeploy(receipt.into()), + TransactionReceipt::DeployAccount(receipt) => Type::DeployAccount(receipt.into()), + }), + } + } +} + +impl From for model::receipt::Invoke { + fn from(value: InvokeTransactionReceipt) -> Self { + Self { + common: Some(model::receipt::Common { + actual_fee: Some(value.actual_fee.amount.into()), + price_unit: model::PriceUnit::from(value.actual_fee.unit).into(), + messages_sent: value.messages_sent.collect_into(), + execution_resources: Some(value.execution_resources.into()), + revert_reason: value.execution_result.revert_reason().map(String::from), + }), + } + } +} + +impl From for model::receipt::L1Handler { + fn from(value: L1HandlerTransactionReceipt) -> Self { + Self { + common: Some(model::receipt::Common { + actual_fee: Some(value.actual_fee.amount.into()), + price_unit: model::PriceUnit::from(value.actual_fee.unit).into(), + messages_sent: value.messages_sent.collect_into(), + execution_resources: Some(value.execution_resources.into()), + revert_reason: value.execution_result.revert_reason().map(String::from), + }), + msg_hash: Some(value.message_hash.into()), + } + } +} + +impl From for model::receipt::Declare { + fn from(value: DeclareTransactionReceipt) -> Self { + Self { + common: Some(model::receipt::Common { + actual_fee: Some(value.actual_fee.amount.into()), + price_unit: model::PriceUnit::from(value.actual_fee.unit).into(), + messages_sent: value.messages_sent.collect_into(), + 
execution_resources: Some(value.execution_resources.into()), + revert_reason: value.execution_result.revert_reason().map(String::from), + }), + } + } +} + +impl From for model::receipt::Deploy { + fn from(value: DeployTransactionReceipt) -> Self { + Self { + common: Some(model::receipt::Common { + actual_fee: Some(value.actual_fee.amount.into()), + price_unit: model::PriceUnit::from(value.actual_fee.unit).into(), + messages_sent: value.messages_sent.collect_into(), + execution_resources: Some(value.execution_resources.into()), + revert_reason: value.execution_result.revert_reason().map(String::from), + }), + contract_address: Some(value.contract_address.into()), + } + } +} + +impl From for model::receipt::DeployAccount { + fn from(value: DeployAccountTransactionReceipt) -> Self { + Self { + common: Some(model::receipt::Common { + actual_fee: Some(value.actual_fee.amount.into()), + price_unit: model::PriceUnit::from(value.actual_fee.unit).into(), + messages_sent: value.messages_sent.collect_into(), + execution_resources: Some(value.execution_resources.into()), + revert_reason: value.execution_result.revert_reason().map(String::from), + }), + contract_address: Some(value.contract_address.into()), + } + } +} + +impl From for model::MessageToL1 { + fn from(value: MsgToL1) -> Self { + Self { + from_address: Some(value.from_address.into()), + payload: value.payload.collect_into(), + to_address: Some(value.to_address.into()), + } + } +} + +impl From for model::receipt::ExecutionResources { + fn from(value: ExecutionResources) -> Self { + Self { + // TODO(dto-faillible-conversion) + builtins: Some(BuiltinCounter { + bitwise: value.bitwise_builtin_applications.try_into().expect("bitwise_builtin > u32::MAX"), + ecdsa: value.ecdsa_builtin_applications.try_into().expect("ecdsa_builtin > u32::MAX"), + ec_op: value.ec_op_builtin_applications.try_into().expect("ec_op_builtin > u32::MAX"), + pedersen: value.pedersen_builtin_applications.try_into().expect("pedersen_builtin > 
u32::MAX"), + range_check: value.range_check_builtin_applications.try_into().expect("range_check_builtin > u32::MAX"), + poseidon: value.poseidon_builtin_applications.try_into().expect("poseidon_builtin > u32::MAX"), + keccak: value.keccak_builtin_applications.try_into().expect("keccak_builtin > u32::MAX"), + // TODO: missing builtins + ..Default::default() + }), + // TODO(dto-faillible-conversion) + steps: value.steps.try_into().expect("steps > u32::MAX"), + // TODO(dto-faillible-conversion) + memory_holes: value.memory_holes.try_into().expect("memory_holes > u32::MAX"), + l1_gas: Some(Felt::from(value.total_gas_consumed.l1_gas).into()), + l1_data_gas: Some(Felt::from(value.total_gas_consumed.l1_data_gas).into()), + total_l1_gas: Some(Felt::from(value.total_gas_consumed.l1_gas).into()), + l2_gas: None, // TODO: update blockifier + } + } +} + +impl From for model::PriceUnit { + fn from(value: PriceUnit) -> Self { + match value { + PriceUnit::Wei => Self::Wei, + PriceUnit::Fri => Self::Fri, + } + } +} +impl From for PriceUnit { + fn from(value: model::PriceUnit) -> Self { + match value { + model::PriceUnit::Wei => Self::Wei, + model::PriceUnit::Fri => Self::Fri, + } + } +} diff --git a/crates/madara/primitives/proto/starknet-p2p-specs b/crates/madara/primitives/proto/starknet-p2p-specs new file mode 160000 index 000000000..1e8cc53b3 --- /dev/null +++ b/crates/madara/primitives/proto/starknet-p2p-specs @@ -0,0 +1 @@ +Subproject commit 1e8cc53b38636f8a354db6658e9209ea6e41a854 diff --git a/crates/madara/primitives/receipt/Cargo.toml b/crates/madara/primitives/receipt/Cargo.toml index f807c1517..7a3f235b9 100644 --- a/crates/madara/primitives/receipt/Cargo.toml +++ b/crates/madara/primitives/receipt/Cargo.toml @@ -16,20 +16,25 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] +# Madara +mp-chain-config = { workspace = true } +mp-convert = { workspace = true } + # Starknet blockifier = { workspace = true } cairo-vm = { workspace = true } -rstest.workspace = true 
starknet-core = { workspace = true } starknet-types-core = { workspace = true } starknet-types-rpc = { workspace = true } starknet_api = { workspace = true } -thiserror = { workspace = true } -tracing.workspace = true # Other primitive-types = { workspace = true } +rstest.workspace = true serde = { workspace = true, features = ["derive"] } +serde_with.workspace = true +thiserror = { workspace = true } +tracing.workspace = true [dev-dependencies] bincode = { workspace = true } diff --git a/crates/madara/primitives/receipt/src/from_blockifier.rs b/crates/madara/primitives/receipt/src/from_blockifier.rs index fe76a477e..d1fe13037 100644 --- a/crates/madara/primitives/receipt/src/from_blockifier.rs +++ b/crates/madara/primitives/receipt/src/from_blockifier.rs @@ -6,8 +6,7 @@ use blockifier::transaction::{ transactions::L1HandlerTransaction, }; use cairo_vm::types::builtin_name::BuiltinName; -use primitive_types::H256; -use starknet_core::types::MsgToL2; +use starknet_core::types::{Hash256, MsgToL2}; use starknet_types_core::felt::Felt; use thiserror::Error; @@ -43,7 +42,7 @@ pub enum L1HandlerMessageError { InvalidNonce, } -fn get_l1_handler_message_hash(tx: &L1HandlerTransaction) -> Result { +fn get_l1_handler_message_hash(tx: &L1HandlerTransaction) -> Result { let (from_address, payload) = tx.tx.calldata.0.split_first().ok_or(L1HandlerMessageError::EmptyCalldata)?; let from_address = (*from_address).try_into().map_err(|_| L1HandlerMessageError::FromAddressOutOfRange)?; @@ -57,7 +56,7 @@ fn get_l1_handler_message_hash(tx: &L1HandlerTransaction) -> Result impl Iterator { @@ -75,17 +74,6 @@ pub fn from_blockifier_execution_info(res: &TransactionExecutionInfo, tx: &Trans let actual_fee = FeePayment { amount: res.transaction_receipt.fee.into(), unit: price_unit }; let transaction_hash = blockifier_tx_hash(tx); - let message_hash = match tx { - Transaction::L1HandlerTransaction(tx) => match get_l1_handler_message_hash(tx) { - Ok(hash) => Some(hash), - Err(err) => { - 
tracing::error!("Error getting l1 handler message hash: {:?}", err); - None - } - }, - _ => None, - }; - let messages_sent = recursive_call_info_iter(res) .flat_map(|call| { call.execution.l2_to_l1_messages.iter().map(|message| MsgToL1 { @@ -172,14 +160,16 @@ pub fn from_blockifier_execution_info(res: &TransactionExecutionInfo, tx: &Trans execution_result, }) } - Transaction::L1HandlerTransaction(_tx) => TransactionReceipt::L1Handler(L1HandlerTransactionReceipt { + Transaction::L1HandlerTransaction(tx) => TransactionReceipt::L1Handler(L1HandlerTransactionReceipt { transaction_hash, actual_fee, messages_sent, events, execution_resources, execution_result, - message_hash: message_hash.unwrap(), // it's a safe unwrap because it would've panicked earlier if it was Err + // This should not panic unless blockifier gives a garbage receipt. + // TODO: we should have a soft error here just in case. + message_hash: get_l1_handler_message_hash(tx).expect("Error getting l1 handler message hash"), }), } } diff --git a/crates/madara/primitives/receipt/src/lib.rs b/crates/madara/primitives/receipt/src/lib.rs index d785621a9..809602904 100644 --- a/crates/madara/primitives/receipt/src/lib.rs +++ b/crates/madara/primitives/receipt/src/lib.rs @@ -1,8 +1,4 @@ -mod from_blockifier; -mod to_starknet_types; -pub use from_blockifier::from_blockifier_execution_info; - -use primitive_types::H256; +use mp_chain_config::StarknetVersion; use serde::{Deserialize, Serialize}; use starknet_core::utils::starknet_keccak; use starknet_types_core::{ @@ -10,6 +6,12 @@ use starknet_types_core::{ hash::{Pedersen, Poseidon, StarkHash}, }; +mod from_blockifier; +mod to_starknet_types; + +pub use from_blockifier::from_blockifier_execution_info; +pub use starknet_core::types::Hash256; + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum TransactionReceipt { Invoke(InvokeTransactionReceipt), @@ -187,10 +189,11 @@ pub struct InvokeTransactionReceipt { pub execution_result: 
ExecutionResult, } -#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] +#[serde_with::serde_as] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct L1HandlerTransactionReceipt { - // normally this would be a Hash256, but the serde implementation doesn't work with bincode. - pub message_hash: H256, + #[serde_as(as = "mp_convert::hash256_serde::Hash256Serde")] + pub message_hash: Hash256, pub transaction_hash: Felt, pub actual_fee: FeePayment, pub messages_sent: Vec, @@ -199,6 +202,22 @@ pub struct L1HandlerTransactionReceipt { pub execution_result: ExecutionResult, } +// TODO: we shouldnt need to have default impls for these types (it's used in tests) +// Implement default by hand as [`Hash256`] does not impl Default. +impl Default for L1HandlerTransactionReceipt { + fn default() -> Self { + Self { + message_hash: Hash256::from_bytes(Default::default()), + transaction_hash: Default::default(), + actual_fee: Default::default(), + messages_sent: Default::default(), + events: Default::default(), + execution_resources: Default::default(), + execution_result: Default::default(), + } + } +} + #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] pub struct DeclareTransactionReceipt { pub transaction_hash: Felt, @@ -252,6 +271,15 @@ pub struct MsgToL1 { pub payload: Vec, } +/// Event with transaction hash. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct EventWithTransactionHash { + pub transaction_hash: Felt, + #[serde(flatten)] + pub event: Event, +} + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct Event { @@ -262,6 +290,14 @@ pub struct Event { impl Event { /// Calculate the hash of the event. 
+ pub fn compute_hash(&self, transaction_hash: Felt, starknet_version: StarknetVersion) -> Felt { + if starknet_version < StarknetVersion::V0_13_2 { + self.compute_hash_pedersen() + } else { + self.compute_hash_poseidon(&transaction_hash) + } + } + pub fn compute_hash_pedersen(&self) -> Felt { let keys_hash = Pedersen::hash_array(&self.keys); let data_hash = Pedersen::hash_array(&self.data); @@ -315,6 +351,13 @@ pub enum ExecutionResult { } impl ExecutionResult { + pub fn revert_reason(&self) -> Option<&str> { + match self { + Self::Succeeded => None, + Self::Reverted { reason } => Some(reason), + } + } + fn compute_hash(&self) -> Felt { match self { ExecutionResult::Succeeded => Felt::ZERO, @@ -325,8 +368,6 @@ impl ExecutionResult { #[cfg(test)] mod tests { - use std::str::FromStr; - use super::*; #[test] @@ -505,7 +546,7 @@ mod tests { pub(crate) fn dummy_l1_handler_receipt() -> L1HandlerTransactionReceipt { L1HandlerTransactionReceipt { - message_hash: H256::from_str("0x0000000000000000000000000000000000000000000000000000000000000001").unwrap(), + message_hash: Hash256::from_hex("0x1").unwrap(), transaction_hash: Felt::from(2), actual_fee: FeePayment { amount: Felt::from(3), unit: PriceUnit::Wei }, messages_sent: dummy_messages(), diff --git a/crates/madara/primitives/receipt/src/to_starknet_types.rs b/crates/madara/primitives/receipt/src/to_starknet_types.rs index 503c66aac..47d460c18 100644 --- a/crates/madara/primitives/receipt/src/to_starknet_types.rs +++ b/crates/madara/primitives/receipt/src/to_starknet_types.rs @@ -1,11 +1,9 @@ -use primitive_types::H256; -use starknet_types_core::felt::Felt; - use crate::{ DeclareTransactionReceipt, DeployAccountTransactionReceipt, DeployTransactionReceipt, Event, ExecutionResources, ExecutionResult, FeePayment, InvokeTransactionReceipt, L1Gas, L1HandlerTransactionReceipt, MsgToL1, PriceUnit, TransactionReceipt, }; +use starknet_types_core::felt::Felt; impl TransactionReceipt { pub fn to_starknet_types( @@ -60,9 +58,7 @@ 
impl L1HandlerTransactionReceipt { finality_status: starknet_types_rpc::TxnFinalityStatus, ) -> starknet_types_rpc::L1HandlerTxnReceipt { starknet_types_rpc::L1HandlerTxnReceipt:: { - // We have to manually convert the H256 bytes to a hex hash as the - // impl of Display for H256 skips the middle bytes. - message_hash: hash_as_string(self.message_hash), + message_hash: format!("{}", self.message_hash), common_receipt_properties: starknet_types_rpc::CommonReceiptProperties { actual_fee: self.actual_fee.into(), events: self.events.into_iter().map(starknet_types_rpc::Event::from).collect(), @@ -76,23 +72,6 @@ impl L1HandlerTransactionReceipt { } } -/// Gets the **full** string hex representation of an [H256]. -/// -/// This is necessary as the default implementation of [ToString] for [H256] -/// will keep only the first and last 2 bytes, eliding the rest with '...'. -fn hash_as_string(message_hash: H256) -> String { - use std::fmt::Write; - - // 32 bytes x 2 (1 hex char = 4 bits) + 2 (for 0x) - let mut acc = String::with_capacity(68); - acc.push_str("0x"); - - message_hash.as_fixed_bytes().iter().fold(acc, |mut acc, b| { - write!(&mut acc, "{b:02x}").expect("Pre-allocated"); - acc - }) -} - impl DeclareTransactionReceipt { pub fn to_starknet_types( self, @@ -221,35 +200,3 @@ impl From for starknet_types_rpc::ExecutionStatus { } } } - -#[cfg(test)] -mod test { - use primitive_types::H256; - - use crate::{to_starknet_types::hash_as_string, L1HandlerTransactionReceipt}; - - #[test] - fn test_hash_as_string() { - let mut hash = String::with_capacity(68); - hash.push_str("0x"); - hash.push_str(&"f".repeat(64)); - assert_eq!(hash_as_string(H256::from_slice(&[u8::MAX; 32])), hash); - } - - /// The default implementation of [ToString] for [H256] will keep only the - /// first and last 2 bytes, eliding the rest with '...'. This test makes - /// sure this is not the case and we are using [hash_as_string] instead. 
- #[test] - fn test_l1_tx_receipt_full_hash() { - let l1_transaction_receipt = - L1HandlerTransactionReceipt { message_hash: H256::from_slice(&[u8::MAX; 32]), ..Default::default() }; - let message_hash = - l1_transaction_receipt.to_starknet_types(starknet_types_rpc::TxnFinalityStatus::L1).message_hash; - - let mut hash = String::with_capacity(68); - hash.push_str("0x"); - hash.push_str(&"f".repeat(64)); - assert_eq!(message_hash, hash); - assert!(!message_hash.contains(".")); - } -} diff --git a/crates/madara/primitives/state_update/src/lib.rs b/crates/madara/primitives/state_update/src/lib.rs index a6d3fd9a1..53b6229d7 100644 --- a/crates/madara/primitives/state_update/src/lib.rs +++ b/crates/madara/primitives/state_update/src/lib.rs @@ -1,5 +1,7 @@ mod into_starknet_types; +use std::collections::HashMap; + use starknet_types_core::{ felt::Felt, hash::{Poseidon, StarkHash}, @@ -19,13 +21,25 @@ pub struct PendingStateUpdate { pub state_diff: StateDiff, } +#[derive(Copy, Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +pub enum DeclaredClassCompiledClass { + Sierra(/* compiled_class_hash */ Felt), + Legacy, +} + #[derive(Clone, Debug, Default, PartialEq, Eq, serde::Serialize, serde::Deserialize)] pub struct StateDiff { + /// Changed storage values. Mapping (contract_address, storage_key) => value. pub storage_diffs: Vec, + /// New declared classes. List of class hashes. pub deprecated_declared_classes: Vec, + /// New declared classes. Mapping class_hash => compiled_class_hash. pub declared_classes: Vec, + /// New contract. Mapping contract_address => class_hash. pub deployed_contracts: Vec, + /// Contract has changed class. Mapping contract_address => class_hash. pub replaced_classes: Vec, + /// New contract nonce. Mapping contract_address => nonce. 
pub nonces: Vec, } @@ -140,6 +154,18 @@ impl StateDiff { Poseidon::hash_array(&elements) } + + pub fn all_declared_classes(&self) -> HashMap { + self.declared_classes + .iter() + .map(|class| (class.class_hash, DeclaredClassCompiledClass::Sierra(class.compiled_class_hash))) + .chain( + self.deprecated_declared_classes + .iter() + .map(|class_hash| (*class_hash, DeclaredClassCompiledClass::Legacy)), + ) + .collect() + } } #[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)] diff --git a/crates/madara/primitives/transactions/src/compute_hash.rs b/crates/madara/primitives/transactions/src/compute_hash.rs index 8cddefc38..69f9f4d58 100644 --- a/crates/madara/primitives/transactions/src/compute_hash.rs +++ b/crates/madara/primitives/transactions/src/compute_hash.rs @@ -26,31 +26,31 @@ const PEDERSEN_EMPTY: Felt = Felt::from_hex_unchecked("0x49ee3eba8c1600700ee1b87eb599f16716b0b1022947733551fde4050ca6804"); impl Transaction { - pub fn compute_hash(&self, chain_id: Felt, version: StarknetVersion, offset_version: bool) -> Felt { + pub fn compute_hash(&self, chain_id: Felt, version: StarknetVersion, is_query: bool) -> Felt { let legacy = version.is_legacy(); let is_pre_v0_7 = version.is_pre_v0_7(); if is_pre_v0_7 { - self.compute_hash_pre_v0_7(chain_id, offset_version) + self.compute_hash_pre_v0_7(chain_id, is_query) } else { - self.compute_hash_inner(chain_id, offset_version, legacy) + self.compute_hash_inner(chain_id, is_query, legacy) } } - fn compute_hash_inner(&self, chain_id: Felt, offset_version: bool, legacy: bool) -> Felt { + fn compute_hash_inner(&self, chain_id: Felt, is_query: bool, legacy: bool) -> Felt { match self { - crate::Transaction::Invoke(tx) => tx.compute_hash(chain_id, offset_version, legacy), - crate::Transaction::L1Handler(tx) => tx.compute_hash(chain_id, offset_version, legacy), - crate::Transaction::Declare(tx) => tx.compute_hash(chain_id, offset_version), + crate::Transaction::Invoke(tx) => tx.compute_hash(chain_id, 
is_query, legacy), + crate::Transaction::L1Handler(tx) => tx.compute_hash(chain_id, is_query, legacy), + crate::Transaction::Declare(tx) => tx.compute_hash(chain_id, is_query), crate::Transaction::Deploy(tx) => tx.compute_hash(chain_id, legacy), - crate::Transaction::DeployAccount(tx) => tx.compute_hash(chain_id, offset_version), + crate::Transaction::DeployAccount(tx) => tx.compute_hash(chain_id, is_query), } } - pub fn compute_hash_pre_v0_7(&self, chain_id: Felt, offset_version: bool) -> Felt { + pub fn compute_hash_pre_v0_7(&self, chain_id: Felt, is_query: bool) -> Felt { match self { crate::Transaction::L1Handler(tx) => tx.compute_hash_pre_v0_7(chain_id), - _ => self.compute_hash_inner(chain_id, offset_version, true), + _ => self.compute_hash_inner(chain_id, is_query, true), } } diff --git a/crates/madara/primitives/utils/src/lib.rs b/crates/madara/primitives/utils/src/lib.rs index 0e4886a9a..aafe72afe 100644 --- a/crates/madara/primitives/utils/src/lib.rs +++ b/crates/madara/primitives/utils/src/lib.rs @@ -3,6 +3,7 @@ pub mod crypto; pub mod hash; pub mod parsers; +pub mod rayon; pub mod serde; pub mod service; use std::time::{Duration, Instant}; @@ -11,22 +12,6 @@ pub use hash::trim_hash; use tokio::sync::oneshot; -/// Prefer this compared to [`tokio::spawn_blocking`], as spawn_blocking creates new OS threads and -/// we don't really need that -pub async fn spawn_rayon_task(func: F) -> R -where - F: FnOnce() -> R + Send + 'static, - R: Send + 'static, -{ - let (tx, rx) = tokio::sync::oneshot::channel(); - - rayon::spawn(move || { - let _result = tx.send(func()); - }); - - rx.await.expect("tokio channel closed") -} - #[derive(Debug, Default)] pub struct StopHandle(Option>); diff --git a/crates/madara/primitives/utils/src/rayon.rs b/crates/madara/primitives/utils/src/rayon.rs new file mode 100644 index 000000000..2147429b0 --- /dev/null +++ b/crates/madara/primitives/utils/src/rayon.rs @@ -0,0 +1,65 @@ +use std::{panic::AssertUnwindSafe, 
sync::atomic::AtomicUsize, thread}; +use tokio::sync::Semaphore; + +/// Wraps the rayon pool in a tokio-friendly way. +/// This should be avoided in RPC/p2p/any other end-user endpoints, as this could be a DoS vector. To avoid that, +/// signature verification should probably be done before sending to the rayon pool +/// As a safety, a semaphore is added to bound the queue and support backpressure. +/// The tasks are added in FIFO order. +pub struct RayonPool { + semaphore: Semaphore, + max_tasks: usize, + permit_id: AtomicUsize, + n_acquired_permits: AtomicUsize, +} + +impl Default for RayonPool { + fn default() -> Self { + Self::new() + } +} + +impl RayonPool { + pub fn new() -> Self { + let n_cores = thread::available_parallelism().expect("Getting the number of cores").get(); + let max_tasks = n_cores * 128; + Self { semaphore: Semaphore::new(max_tasks), max_tasks, permit_id: 0.into(), n_acquired_permits: 0.into() } + } + + pub async fn spawn_rayon_task(&self, func: F) -> R + where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, + { + let max_tasks = self.max_tasks; + let permit_id = self.permit_id.fetch_add(1, std::sync::atomic::Ordering::SeqCst); + tracing::trace!("acquire permit {permit_id}"); + let permit = self.semaphore.acquire().await.expect("Poisoned semaphore"); + let n_acquired_permits = self.n_acquired_permits.fetch_add(1, std::sync::atomic::Ordering::SeqCst) + 1; + tracing::trace!("acquired permit {permit_id} ({n_acquired_permits}/{max_tasks})"); + + let res = global_spawn_rayon_task(func).await; + + drop(permit); + + let n_acquired_permits = self.n_acquired_permits.fetch_sub(1, std::sync::atomic::Ordering::SeqCst); + tracing::trace!("released permit {permit_id} ({n_acquired_permits}/{max_tasks})"); + res + } +} + +pub async fn global_spawn_rayon_task(func: F) -> R +where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, +{ + let (tx, rx) = tokio::sync::oneshot::channel(); + + // Important: fifo mode. 
+ rayon::spawn_fifo(move || { + // We bubble up the panics to the tokio pool. + let _result = tx.send(std::panic::catch_unwind(AssertUnwindSafe(func))); + }); + + rx.await.expect("Tokio channel closed").expect("Rayon task panicked") +} diff --git a/crates/madara/primitives/utils/src/service.rs b/crates/madara/primitives/utils/src/service.rs index ddba4eb93..6a386b5f4 100644 --- a/crates/madara/primitives/utils/src/service.rs +++ b/crates/madara/primitives/utils/src/service.rs @@ -377,6 +377,7 @@ pub enum MadaraServiceId { RpcAdmin, Gateway, Telemetry, + P2p, } impl ServiceId for MadaraServiceId { @@ -392,6 +393,7 @@ impl ServiceId for MadaraServiceId { MadaraServiceId::RpcAdmin => PowerOfTwo::P5, MadaraServiceId::Gateway => PowerOfTwo::P6, MadaraServiceId::Telemetry => PowerOfTwo::P7, + MadaraServiceId::P2p => PowerOfTwo::P8, } } } @@ -411,6 +413,7 @@ impl Display for MadaraServiceId { Self::RpcAdmin => "rpc admin", Self::Gateway => "gateway", Self::Telemetry => "telemetry", + Self::P2p => "p2p", } ) } @@ -443,6 +446,7 @@ impl From for MadaraServiceId { PowerOfTwo::P4 => Self::RpcUser, PowerOfTwo::P5 => Self::RpcAdmin, PowerOfTwo::P6 => Self::Gateway, + PowerOfTwo::P7 => Self::P2p, _ => Self::Telemetry, } } @@ -1182,7 +1186,7 @@ impl<'a> ServiceRunner<'a> { ctx.cancelled().await; tokio::time::sleep(SERVICE_GRACE_PERIOD).await; - tracing::warn!("⚠️ Forcefully shutting down service with id: {id}"); + tracing::warn!("⚠️ Forcefully shutting down service: {}", MadaraServiceId::from(*id)); } } @@ -1291,7 +1295,7 @@ impl ServiceMonitor { match result { Ok(result) => { let id = result?; - tracing::debug!("service {id} has shut down"); + tracing::debug!("Service {id} has shut down"); self.status_actual.deactivate(id); self.status_request.deactivate(id); } @@ -1316,7 +1320,7 @@ impl ServiceMonitor { .await .context("Starting service")?; - tracing::debug!("service {svc_id} has started"); + tracing::debug!("Service {svc_id} has started"); } else { // reset request 
self.status_request.deactivate(svc_id); diff --git a/crates/madara/proc-macros/src/lib.rs b/crates/madara/proc-macros/src/lib.rs index 74ab634c6..e7144f2d5 100644 --- a/crates/madara/proc-macros/src/lib.rs +++ b/crates/madara/proc-macros/src/lib.rs @@ -328,6 +328,35 @@ pub fn versioned_rpc(attr: TokenStream, input: TokenStream) -> TokenStream { .into() } +/// Decorate a protobuf model function with runtime information about the model +/// type it is operating on. +#[proc_macro_attribute] +pub fn model_describe(args: TokenStream, input: TokenStream) -> TokenStream { + // Parse the input function + let input_fn = syn::parse_macro_input!(input as syn::ItemFn); + + // Parse the attribute arguments + let model_type = syn::parse_macro_input!(args as syn::Path); + let Some(model_type) = model_type.segments.last() else { + return syn::Error::new(model_type.span(), "Missing model type").into_compile_error().into(); + }; + + // Get the original function name and body + let fn_body = &input_fn.block; + let fn_vis = &input_fn.vis; + let fn_sig = &input_fn.sig; + + // Generate the new function with the added const declaration + let output = quote! { + #fn_vis #fn_sig { + const __MODEL: &str = stringify!(#model_type); + #fn_body + } + }; + + output.into() +} + #[cfg(test)] mod tests { use super::*; diff --git a/scripts/pathfinder-checkpoint-override.sh b/scripts/pathfinder-checkpoint-override.sh new file mode 100755 index 000000000..1cbe5fe58 --- /dev/null +++ b/scripts/pathfinder-checkpoint-override.sh @@ -0,0 +1,24 @@ +#!/bin/sh +set -e + +# Makes an ethereum checkpoint override json for use with pathfinder's +# `--p2p.experimental.l1-checkpoint-override-json-path ` +# cli argument. +# Returns the path to the resulting json file, so that you can just supply +# `--p2p.experimental.l1-checkpoint-override-json-path $(./scripts/pathfinder-checkpoint-override.sh)` to +# pathfinder. 
+ +curl 'http://127.0.0.1:9944/' -s \ + --header 'Content-Type: application/json' \ + --data '{ + "jsonrpc": "2.0", + "method": "starknet_getBlockWithTxHashes", + "params": { + "block_id": "latest" + }, + "id": 1 + }' \ + | jq '{ block_hash: .result.block_hash, block_number: .result.block_number, state_root: .result.new_root }' \ + > pathfinder-checkpoint-override.json + +realpath pathfinder-checkpoint-override.json