diff --git a/.github/workflows/build-dev.yaml b/.github/workflows/build-dev.yaml index 182bd66f46e..512b0d68d04 100644 --- a/.github/workflows/build-dev.yaml +++ b/.github/workflows/build-dev.yaml @@ -18,8 +18,8 @@ concurrency: env: BUILD_VERSION: latest # Computed DOCKER_HUB_USER: defi - # Note: We do not use debug build on CI for dev as well, since debug builds - # take 5x the amount of space and run the GitHub CI workers out of space. + # Note: We do not use debug build on CI for dev as well, since debug builds + # take 5x the amount of space and run the GitHub CI workers out of space. # make.sh still defaults to debug for local builds MAKE_DEBUG: 0 GIT_VERSION: 1 @@ -115,7 +115,7 @@ jobs: with: name: defichain-${{ env.BUILD_VERSION }}-${{ env.TARGET }} path: ./build/ - + - name: Unpack binaries run: tar -xvzf ./build/defichain-${{ env.BUILD_VERSION }}-${{ env.TARGET }}.${{ env.PKG_TYPE }} -C ./build/ diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index c30a6d6a427..96bf3bb1ee4 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -10,6 +10,7 @@ on: pull_request: branches: - master + - feature/** concurrency: group: ${{ github.workflow }}-${{ github.ref || github.run_id }} diff --git a/.github/workflows/tests-ocean.yml b/.github/workflows/tests-ocean.yml new file mode 100644 index 00000000000..b737977b678 --- /dev/null +++ b/.github/workflows/tests-ocean.yml @@ -0,0 +1,94 @@ +name: Tests - Jellyfish Whale Apis + +on: + workflow_dispatch: + pull_request: + branches: + - master + - feature/ocean-archive # TODO(): remove before merge to master + +concurrency: + group: ${{ github.workflow }}-${{ github.ref || github.run_id }} + cancel-in-progress: ${{ github.ref != 'refs/heads/master' }} + +env: + TARGET: x86_64-pc-linux-gnu + MAKE_DEBUG: 0 + GIT_VERSION: 1 + +jobs: + build: + runs-on: ubuntu-latest + container: defi/ain-builder:latest + env: + CARGO_INCREMENTAL: 0 + + steps: + - uses: actions/checkout@v4 + - run: git 
config --global --add safe.directory '*' + + - name: Populate environment + run: ./make.sh ci-export-vars + + - name: Setup dependencies + run: ./make.sh ci-setup-deps + + - name: Setup user dependencies + run: ./make.sh ci-setup-user-deps + + - name: Restore cpp build cache + id: cpp-cache-restore + uses: actions/cache/restore@v3 + with: + path: | + ./build/depends + ./build/src + ~/.ccache + key: cpp-${{ env.TARGET }}-${{ env.BUILD_TYPE }} + + - uses: Swatinem/rust-cache@v2 + with: + workspaces: lib -> ../build/lib/target + save-if: ${{ github.ref == 'refs/heads/master' }} + shared-key: ${{ env.TARGET }} + + - name: Build binaries + run: ./make.sh build + + - name: Upload binaries + uses: actions/upload-artifact@v3 + with: + name: defi-bins + path: build/src/defid + + test: + name: Jellyfish Whale Api tests + runs-on: ubuntu-latest + needs: [build] + strategy: + fail-fast: false + + steps: + - uses: actions/checkout@v4 + with: + repository: birthdayresearch/jellyfishsdk + # ref: 'main' + ref: 'canonbrother/whale-defid' # TODO(): remove before merge to master + + - name: Download binaries + uses: actions/download-artifact@v3 + with: + name: defi-bins + + - name: Setup permissions + run: | + chmod uog+x "$(pwd)/defid" + + - uses: actions/setup-node@v3 + with: + node-version: '18' + + - name: Run tests + run: | + npm ci + DEFID="$(pwd)/defid" npm run defid apps/whale-api/src/module.api/__defid__ diff --git a/doc/build-quick.md b/doc/build-quick.md index c8aac6cc287..bfc6b026aa6 100644 --- a/doc/build-quick.md +++ b/doc/build-quick.md @@ -1,6 +1,6 @@ # Quick build notes -DeFiChain is built with the same process as Bitcoin, but provides certain convenience steps to +DeFiChain is built with the same process as Bitcoin, but provides certain convenience steps to build it easily with the `./make.sh` file in the root directory. ``` @@ -89,7 +89,6 @@ an environment with correct arch and pre-requisites configured. (most pre-requisites can be installed with pkg-* commands). 
``` - ## `TARGET` values ### Tier 1 @@ -145,7 +144,7 @@ but receives little to no testing. PYTHON_VENV_DIR=${PYTHON_VENV_DIR:-"${BUILD_DIR}/pyenv"} CLANG_DEFAULT_VERSION=${CLANG_DEFAULT_VERSION:-"15"} - RUST_DEFAULT_VERSION=${RUST_DEFAULT_VERSION:-"1.72"} + RUST_DEFAULT_VERSION=${RUST_DEFAULT_VERSION:-"1.76"} MAKE_DEBUG=${MAKE_DEBUG:-"1"} MAKE_USE_CLANG=${MAKE_USE_CLANG:-"$(get_default_use_clang)"} @@ -177,5 +176,5 @@ but receives little to no testing. Please read the `./make.sh` file for more details on the build helpers. [UNIX build process](./build-unix.md) should also have more info though using -`./make.sh` is recommended as it builds out-of-tree by default and supports +`./make.sh` is recommended as it builds out-of-tree by default and supports multiple targets. diff --git a/lib/Cargo.lock b/lib/Cargo.lock index 0b0b8e4d75a..43163dd0adb 100644 --- a/lib/Cargo.lock +++ b/lib/Cargo.lock @@ -23,19 +23,13 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ - "gimli 0.29.0", + "gimli 0.31.0", ] -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - [[package]] name = "adler2" version = "2.0.0" @@ -117,6 +111,7 @@ version = "0.1.0" dependencies = [ "anyhow", "bincode", + "log", "num_cpus", "rocksdb", "serde", @@ -130,6 +125,7 @@ dependencies = [ "bitcoin", "bitflags 2.6.0", "hex", + "lazy_static", ] [[package]] @@ -174,7 +170,6 @@ dependencies = [ "sp-io", "spin", "substrate-bn", - "tempdir", "thiserror", "tokio", "vsdb_core", @@ -187,9 +182,12 @@ version = "0.1.0" dependencies = [ "ain-cpp-imports", "ain-evm", + "ain-ocean", "anyhow", "async-trait", + 
"axum 0.7.5", "cxx", + "defichain-rpc", "env_logger", "ethereum", "ethereum-types", @@ -197,7 +195,7 @@ dependencies = [ "heck 0.4.1", "hex", "hex-literal", - "hyper", + "hyper 0.14.30", "jsonrpsee 0.16.3", "jsonrpsee-server", "lazy_static", @@ -235,6 +233,43 @@ dependencies = [ "syn 2.0.77", ] +[[package]] +name = "ain-ocean" +version = "0.1.0" +dependencies = [ + "ain-cpp-imports", + "ain-db", + "ain-dftx", + "ain-macros", + "axum 0.7.5", + "bincode", + "bitcoin", + "cached", + "defichain-rpc", + "futures", + "hex", + "hyper 0.14.30", + "indexmap 2.5.0", + "jsonrpc-async", + "jsonrpsee 0.16.3", + "lazy_static", + "log", + "num_cpus", + "parking_lot", + "petgraph", + "rocksdb", + "rust_decimal", + "rust_decimal_macros", + "serde", + "serde_json", + "serde_urlencoded", + "serde_with", + "sha2 0.10.8", + "snafu", + "tempfile", + "tokio", +] + [[package]] name = "ain-rs-exports" version = "0.1.0" @@ -244,9 +279,11 @@ dependencies = [ "ain-evm", "ain-grpc", "ain-macros", + "ain-ocean", "anyhow", "cxx", "cxx-gen", + "defichain-rpc", "ethabi", "ethereum", "ethereum-types", @@ -304,9 +341,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" [[package]] name = "array-bytes" @@ -403,13 +440,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", - "axum-core", + "axum-core 0.3.4", "bitflags 1.3.2", "bytes", "futures-util", - "http", - "http-body", - "hyper", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.30", "itoa", "matchit", "memchr", @@ -418,12 +455,47 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "sync_wrapper", + "sync_wrapper 0.1.2", "tower", "tower-layer", 
"tower-service", ] +[[package]] +name = "axum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +dependencies = [ + "async-trait", + "axum-core 0.4.3", + "axum-macros", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.4.1", + "hyper-util", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper 1.0.1", + "tokio", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "axum-core" version = "0.3.4" @@ -433,27 +505,60 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http", - "http-body", + "http 0.2.12", + "http-body 0.4.6", "mime", "rustversion", "tower-layer", "tower-service", ] +[[package]] +name = "axum-core" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-macros" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00c055ee2d014ae5981ce1016374e8213682aa14d9bf40e48ab48b5f3ef20eaa" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ - "addr2line 0.22.0", - "cc", 
+ "addr2line 0.24.1", "cfg-if", "libc", - "miniz_oxide 0.7.4", + "miniz_oxide", "object 0.36.4", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -480,6 +585,15 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "base64-compat" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a8d4d2746f89841e49230dd26917df1876050f95abafafbe34f47cb534b88d7" +dependencies = [ + "byteorder", +] + [[package]] name = "base64ct" version = "1.6.0" @@ -559,7 +673,7 @@ checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" [[package]] name = "bitcoin" version = "0.31.0" -source = "git+https://github.com/defich/rust-bitcoin.git#c68ce40fd7f618105ab7d6e8be49ce083f97d4e8" +source = "git+https://github.com/defich/rust-bitcoin.git#fe08844ed8ca40ee9208cec3c63a042bb5d2e299" dependencies = [ "bech32", "bitcoin-internals", @@ -568,6 +682,7 @@ dependencies = [ "hex-conservative", "hex_lit", "secp256k1 0.28.2", + "serde", ] [[package]] @@ -575,11 +690,14 @@ name = "bitcoin-internals" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" +dependencies = [ + "serde", +] [[package]] name = "bitcoin-io" version = "0.1.0" -source = "git+https://github.com/defich/rust-bitcoin.git#c68ce40fd7f618105ab7d6e8be49ce083f97d4e8" +source = "git+https://github.com/defich/rust-bitcoin.git#fe08844ed8ca40ee9208cec3c63a042bb5d2e299" [[package]] name = "bitcoin_hashes" @@ -589,6 +707,7 @@ checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" dependencies = [ "bitcoin-internals", "hex-conservative", + "serde", ] [[package]] @@ -674,6 +793,30 @@ dependencies = [ "byte-tools", ] +[[package]] +name = "borsh" +version = "1.5.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" +dependencies = [ + "borsh-derive", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" +dependencies = [ + "once_cell", + "proc-macro-crate 3.2.0", + "proc-macro2", + "quote", + "syn 2.0.77", + "syn_derive", +] + [[package]] name = "bounded-collections" version = "0.1.9" @@ -741,6 +884,28 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +[[package]] +name = "bytecheck" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2" +dependencies = [ + "bytecheck_derive", + "ptr_meta", + "simdutf8", +] + +[[package]] +name = "bytecheck_derive" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "byteorder" version = "1.5.0" @@ -777,11 +942,47 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "cached" +version = "0.48.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "355face540df58778b96814c48abb3c2ed67c4878a8087ab1819c1fedeec505f" +dependencies = [ + "ahash 0.8.11", + "async-trait", + "cached_proc_macro", + "cached_proc_macro_types", + "futures", + "hashbrown 0.14.5", + "instant", + "once_cell", + "thiserror", + "tokio", +] + +[[package]] +name = "cached_proc_macro" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9d52f526f7cbc875b296856ca8c964a9f6290556922c303a8a3883e3c676e6a1" +dependencies = [ + "darling 0.14.4", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "cached_proc_macro_types" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ade8366b8bd5ba243f0a58f036cc0ca8a2f069cff1a2351ef1cac6b083e16fc0" + [[package]] name = "cc" -version = "1.1.15" +version = "1.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57b6a275aa2903740dc87da01c62040406b8812552e97129a63ea8850a17c6e6" +checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476" dependencies = [ "jobserver", "libc", @@ -803,6 +1004,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" version = "0.4.38" @@ -920,9 +1127,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51e852e6dc9a5bed1fae92dd2375037bf2b768725bf3be87811edee3249d09ad" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -1127,14 +1334,38 @@ dependencies = [ "syn 2.0.77", ] +[[package]] +name = "darling" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +dependencies = [ + "darling_core 0.14.4", + "darling_macro 0.14.4", +] + [[package]] name = "darling" version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.20.10", + "darling_macro 0.20.10", +] + +[[package]] +name = "darling_core" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn 1.0.109", ] [[package]] @@ -1151,17 +1382,52 @@ dependencies = [ "syn 2.0.77", ] +[[package]] +name = "darling_macro" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +dependencies = [ + "darling_core 0.14.4", + "quote", + "syn 1.0.109", +] + [[package]] name = "darling_macro" version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ - "darling_core", + "darling_core 0.20.10", "quote", "syn 2.0.77", ] +[[package]] +name = "defichain-rpc" +version = "0.18.0" +source = "git+https://github.com/defich/rust-defichain-rpc.git#9a338f8d0ed5e837a67eb8c1aa04a9efc0c5d2ba" +dependencies = [ + "async-trait", + "defichain-rpc-json", + "jsonrpc-async", + "log", + "serde", + "serde_json", +] + +[[package]] +name = "defichain-rpc-json" +version = "0.18.0" +source = "git+https://github.com/defich/rust-defichain-rpc.git#9a338f8d0ed5e837a67eb8c1aa04a9efc0c5d2ba" +dependencies = [ + "bitcoin", + "serde", + "serde_json", + "serde_with", +] + [[package]] name = "der" version = "0.7.9" @@ -1689,7 +1955,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" dependencies = [ "crc32fast", - "miniz_oxide 0.8.0", + "miniz_oxide", ] [[package]] @@ -1727,12 +1993,6 @@ dependencies = [ "winapi", ] 
-[[package]] -name = "fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - [[package]] name = "funty" version = "2.0.0" @@ -1902,9 +2162,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "glob" @@ -1914,9 +2174,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" [[package]] name = "globset" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57da3b9b5b85bd66f31093f8c408b90a74431672542466497dcbdfdc02034be1" +checksum = "15f1ce686646e7f1e19bf7d5533fe443a45dbfb990e00629110797578b42fb19" dependencies = [ "aho-corasick", "bstr", @@ -1947,7 +2207,7 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http", + "http 0.2.12", "indexmap 2.5.0", "slab", "tokio", @@ -2130,6 +2390,17 @@ dependencies = [ "itoa", ] +[[package]] +name = "http" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + [[package]] name = "http-body" version = "0.4.6" @@ -2137,7 +2408,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http", + "http 0.2.12", + "pin-project-lite", +] + +[[package]] +name = "http-body" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +dependencies = [ + 
"bytes", + "http 1.1.0", +] + +[[package]] +name = "http-body-util" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", "pin-project-lite", ] @@ -2176,8 +2470,8 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http", - "http-body", + "http 0.2.12", + "http-body 0.4.6", "httparse", "httpdate", "itoa", @@ -2189,6 +2483,25 @@ dependencies = [ "want", ] +[[package]] +name = "hyper" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "smallvec", + "tokio", +] + [[package]] name = "hyper-rustls" version = "0.24.2" @@ -2196,8 +2509,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http", - "hyper", + "http 0.2.12", + "hyper 0.14.30", "log", "rustls", "rustls-native-certs", @@ -2212,12 +2525,27 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ - "hyper", + "hyper 0.14.30", "pin-project-lite", "tokio", "tokio-io-timeout", ] +[[package]] +name = "hyper-util" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +dependencies = [ + "bytes", + "futures-util", + "http 1.1.0", + "http-body 1.0.1", + "hyper 1.4.1", + "pin-project-lite", + "tokio", +] + [[package]] name = "iana-time-zone" version = "0.1.60" @@ -2326,6 +2654,15 @@ 
dependencies = [ "generic-array 0.14.7", ] +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + [[package]] name = "io-lifetimes" version = "1.0.11" @@ -2339,9 +2676,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" [[package]] name = "iri-string" @@ -2406,6 +2743,20 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "jsonrpc-async" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a20e8e4ed08ee58717113cbf277b1ecef5cd9554d3e48c114de338289727d466" +dependencies = [ + "async-trait", + "base64-compat", + "serde", + "serde_derive", + "serde_json", + "tokio", +] + [[package]] name = "jsonrpsee" version = "0.16.3" @@ -2444,7 +2795,7 @@ dependencies = [ "futures-channel", "futures-util", "globset", - "hyper", + "hyper 0.14.30", "jsonrpsee-types 0.16.3", "parking_lot", "rand 0.8.5", @@ -2467,7 +2818,7 @@ dependencies = [ "async-trait", "beef", "futures-util", - "hyper", + "hyper 0.14.30", "jsonrpsee-types 0.18.2", "serde", "serde_json", @@ -2483,7 +2834,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e5f9fabdd5d79344728521bb65e3106b49ec405a78b66fbff073b72b389fa43" dependencies = [ "async-trait", - "hyper", + "hyper 0.14.30", "hyper-rustls", "jsonrpsee-core 0.16.3", "jsonrpsee-types 0.16.3", @@ -2502,7 +2853,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1705c65069729e3dccff6fd91ee431d5d31cabcf00ce68a62a2c6435ac713af9" dependencies = [ "async-trait", - "hyper", + "hyper 0.14.30", "hyper-rustls", "jsonrpsee-core 0.18.2", 
"jsonrpsee-types 0.18.2", @@ -2535,8 +2886,8 @@ checksum = "cf4d945a6008c9b03db3354fb3c83ee02d2faa9f2e755ec1dfb69c3551b8f4ba" dependencies = [ "futures-channel", "futures-util", - "http", - "hyper", + "http 0.2.12", + "hyper 0.14.30", "jsonrpsee-core 0.16.3", "jsonrpsee-types 0.16.3", "serde", @@ -2884,7 +3235,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2cffa4ad52c6f791f4f8b15f0c05f9824b2ced1160e88cc393d64fff9a8ac64" dependencies = [ - "rustix 0.38.35", + "rustix 0.38.36", ] [[package]] @@ -2966,15 +3317,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.8.0" @@ -3335,6 +3677,8 @@ checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", "indexmap 2.5.0", + "serde", + "serde_derive", ] [[package]] @@ -3622,6 +3966,26 @@ dependencies = [ "cc", ] +[[package]] +name = "ptr_meta" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" +dependencies = [ + "ptr_meta_derive", +] + +[[package]] +name = "ptr_meta_derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "quote" version = "1.0.37" @@ -3637,19 +4001,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" 
-[[package]] -name = "rand" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" -dependencies = [ - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "rdrand", - "winapi", -] - [[package]] name = "rand" version = "0.7.3" @@ -3694,21 +4045,6 @@ dependencies = [ "rand_core 0.6.4", ] -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", -] - -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - [[package]] name = "rand_core" version = "0.5.1" @@ -3765,15 +4101,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - [[package]] name = "redox_syscall" version = "0.5.3" @@ -3859,12 +4186,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] -name = "remove_dir_all" -version = "0.5.3" +name = "rend" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c" dependencies = [ - "winapi", + "bytecheck", ] [[package]] @@ -3879,9 +4206,9 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http", - "http-body", - "hyper", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.30", "hyper-rustls", "ipnet", "js-sys", @@ -3895,7 +4222,7 @@ 
dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 0.1.2", "system-configuration", "tokio", "tokio-rustls", @@ -3942,6 +4269,35 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "rkyv" +version = "0.7.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" +dependencies = [ + "bitvec", + "bytecheck", + "bytes", + "hashbrown 0.12.3", + "ptr_meta", + "rend", + "rkyv_derive", + "seahash", + "tinyvec", + "uuid", +] + +[[package]] +name = "rkyv_derive" +version = "0.7.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "rlp" version = "0.5.2" @@ -3984,6 +4340,32 @@ dependencies = [ "time", ] +[[package]] +name = "rust_decimal" +version = "1.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b082d80e3e3cc52b2ed634388d436fe1f4de6af5786cc2de9ba9737527bdf555" +dependencies = [ + "arrayvec 0.7.6", + "borsh", + "bytes", + "num-traits", + "rand 0.8.5", + "rkyv", + "serde", + "serde_json", +] + +[[package]] +name = "rust_decimal_macros" +version = "1.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da991f231869f34268415a49724c6578e740ad697ba0999199d6f22b3949332c" +dependencies = [ + "quote", + "rust_decimal", +] + [[package]] name = "rustc-demangle" version = "0.1.24" @@ -4046,9 +4428,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.35" +version = "0.38.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a85d50532239da68e9addb745ba38ff4612a242c1c7ceea689c4bc7c2f43c36f" +checksum = "3f55e80d50763938498dd5ebb18647174e0c76dc38c5505294bb224624f30f36" dependencies = [ "bitflags 2.6.0", "errno", @@ -4149,11 +4531,11 @@ dependencies = [ 
[[package]] name = "schannel" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4224,6 +4606,12 @@ dependencies = [ "untrusted", ] +[[package]] +name = "seahash" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" + [[package]] name = "sec1" version = "0.7.3" @@ -4254,7 +4642,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" dependencies = [ "bitcoin_hashes", + "rand 0.8.5", "secp256k1-sys 0.9.2", + "serde", ] [[package]] @@ -4333,18 +4723,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -4353,9 +4743,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = 
"6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", @@ -4363,6 +4753,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -4399,7 +4799,7 @@ version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" dependencies = [ - "darling", + "darling 0.20.10", "proc-macro2", "quote", "syn 2.0.77", @@ -4490,6 +4890,15 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" +[[package]] +name = "signal-hook-registry" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +dependencies = [ + "libc", +] + [[package]] name = "signature" version = "1.6.4" @@ -4506,6 +4915,12 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "simdutf8" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" + [[package]] name = "siphasher" version = "0.3.11" @@ -4527,6 +4942,27 @@ version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +[[package]] +name = "snafu" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b835cb902660db3415a672d862905e791e54d306c6e8189168c7f3d9ae1c79d" +dependencies = [ + "snafu-derive", +] + +[[package]] +name = "snafu-derive" +version = "0.8.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d1e02fca405f6280643174a50c942219f0bbf4dbf7d480f1dd864d6f211ae5" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "socket2" version = "0.5.7" @@ -4546,7 +4982,7 @@ dependencies = [ "base64 0.13.1", "bytes", "futures", - "http", + "http 0.2.12", "httparse", "log", "rand 0.8.5", @@ -4890,6 +5326,12 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + [[package]] name = "strsim" version = "0.11.1" @@ -5016,12 +5458,30 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.77", +] + [[package]] name = "sync_wrapper" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + [[package]] name = "system-configuration" version = "0.5.1" @@ -5055,16 +5515,6 @@ version = "0.12.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" -[[package]] -name = "tempdir" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" -dependencies = [ - "rand 0.4.6", - "remove_dir_all", -] - [[package]] name = "tempfile" version = "3.12.0" @@ -5074,7 +5524,7 @@ dependencies = [ "cfg-if", "fastrand", "once_cell", - "rustix 0.38.35", + "rustix 0.38.36", "windows-sys 0.59.0", ] @@ -5230,7 +5680,9 @@ dependencies = [ "bytes", "libc", "mio", + "parking_lot", "pin-project-lite", + "signal-hook-registry", "socket2", "tokio-macros", "windows-sys 0.52.0", @@ -5269,9 +5721,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -5280,9 +5732,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -5327,15 +5779,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" dependencies = [ "async-trait", - "axum", + "axum 0.6.20", "base64 0.21.7", "bytes", "futures-core", "futures-util", "h2", - "http", - "http-body", - "hyper", + "http 0.2.12", + "http-body 0.4.6", + "hyper 0.14.30", "hyper-timeout", "percent-encoding", "pin-project", @@ -5393,8 +5845,8 @@ dependencies = [ "bytes", "futures-core", "futures-util", - "http", - "http-body", + "http 0.2.12", + "http-body 0.4.6", "http-range-header", "httpdate", "iri-string", @@ -6007,7 +6459,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.35", + "rustix 0.38.36", ] 
[[package]] diff --git a/lib/Cargo.toml b/lib/Cargo.toml index d1cc1ade18c..fa5ac846736 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -39,6 +39,7 @@ serde_json = "1.0" tokio = { version = "1.1", features = ["rt-multi-thread"] } async-trait = "0.1" regex = "1.5" +indexmap = "2.2.6" ## build @@ -60,6 +61,7 @@ prettyplease = "0.2.4" libsecp256k1 = "0.7" ripemd = { version = "0.1", default-features = false } sha3 = "0.10" +sha2 = { version = "0.10", default-features = false } ## common @@ -81,7 +83,7 @@ jsonrpsee-core = "0.16" jsonrpsee-server = "0.16" jsonrpsee-types = "0.16" -tempdir = "0.3" +axum = { version = "0.7.5", features = ["macros"] } rocksdb = { version = "0.21", default-features = false } statrs = "0.16" @@ -91,8 +93,6 @@ parking_lot = "0.12.1" spin = "0.9.8" rayon = "1.8.0" -bitcoin = "0.31" - ### eth evm = { version = "0.39", default-features = false, features = ["with-serde", "tracing"] } @@ -121,6 +121,11 @@ lru = "0.12" sp-io = "24.0" substrate-bn = "0.6" +#### Ocean dependencies +bitcoin = "0.31" +cached = { version = "0.48", features = ["async"] } +defichain-rpc = { version = "0.18.0", git = "https://github.com/defich/rust-defichain-rpc.git"} + ### Local crates ain-cpp-imports = { path = "./ain-cpp-imports" } ain-db = { path = "./ain-db" } diff --git a/lib/ain-cpp-imports/build.rs b/lib/ain-cpp-imports/build.rs index e85997ba4af..b360376635d 100644 --- a/lib/ain-cpp-imports/build.rs +++ b/lib/ain-cpp-imports/build.rs @@ -10,7 +10,7 @@ fn main() -> Result<()> { .parent() .and_then(std::path::Path::parent) .map(|x| x.join("src")) - .ok_or(format_err!("path err"))?; + .ok_or_else(|| format_err!("path err"))?; let ffi_rs_src_path = &manifest_dir_path.join("src/bridge.rs"); let ffi_exports_h_path = &cpp_src_path.join("ffi/ffiexports.h"); diff --git a/lib/ain-cpp-imports/src/bridge.rs b/lib/ain-cpp-imports/src/bridge.rs index a7f8201def3..4c712bfcdc7 100644 --- a/lib/ain-cpp-imports/src/bridge.rs +++ b/lib/ain-cpp-imports/src/bridge.rs @@ -8,6 +8,36 
@@ pub mod ffi { pub rbf_fee_increment: u64, } + #[derive(Debug, Clone)] + pub struct PoolPairCreationHeight { + pub id: u32, + pub id_token_a: u32, + pub id_token_b: u32, + pub creation_height: u32, + } + + #[derive(Debug, Clone)] + pub struct DSTToken { + pub id: u32, + pub name: String, + pub symbol: String, + pub symbol_key: String, + pub decimal: u8, + pub is_dat: bool, + pub is_lps: bool, + pub tradable: bool, + pub mintable: bool, + pub finalize: bool, + pub is_loan_token: bool, + pub minted: i64, + pub limit: i64, + pub creation_tx: String, + pub creation_height: i32, + pub destruction_tx: String, + pub destruction_height: i32, + pub collateral_address: String, + } + #[derive(Debug, Clone)] pub struct DST20Token { pub id: u64, @@ -49,6 +79,8 @@ pub mod ffi { unsafe extern "C++" { include!("ffi/ffiexports.h"); type Attributes; + type PoolPairCreationHeight; + type DSTToken; type DST20Token; type TransactionData; type SystemTxType; @@ -56,6 +88,8 @@ pub mod ffi { type TokenAmount; fn getChainId() -> u64; + fn getRPCPort() -> i32; + fn getRPCAuth() -> String; fn isMining() -> bool; fn publishEthTransaction(data: Vec) -> String; fn getAccounts() -> Vec; @@ -77,6 +111,8 @@ pub mod ffi { fn getEthSyncStatus() -> [i64; 2]; fn getAttributeValues(mnview_ptr: usize) -> Attributes; fn CppLogPrintf(message: String); + fn getPoolPairs() -> Vec; + fn getDSTToken(id: String) -> UniquePtr; #[allow(clippy::ptr_arg)] fn getDST20Tokens(mnview_ptr: usize, tokens: &mut Vec) -> bool; fn getClientVersion() -> String; @@ -96,5 +132,6 @@ pub mod ffi { old_amount: TokenAmount, new_amount: &mut TokenAmount, ) -> bool; + fn isSkippedTx(tx_hash: [u8; 32]) -> bool; } } diff --git a/lib/ain-cpp-imports/src/lib.rs b/lib/ain-cpp-imports/src/lib.rs index 55c8942be83..55b3af31acb 100644 --- a/lib/ain-cpp-imports/src/lib.rs +++ b/lib/ain-cpp-imports/src/lib.rs @@ -9,6 +9,12 @@ use bridge::ffi; #[cfg(test)] #[allow(non_snake_case)] mod ffi { + use core::ffi::c_void; + use 
cxx::{memory::UniquePtrTarget, UniquePtr}; + use std::mem::MaybeUninit; + + const UNIMPL_MSG: &str = "This cannot be used on a test path"; + pub struct Attributes { pub block_gas_target_factor: u64, pub block_gas_limit: u64, @@ -16,6 +22,55 @@ mod ffi { pub rbf_fee_increment: u64, } + pub struct PoolPairCreationHeight { + pub id: u32, + pub id_token_a: u32, + pub id_token_b: u32, + pub creation_height: u32, + } + + pub struct DSTToken { + pub id: u32, + pub name: String, + pub symbol: String, + pub symbol_key: String, + pub decimal: u8, + pub is_dat: bool, + pub is_lps: bool, + pub tradable: bool, + pub mintable: bool, + pub finalize: bool, + pub is_loan_token: bool, + pub minted: i64, + pub limit: i64, + pub creation_tx: String, + pub creation_height: i32, + pub destruction_tx: String, + pub destruction_height: i32, + pub collateral_address: String, + } + + unsafe impl UniquePtrTarget for DSTToken { + fn __typename(_f: &mut std::fmt::Formatter) -> std::fmt::Result { + unimplemented!("{}", UNIMPL_MSG) + } + fn __null() -> MaybeUninit<*mut c_void> { + unimplemented!("{}", UNIMPL_MSG) + } + unsafe fn __raw(_raw: *mut Self) -> MaybeUninit<*mut c_void> { + unimplemented!("{}", UNIMPL_MSG) + } + unsafe fn __get(_repr: MaybeUninit<*mut c_void>) -> *const Self { + unimplemented!("{}", UNIMPL_MSG) + } + unsafe fn __release(_repr: MaybeUninit<*mut c_void>) -> *mut Self { + unimplemented!("{}", UNIMPL_MSG) + } + unsafe fn __drop(_repr: MaybeUninit<*mut c_void>) { + unimplemented!("{}", UNIMPL_MSG) + } + } + pub struct DST20Token { pub id: u64, pub name: String, @@ -49,10 +104,15 @@ mod ffi { pub amount: u64, } - const UNIMPL_MSG: &str = "This cannot be used on a test path"; pub fn getChainId() -> u64 { unimplemented!("{}", UNIMPL_MSG) } + pub fn getRPCPort() -> i32 { + unimplemented!("{}", UNIMPL_MSG) + } + pub fn getRPCAuth() -> String { + unimplemented!("{}", UNIMPL_MSG) + } pub fn isMining() -> bool { unimplemented!("{}", UNIMPL_MSG) } @@ -119,6 +179,14 @@ mod ffi { // 
Just the logs are skipped. } + pub fn getPoolPairs() -> Vec { + unimplemented!("{}", UNIMPL_MSG) + } + + pub fn getDSTToken(_id: String) -> UniquePtr { + unimplemented!("{}", UNIMPL_MSG) + } + #[allow(clippy::ptr_arg)] pub fn getDST20Tokens(_mnview_ptr: usize, _tokens: &mut Vec) -> bool { unimplemented!("{}", UNIMPL_MSG) @@ -166,9 +234,14 @@ mod ffi { ) -> bool { unimplemented!("{}", UNIMPL_MSG) } + pub fn isSkippedTx(_tx_hash: [u8; 32]) -> bool { + unimplemented!("{}", UNIMPL_MSG) + } } pub use ffi::Attributes; +pub use ffi::DSTToken; +pub use ffi::PoolPairCreationHeight; pub use ffi::SystemTxData; pub use ffi::SystemTxType; pub use ffi::TokenAmount; @@ -179,6 +252,24 @@ pub fn get_chain_id() -> Result> { Ok(chain_id) } +/// Returns the RPC port. +pub fn get_rpc_port() -> i32 { + ffi::getRPCPort() +} + +/// Returns the RPC authorization string. +pub fn get_rpc_auth() -> Result<(String, String), Box> { + match ffi::getRPCAuth() + .splitn(2, ':') + .map(String::from) + .collect::>() + .as_slice() + { + [user, pass] => Ok((user.clone(), pass.clone())), + _ => Err("Error getting user and password".into()), + } +} + /// Retrieves the client version string. 
pub fn get_client_version() -> String { ffi::getClientVersion() @@ -315,6 +406,16 @@ pub fn log_print(message: &str) { ffi::CppLogPrintf(message.to_owned()); } +/// List pool pairs +pub fn get_pool_pairs() -> Vec { + ffi::getPoolPairs() +} + +/// Get token +pub fn get_dst_token(id: String) -> cxx::UniquePtr { + ffi::getDSTToken(id) +} + /// Fetches all DST20 tokens in view, returns the result of the migration #[allow(clippy::ptr_arg)] pub fn get_dst20_tokens(mnview_ptr: usize, tokens: &mut Vec) -> bool { @@ -384,5 +485,9 @@ pub fn split_tokens_from_evm( ffi::migrateTokensFromEVM(mnview_ptr, old_amount, new_amount) } +pub fn is_skipped_tx(tx_hash: [u8; 32]) -> bool { + ffi::isSkippedTx(tx_hash) +} + #[cfg(test)] mod tests {} diff --git a/lib/ain-db/Cargo.toml b/lib/ain-db/Cargo.toml index b319eba6251..02579266ce0 100644 --- a/lib/ain-db/Cargo.toml +++ b/lib/ain-db/Cargo.toml @@ -11,4 +11,4 @@ bincode.workspace = true rocksdb.workspace = true anyhow.workspace = true num_cpus.workspace = true - +log.workspace = true diff --git a/lib/ain-db/src/lib.rs b/lib/ain-db/src/lib.rs index 87d06d66014..8b1356417eb 100644 --- a/lib/ain-db/src/lib.rs +++ b/lib/ain-db/src/lib.rs @@ -1,4 +1,5 @@ use std::{ + collections::BTreeMap, fmt::Debug, iter::Iterator, marker::PhantomData, @@ -6,8 +7,8 @@ use std::{ sync::Arc, }; pub mod version; - use anyhow::format_err; +use log::debug; use rocksdb::{ BlockBasedOptions, Cache, ColumnFamily, ColumnFamilyDescriptor, DBIterator, Direction, IteratorMode, Options, DB, @@ -49,10 +50,14 @@ fn get_db_default_options() -> Options { pub struct Rocks(DB); impl Rocks { - pub fn open(path: &PathBuf, cf_names: &[&'static str], opts: Option) -> Result { - let cf_descriptors = cf_names - .iter() - .map(|cf_name| ColumnFamilyDescriptor::new(*cf_name, Options::default())); + pub fn open( + path: &PathBuf, + cf_names: Vec<(&'static str, Option)>, + opts: Option, + ) -> Result { + let cf_descriptors = cf_names.into_iter().map(|(cf_name, opts)| { + 
ColumnFamilyDescriptor::new(cf_name, opts.unwrap_or_else(Options::default)) + }); let db_opts = opts.unwrap_or_else(get_db_default_options); let db = DB::open_cf_descriptors(&db_opts, path, cf_descriptors)?; @@ -67,6 +72,10 @@ impl Rocks { Ok(()) } + pub fn compact(&self) { + self.0.compact_range(None::<&[u8]>, None::<&[u8]>); + } + pub fn cf_handle(&self, cf: &str) -> Result<&ColumnFamily> { self.0 .cf_handle(cf) @@ -99,6 +108,66 @@ impl Rocks { self.0.flush()?; Ok(()) } + + pub fn dump_table_sizes(&self, cf_names: &[&'static str]) -> Result<()> { + let mut stats: BTreeMap = BTreeMap::new(); // (size, entries, avg_size) + let mut total_size: u64 = 0; + let mut total_entries: u64 = 0; + + for cf_name in cf_names.iter() { + if let Some(cf) = self.0.cf_handle(cf_name) { + let size = self + .0 + .property_int_value_cf(cf, "rocksdb.estimate-live-data-size")? + .unwrap_or(0); + let entries = self + .0 + .property_int_value_cf(cf, "rocksdb.estimate-num-keys")? + .unwrap_or(0); + let avg_size = if entries > 0 { + size as f64 / entries as f64 + } else { + 0.0 + }; + + stats.insert(cf_name.to_string(), (size, entries, avg_size)); + total_size += size; + total_entries += entries; + } + } + + debug!("RocksDB Table Statistics:"); + debug!("{:-<80}", ""); + debug!( + "{:<30} {:>10} {:>15} {:>15} {:>10}", + "Table Name", "Size (MB)", "Entries", "Avg Size (B)", "%% of Total" + ); + debug!("{:-<80}", ""); + + for (name, (size, entries, avg_size)) in stats.iter() { + let size_mb = *size as f64 / (1024.0 * 1024.0); + let percentage = (*size as f64 / total_size as f64) * 100.0; + debug!( + "{:<30} {:>10.2} {:>15} {:>15.2} {:>9.2}%%", + name, size_mb, entries, avg_size, percentage + ); + } + + debug!("{:-<80}", ""); + let total_avg_size = if total_entries > 0 { + total_size as f64 / total_entries as f64 + } else { + 0.0 + }; + debug!( + "Total size: {:.2} MB", + total_size as f64 / (1024.0 * 1024.0) + ); + debug!("Total entries: {}", total_entries); + debug!("Overall average entry 
size: {:.2} bytes", total_avg_size); + + Ok(()) + } } // diff --git a/lib/ain-dftx/Cargo.toml b/lib/ain-dftx/Cargo.toml index 5b50027c31b..b0f0d4ee428 100644 --- a/lib/ain-dftx/Cargo.toml +++ b/lib/ain-dftx/Cargo.toml @@ -10,3 +10,4 @@ ain-macros.workspace = true bitcoin.workspace = true hex.workspace = true bitflags = "2.4.1" +lazy_static.workspace = true diff --git a/lib/ain-dftx/src/lib.rs b/lib/ain-dftx/src/lib.rs index 8de5dbc801a..f647a3b40f4 100644 --- a/lib/ain-dftx/src/lib.rs +++ b/lib/ain-dftx/src/lib.rs @@ -1,4 +1,5 @@ pub mod custom_tx; +mod skipped_tx; pub mod types; pub use bitcoin::{ @@ -7,5 +8,6 @@ pub use bitcoin::{ }; pub use crate::types::*; +pub use skipped_tx::is_skipped_tx; pub const COIN: i64 = 100_000_000; diff --git a/lib/ain-dftx/src/skipped_tx.rs b/lib/ain-dftx/src/skipped_tx.rs new file mode 100644 index 00000000000..20f3196bfe2 --- /dev/null +++ b/lib/ain-dftx/src/skipped_tx.rs @@ -0,0 +1,374 @@ +use bitcoin::Txid; +use std::{collections::HashSet, str::FromStr}; + +lazy_static::lazy_static! 
{ + static ref MAINNET_SKIPPED_TXS: HashSet = { + [ + "ffcafd71820269ffe9cbecc12619154a3c5b272f1437d4e0242ff73d2bf09e4e", + "4218d310d9a67b5e9a93d2a9ecf17bd70bda7a3670191efbc755b05c5fb8a6a4", + "e189126e56b85e66144c93b7c2d91baaa2680aed62fed09d3ea954a995d0a8d1", + "80c77656eaec77f893671e66bb2eefbbf73e141b65d8a5e1d151bc97fc9d2ef8", + "a49e34c38fd899bab21fbb01e0bf77df916648b972992bfbf2867fd040261097", + "fa28cb031aa4fdc88302e4c60f4abd9e3822bf5fc2e502fcd264b21b07b823f3", + "9436003f88a641fbaafc6e00fe1bfb2ddd4ca304b55d458f593300f9ca2d3285", + "31cc6209e24332464a7ac7adcfe93c247ca5a3c5b4f9acc340d133b158f95c39", + "5587a6da89e32ba21823f2b4d7cf6cf7f0e3ec4f1e1b286ca311fce0665941df", + "e80649287312ca0200fb057c1defa16015246164af690db9892c5c348b34c937", + "95b399d9fb1ef3b63a5e0f0071204f6046022df2be95849918093a52c35a0c78", + "d97aaefd4397824528132d91b9cab0e4c3abfc08c893be4cea02191cc18c5ded", + "985f8d38d9aa6160be02933d36b6683eb47ebe2d4a7735d068de37781e7c614d", + "35a07b93d238552a4f14294b7f791b2f8293a90ffe25344e55ef97a6008e07a0", + "28d7798ed6c3f7d2fbf4cdcd30aff42035c6a154514263ffacd92f7de5db96b2", + "0444d4825ee094e90ae94f88f19a5a0d1092fbb5d3d882a674ed07ccdb2eb1a7", + "96d86ce92175c714a544f8f45fca08cc17e2f72cf6352b9cd930f152965277e7", + "6f9c9383d72b4548c444ab7bd1159c70b62d16a34aeb49a84a646a35d30c726a", + "681cffe5fc82e2fd544f61050f95f3d8b489de56b4fb48edf72cb4bc5e77bc70", + "af7c6d1a872a5bfca3333b7eebf568091e4e471b81dd3f7bbc84e85ae5c480d8", + "ecf1564390f291f73e0e399aaaa1deeb4ea43cde7777d6435b29d65800fe68f8", + "dc554a7a4a29ad1023beff8e95d8b0d82adba69f5ee1332fd31312b49cc40c1c", + "6480180d1fe751aa650e60f2b6c57a2fea77c97d5e6e8e2ee1316aa5d6b03b88", + "009f8d084c01dfafb11ed86456629240ae71c0ca402f0c22e3b5ce59fa82d5e1", + "a50abd1cb347b5740f87baec6df029951bf45b2d21df3120d7e915ee1d1ff182", + "a87c7a281f4ac908caedbbd06111f4db7e2edae8a14a13b156458047b7747f86", + "2ea0780ee8e987d45dd01840214a3a1840d88029bd679f940b36c80efb91a207", + 
"cb28e755ff0e4590a6fd558129b5c61872da691140feced04d482b4c69eb344f", + "a5fbe7163536e87d91c36f45b0975542c45fad7284c62cb2592548662dfe3796", + "9c405cbed7db2d50e38592795f6fd5fd04923cf72b0cf601ef300547ff8adae0", + "aa57e4eeaaa603262b8f87284b7b396900c5572587b85795a62b5ccd43fcee44", + "4394684bfd9987f5321a3a32dae684aae76e7bd5949b360a1a0d70cbdf484ede", + "b77e714aea068a5cd9a29d428529476ec06f268efb6ef81592768f172140c00d", + "e8d67ff43e9316538cbae7c7328b0c8b5c9ffc20c7a727aed36d4d12d153de63", + "8b092dd476b64f39b8bbdaee14ddf762df9117dc73eb3315c7b3536e39d9cc68", + "7adc7c48e532fb00719346647d8125fc868699582c072fc92bd50f6e43ac4f52", + "db1b1175e41b998e44c7c6743661b0a80c1633419681ae4d4e5cfef5fd5c538a", + "60b3f187e94a003fb80bb13c6f4f13cea06eab3b2b353c282d0fba5317300eb1", + "1aec8a6c95012b57008cdac7d195bbfa2fd6d656a61039f7fcb60410fe201aa4", + "6c9e7811cb0445688bb596eaf2d217c307d76cb89b7346453d2a2807e7063eb4", + "386322e8dc69d8d27acbd0ac855485d1dbacdc41617c0d22e6ebc893fa31a9cd", + "4fa58b67d30847579414952e5ead0968f2318d8f39562e10fe5943ce31acb509", + "5529972027b390820bac87825d00c8021751bcb6982b42e5ad8afc7e7c1bd4d7", + "4e2170cb14a5aecec592166a8d550af22a0056706cf2b178edd9e01c8296ec66", + "4dc1deee1d1ab15c0d36515c34def7bbec08ab75bdc31b628353f72418e0413a", + "f6df01ac392911db4f7db20a36bdee8ff890e4cbb222308917cc85796ffef454", + "bf0010e6d193e7cfbbdc37ebabdd9a44392faa5f96e5056a64c5393d0cf35c57", + "d82a31354457546bd1f137bb969740d4f7e1b8c55eafbdb53668c94a59def865", + "fc2f320fbd1fe0fa9cee42d77916bcbbed99d708a6179d46fe824b7d81a7d640", + "bdcf70f61811407b1bece47c4d059c120f66271490b53cdaafa3969937d78fac", + "84b89b3ebb27f421f0fb5aca8cb278cb4e923c60a5a7663500abb5d28ed94078", + "f835245b51c96e7644a83e6cc25a97a01bf8937f8aaf07062922568df0ad5bf4", + "711aa9bfd5e257ed96737f443c81be2f11dc293d382db07052651af1e9b23c20", + "b235982686d53f7b3b320f82b9d2e0797a3e01118ce8eeecad001a36be86e71c", + "e5bbe632a8f2de73b69076fc52fd4d5c34ce2ebbd5b764f34c3630716dda81de", + 
"76a83f2249ce51e1a7bbaf5202b4d97d0b81444b1529fa8cb370dc3e804a7534", + "0801761fcfe11c6e5308988e451759ff2ffa785850a1b926beba59ccd395c4d6", + "d6683e99370460b684227a0e53d2808e28953c320f6dc727f5dccb310ce781c8", + "9d37c6e758035a8fb666862d1e386439c023b0ce287ffac0cc3f3c6d9276173b", + "0abc9dfde8b520373ed087d0e21587fcfde06099db2cb1397162c2aef7960a79", + "bf8ea24e8b67d6a8b54f38d98e3931dbeb2a28fe1175e441a4a6e1e75182b063", + "1b3e3a35fb203ddfdb59c234d11e4cdb2810358bc5d9b2f11347fe565d40dbc1", + "0ab0b76352e2d865761f4c53037041f33e1200183d55cdf6b09500d6f16b7329", + "c05c88e81acc4ceec896f023f881e3a2361168bc2c7d58c705b48c010a73e646", + "d00a428dab82dd77ad1cd98aff19c2bc3e8ea66c12bb69840c01c3d5a9efeffd", + "edc12f97ba7ef5adb5bfaa5bb23dd07845b0b82ae154114750d1f71097ec6d16", + "e19a5096afbaa2813537ba4f58a0ceea8f995384fd4e060bfd275eab5aeb7c8b", + "8c26822077d2067e4a1ddc1c60d63669c800167bff39d6125a359dbb49064a19", + "288d0b2f3b53f664c7767dbd5a74753fd3770f380c7cbf336c24ce3b7ecd60ee", + "75f08b3e30cd781acc0efeb785c37a3c95fc97e92429e84e8c8604dc356ee9e9", + "061a3d67ec232b0a2a7da96bffb33db75bfb8e2a99c4fccd1ed154c150e7e6ba", + "132f565363fb143d6d93386cacd23fcde80da586a1acad157821c4ef7d5404cf", + "fabb98a77e9918bc3a7eaf8dfeb9be868a21f800d5eb765ecbb6214dd3cac1c9", + "fa3e91f15bb99ff620301d0fd5f555bef82271098291f178318e39c7033d7000", + "d320ddf6a776c7c8519f894627a635b86f67712c1f556f4283e27bad1a0cd3c6", + "f7b010a21ea6e5a32b5dfb318df158423e386f5b6409ba3b39ef817fa3441c91", + "4ed05f0b57c558bd649b9cee5bf26b9d8287d0c9cf55b87fcd3734c7d15b71df", + "c26e803c660b8e75afb42e634ae98a8246b95d795f0e10706b9b446746aa65c3", + "256080dbcdd79384641daaf1acdc4d98ada2418e363b8dcf4ffe264f5a98f768", + "aa903d2a785e48d7d56cdb8cc045c7b217805ca8b39e75aeb92cf7d6d924bce6", + "509fee1659ab82b5a98b9c6172c4530cc5cedf0116e13d96de3a4cfa9138f352", + "820d04bdf2fe71ab37cdc3f2b9bd27329c39d0ecf17a01a4cbab145d65c673ce", + "b9c5bd0dcc52e8ab625122246b23f364d1816fdd725573f14f4e4c4b23d5aae9", + 
"d335014ef95af4c4b43560af2fbe6b7cb5cf7e418f33369de37f2d47ce14df14", + "5bdfabdb059e3d5990e7e34104bb75db2567b1c4e7e70947390ab2eea13a7b79", + "06582a883b4268ac889a88f4b118b7783be19d04955f73e4db3dedbf9e153da2", + "b400a43760311d5c5d5cff76697e409c1f93736a52b2fcfc040ec72102177669", + "43ce52b9cf9413d67833cf4ff218ed83f1577412b775e701de6c0a975877149d", + "70e28ab44d09d20186f5d687bab0aa96c5c50bc9fc4df49656717f9461f8293a", + "52a984f2e987775d1e6f573aa73b9fe29e3c53d99401b9252f47f3911a605835", + "1433fc1db5c6b3f06d547d49335d0001661219bb71a3ebdb946fcbc700cc9118", + "948e9ba6d053bfb6cd46f936e32ed68458cedb679a7ade9639c28fadb17894ee", + "df6570476da12df2b0c872842c4c6db8b14995153d8c82acf1defce526788391", + "01f90a6b8b32002198c31a2f1848594dc54936437391e708c7912229740c2ff7", + "99306b726f16b5d71bd5f119fc544e4fb048690215a61d95a6ad0bd6130cc8ab", + "eeed5a04aecf2eab3d25dfb36493f6658b16de92bc2a45a318d1cac9db0c49d6", + "18e806b716f96a85f897d5eeb884ce5ffe814f4f059c143856362670f98efbfd", + "1aa7f0435c287d1f7afb02418f0ec91a86cf7254b060dafbc721c3c2e84070ac", + "7a8ffa4da5d1c39d42d28d61b8936d345ccdec6f1c8941bf6cf8a072b745a8bf", + "d99d2dbc1a863b3635a41f5c2fb920a9bde40c316b5f2f339fb16837c2b2d2e3", + "2ac7800b0bfe2df8781e92f013f9f23b7ac6f37c5417054bc6f31fe845c4f2ad", + "dfc374165b8e4aad391cb3cb3572c483c813577804bdb702b011d93bb2ec9370", + "b7147548e97017123da2515b5ea350ba4a22ddd65942d0778500bd8cb6cb8899", + "588cc9c95b9f1ce65389b79d7da24c6f67e8651839073dd192528767be228bcf", + "9a1d9b9489ae66af0ab222dc688ae1c0888825def98ea8393f960213471c18e5", + "8ff534b441444245b891b7a6d0e412b190f2cef1b3c829c67748efdff62b88d8", + "8140dd3a265de4ac57500b00ca50685d1e77ad9647a3ae3bb1f494fadfe21e4b", + "29e7f896f333da8b06a9207948f28d4062d3050e9187ae34a98e53bc79e4d3e5", + "123ac611e350aa2cfe125c5817fc53fb01ef82653623137318d0d154981a8cd4", + "eb0522f3ef2b2c0dfb06089ad316fe15821d22fe39aafe1ab590a19c35b669ef", + "bd8fce56da2295796189408b4f45890dcb96354700668a03a83b9cd5f278a97c", + 
"3184a12cf00029214f37c836a93b829985419bc859e007e45f60837ac1ab143b", + "d6ff9862aa08900591e4d93aa59558c689b6f123ba2a49dc5bc396b2859eb1c7", + "d1e201c417fe9dd5d78a37f8177ac8aba2da52dc3e17cf9df81c74ee52ee4fa0", + "ffbee4e013aae65a27b93d3fd454e7ee1215d9ff0901c103ca02120083994ebf", + "5c31f86eeae9d06f181e524f9aa06b80ddb5424504ee45d6f516c91d9d2cf09e", + "3a7c04eba93a9be69dbfda3ff42a527424de34b9a8fdfa34452426c0df9ef7f8", + "5d5ad9adfe55114a4651a138d60e48f9e8a617b412525e72cc1bd3368ff9bbc3", + "03fa1265fb8b26b571baf6dd95c5018e4b05f69942889948045fc603761a5b3a", + "94707e6bf2e554f3ee3178f32cf1c7faa2b228e2f7f64638465f4f34b1688a5e", + "a371f737969d5721e815c5b8f462f178f0c1138894ae675fd27390f6a6915928", + "6478eaf3069bbd1c2352b9cb7d5d3316978dd56defc11df997542d88310969a0", + "ce2f2ecb4a7480acbb7ed40ea40a23906f5528dd52c6cd7ed43c8b557b228fa7", + "17a6e8df99fcaf396bf039f65dd2fad80d95493a6f77a22087a29eba2238f1fb", + "ef3f52c7d4470093959c2cc206d5ec3e429cda400263b7e129a8eaaeb17dba7d", + "5c34c639ba7bf651fb6ed2482f4d3f4f7da3241f6700606d4b5ad4584d989ce8", + "7adbe577fc9f076bb42fc6d1e868d5ed92e67657babef2f2da0fdc8c793ee2df", + "f426daa5da12162431328007d86c63695f7d21e89d66bb6d32b850df58fbada9", + "812bcc962b5169fe46db31b36b9e91ccb018f7089b9e3e5aeba589dde0b0bab0", + "fb6557bfe280cede8b5d4e1ae65838da70f36ab73862fffe16e567fbdb91b42b", + "7d1878abb072569a0c9be0874de4aa12476f47be3441004af75e563dfd575d8f", + "e9f10e2e06287c15291bf44ec3885ecf01c8412ff4ef7c0486688d5314c2feec", + "d6353ab7516fbeb9acb10b420e62a3465da8d5f59061e0985f0eb7a6b7598e2c", + "875a7a81bae526732cef14bd8a6ee550009dd6f90cd5220a747cd2b8c575ec8a", + "2f8cb458f888ba9986d527d49f8d39f781004faa6aa85593cb5e7cb1defa12f5", + "beff4e88f527995dfaf9fbf3bd49a90a1c6463fd5a90a9fdf78bac2811047df3", + "06ecbca73dfedf0cb235d370e1a427b8fa0cc2bef922d06df5cbd50b589f992b", + "5083598634a96a755b0d103b936470c2786e0c29f9ff42cd8f3cdae3c61528cb", + "277bfe059dcd5f3f4a348b0d3074a91df25974ca1018aaa944bbe339ce05b969", + 
"5af51fb4aae0a9ecca7a5065c132c1a9d4d328b73eddcd7cd447ef362b84da70", + "157a14f72f2f109a182fec8ed951740218ae26d3a01c0c2240f2e7eccab9999e", + "95de81aeb5440c1d7131a1ad73f17380049f02d9f554f95cb6b58a1214f4ea4e", + "92cbb8851b926c043c2de871301d6c3b7a704ce5a38b345c508e249964d7dfed", + "1a4cf7a044a77a2b13f805afaa676fd8e5495f06cd64e91ea7b022df1c04a820", + "50c55370521bc26feaaf8a6e4586c4fe69cffefe275b70ebc06019f589ab1bea", + "1a98f0e894f6e67880745e4f3479b5394d3ff52ec4b2f352c31f3ec1d96ca537", + "09f8a8bd29a1fb018e5ad5220604ce27ede3b39b6e445b3a1a9bb4f7db6d6f7a", + "e7edd6749d757fdd249b73df3869aab32b65ec14ad881637c3342c855861c59e", + "3adc8cdfecbd3beb91f8b491cd188a021b2e7ed21b5d0356a5a1d3b3cc27f6fa", + "978045603a6de7dbb184d9a382dd2d28d8317f4ecf5b5cd963b8ce4031c6c3cb", + "ed1d1e8f48d5bc1fe4c01ea822d83b3eac7d831cfd36b2a5aaa990a59558ef95", + "63999cc3e7b792bd4b67881d033b6cb7a6bed9527d52feb7ce1472f0727f6fd0", + "03fd7081c9cf4d64e411bd5fb28126846a51824c9d09c88594bcc3f78e9b3592", + "9be31bb296af4b7707ad5b6b5e1633103cb1fc9dfc7f72e1c441456416e9343c", + "aedd9d98046589bf65eecf5e50a36edf0460966384e25976a541acbfa566caa8", + "219795ea1ad60c6756122605162ffc296375ecf4e819c19d3bef9c05563dd2ef", + "829286014c009a0019db9dada32abf06cb46f9add37de7f19cd10a1f864968b8", + "94ad5ff7a9594ab49aa5935ca9027d76e945e0a5b8a807248ae688b5a2d64343", + "cad0e3a95673aec32f0dc003f14c81be366e8c996f55ea9810a1ac32c99440c6", + "622209d0116ec3b96de970d1a518030f5b42680190ccb12888c9cac62cd5b984", + "95e1893be196cadd0ff74348d13d0b264297425076dfebd42bac6f4448d76b83", + "4b9233212376ae74576641939cdd32577b8a982b4e7612ff83218996207c461c", + "c5dd2b39accd01a6a94ea220c7774982b5b299a8c181cda755ea001baf733342", + "c6d65947caeee0616a2f07bed8e8037c2e5707d6962a0017585f588e0d1fceaa", + "1856799bff3fb09ff5f15048f0750c81db025b8719a7292f7aeaeb47ebe28d52", + "ea25bd807290b1fe569bca2b332faa60e7b4ed96fa7389d2a6a124f7bfb512dc", + "d78f7f438bc4f2a8ceacec17329b4a48103ea49252150cba641d9acdb4d6f4b1", + 
"c6c422fdc2b22b8a540b56404b0fea444e9ba9510556f2520d7c3bb6301a27ab", + "ca5d755250e6e21fa19a40378318c4fc1e8214b46807dee5167a5a0d08237f76", + "8bfce5e34e5377e484854ccb7d782a0425f4251fc197a788bfb50a749098e0da", + "38d6f1edb5d1ffd9dc4f016f3a8de96ca7479a79b5085ecacb7944aefb65ab87", + "df92287851ad5b344963677d506557e71effcca6795cee74e8a8cf0025e7c40c", + "40e831325e39b725c6ce398c590eedaa91d4e41736bfe5a9e45f6d41bc0bfacb", + "96a9bb1896fccc97fb06400128a25b6e93d8fc47ba538963599421cdca20e08d", + "2d543b981b45b9c63296a15acf5ce967fa5972dab279a9048ce2b9a72658f5a8", + "36c4e9e7773454bd92eaff36d077e5da07a610a4ab3f89b5d2e73dde1eab729b", + "78ed73b0370fdf229c2c6feb8eb9c8401e73e861b76065200cc0c7a2d98ff2e2", + "d155250a08ccc1e0799b175e2ccf2a1f7c0cb670b57962aacf1092c7101ec7e6", + "e0078962a2d434ef3fd3631584a19553809de6aab3329b86924e1c01014293f6", + "eff1e9fcc6d1f5766427a1581724649dd9ab480f0aede615b02cfb8b76a9ac76", + "908773fe4b11284c0e2dc64ad0dd343e32c72affebd49cfe36f1db7ac17c85d6", + "de8aae7fcdef1b4edef0c348f3a65d24219194bc56681d71846cf5716b30a9f5", + "35978e45b2a870b85976e4f556db8996d02b203fe4d5770ca9862fda96b168ce", + "ab17ef14546a3d0eadaefdbf51c9368d5673de44b5541638506169ccb966044d", + "4db941d19688e2c539687b7ea45b84dfda212f98e33d44ca0d4298acc637938e", + "461b21ba1ac8a231bc6f0e0d0c5af8fecc9d4c3177a1abb03e7456bbb16a1f88", + "1dc9821a64325a77fa0a561e33a66cd162b302c43290266a329f3825a426d7c5", + "0cedc6ff163ff1f37e6d6090971fa97b7a84764e4cc419f860494bcf3dc64332", + "8a329c4ed566f5599090e359c3eb6d8862232d3fea3407ad81c6bbf50a2bf1a6", + "93fa0c70d4617331e8453fe593add9247cd183901038207a5bf4d20df70c78ca", + "b587f2d5c377f440ddc2be3c3671108d6eb0a0fb446877859007401ac0b2abf6", + "2c5fb421f908ac47ac166d9f1cc953668190a833c8809ee1cd447da1233dd7fd", + "10bc24c667d06f94b2634b41e03e7f1a3d7f5c4934df8c191090378970b6dc3f", + "27c31a3ae3c4246b8d940f107d70a374154906203b37dddd65779e17dde505ad", + "361a4b3043695ec2be73d1120c3614376ccd0aba128bfc9a940638cfee9a9b23", + 
"f6335610dce1ff2e8ea7769768469f99d31981b92cf9c9af130c267a6856eb75", + "e1b480d98cdf41ea694b792eb94039ebe2ba6343597f2cb691c261c0ec4679d2", + "b20892a7530c6da1e9835f23f9ca51351b412e39badc81189ec99cb8230f8ad2", + "7a0a32f122c470b5f0932abde695a24f5ecbe7b74f9bb2b77bb7dcc1ac408cf6", + "f3c7815a2bee014903640767e1bdbc02d99000b61d5a42f3b8fc589aa05e7888", + "929c05d2684fa8e26e0ac2c3c5ac2e5657f60c7567f887f674e7b41aa2cfab80", + "4cb8b86a4e8c60a227cdc0a3b43271c9e181e79d9f4193117f09aeb65e0d1c29", + "8e958e4288e6f7fa47a36ced20f0fd5d89592321fc4f97b01e89d08086628ccc", + "33f740882d3f5d9e2afd027958dbd8b32032e04adb4ae596e07390125158f3d3", + "63211da6fd4b4e8ff533ce44edf6ada47ffb21d047a4033e4504fa34341798ca", + "e256170cb22cfedad942b8008aedd15dd7a84927137af51aff313ec28b9b1bf8", + "b4ae94a0098b7c32f29d7aaf3411d05cc9411ff9f64dbdd2b75d46dd54b3e1f1", + "c91b4759aad11edd22bf8e46ff9a23149be8d5084c0b2012e88c2aa796a824e2", + "a7f91cd66f653bb01b93139e8b34143164ef7b8c5600a571c1140a12580d3967", + "fd00d352ba385f9e593a1282c6fb62582e55f5f8d3303fd7888ab85fb3716eeb", + "19ed20cf54db296060368bda5ac4ae58e1470b9b90c565e95329ad289cc69fa5", + "ed91a663c19bc1f74f465f26717bdf3671cf02036f6d1f5ef45964483ab746e6", + "b7dfedfcee131def65c51aed3bdf27d1cd075fc53ce57759c8441353250c36f0", + "57c855ab849e7371e968437e94948d2881c38ed8616c336e0d9782c54e955a71", + "bb86c0621b2a056ad909d4ae8d3f8ad4f48791820b76234fed000aa3d19d93f2", + "e2bbeb69cf34ad45c67db09dcf74c7d45c02e51982dddfb661edbde5e796f29f", + "a8cb070f17d22587103416527404ba4a4a7e65e73b4de34fe7419e295e203344", + "f54e138ec0392f42e036a944a8530e6855e710d6449145afe08669803287d3fb", + "a9a0c1df293972d5e9a6542d8e8faa9161255f0b79e0956b02ecb057fd47bf48", + "4a4983ac422bce3ada4d528a8caf3a86b2691d56964489c14236aff7d6203db7", + "c4b684815bf64df9806307699619c6179afcf6124a4edbcd1a1616fe80a45def", + "4d1c5585299c93e27bd8a9daaa70c4f0d73c567fc09611a67d01a795bae67dae", + "75c4e427dc68b13b24e1d944f7192995bf64f31d6d54416c4c7388527e79ffe2", + 
"3f658e7312e33eae2fbf13aaf40289363397db9fee494dab4c6873db7ff52fdd", + "9d3445a8144c662c45617a2419234ffad8fac12fc16331659bb828e6600d4af0", + "722eef4ac927c70886414c221eb51e737b3dd8280762e0fe8c246f1d0bcc20d1", + "9f08929babed6484acfe78cbb3ca1443ef7ee24b08b76f7eacf13ff87a14a9a3", + "2ac642d59056d6a84ddaf8ebbb057d2a21e39792aec3cde95f57a604c940838f", + "9cced3a84b6ddae3f016e144f3708f4821eaf620c29bf42ca8203ddbf971049b", + "b340faa755f66bc5726c09e7a7f9810db07a307ec6ebd3c01d39582ea306ae20", + "84b536d7ce03eec38ef3d52d5799ca6b84778b3a558d3090d745f62369eaa7a7", + "2b81ec9f2a03d5a40f125a6b7f725370bcb50254185c7563103a5a61030c3927", + "53933f73b4ca96510db7cfceddb3cb98447dca20fb4fd2d3ea8fbd05dcfdddc5", + "c287fe6d2586ddce466e7ff59cb0b8205fe734401cf42aa93cd5257ad51439f7", + "60b1592300b31f11b05b73aeb1b77887ac9a2f24bb94d0ed5c18cab770eb8100", + "faa830e9a8baee6660a8db62d05a34996e48beb748428bd2a004b5e17d896cce", + "c152ce2b46b5e49508dc18938c2b8a575b3e001fa4397f13ea65554ca6f3a3d3", + "cdfa1ad30596b60ab3f8a4fdb931530d1a96b9a176323d969d519ea7636b678d", + "bf23fb3cd240215ca03e8d732ebfd9c4414d9da57aadadb2a60ae49ab4d3b35e", + "22f644f1f763bb17aa11f3f3d23a5f41b501862941d9c2838c54d6f5ada7faf6", + "7a41a5337902fd5fc0eb97fc7e75aca7f8dff38bc10a36c90f859f710d6e4a36", + "957835f7767f2580d340c4039b2d1144993b9bd908be62db7c9cb80291f215c2", + "cf3bf007426b200e1315fb650d6c8fcc544db1efe7723f5cba940c0c3fe4e9a4", + "75d368aa68cb05409c46640938bc18e825b70ea7047481b6f03da5a6a7cbcc1b", + "667ae4748eb46489dcda951c3d6dde00e6cf47cbb5b42f38ee772e480dfc567c", + "3bfdbfe217b0aabeef2915a0915e47f6a09c28022c134bf29a0082935d7c4432", + "8625b04ebe9e444f5a90d83ac8fd27c119756ad9f120984b8f06c92839e57bde", + "79276bbe931838786f0c5eb1c4480e8418e4537e2968f41b1f18a094cc06631a", + "be8083239c54aa5c421ca3da87a51837fd2c9e65b0be084b80c9ac72ea53a57f", + "966f3a7b327614fb89c64204c79ef35a3246bfe8f5e44fb4b8da859d44f03dd3", + "2a217ed1d535b5de810784af9640c66756ff819444eb8fa8a802b6399397dae9", + 
"ad0c940caff402378d73eeacf7b55c05fc37804185c56ad1bb103aa21d29f85a", + "6a5fe7712d01fd99ea0a50fb66a8610993ab7eeaaae3f9fa190409bd07557a56", + "994205f817be06676c32b84d2fead2f60be8f622b3bd0e9e7869bec972994fd5", + "432fbb3913bb16ce8479cca0e01fa97a3c04eae617735dbbb48c70a9d9faa0be", + "343782f7647b1117f531832d1533e39ea564a7cb3cb86196430b16a863e154b1", + "3140c8d7d29b4b75973d12c4d10b4a6e44c3e533ae8ab08c16a79703bd08ea8f", + "2987cff1bf6b28603610e45cbc52213630e9ca4622945783d3187b961d8ae87f", + "7a2941df71dd3d5215c31444924ed412d3401a1c2053ecdbf75692a789447e8b", + "d5754cc668e6e05548dee85dd9908fb36b48fbe446276f281fa7c37363aa42d8", + "0491bdd1148c092ea1be94806c72217321dd4d7902ea4adf4106814ff32f098a", + "9ce7bd10d5eabe9db99472f1e4a0f1c0a4a3a1411f7e195bf269d4a3b30680ed", + "4f5a896678477391b3ff5ab74980a05aba7ca00430b0d9045024d66fc6aeedad", + "67aa62e5bcd4c205444e141379ac3a2e7ceb1ac36a5ec50d5cc02ab193d3426c", + "0c4296687f5a2f6664579adb4afb106ba47ff915e2b63fe935f44d0dc9a25edd", + "cee5e6eb2909701a0288151739fde30e0a2723b18ccd59aa7db5afd25a60d5a2", + "c0468d09d74a0fb30ce5dfda7c53866e33e3e457ad742fca33807e37a1d27d0d", + "0ef807072182cc5f030c3fab2be14b00bee1edbe9c7e5cb358b1126347fe1aaf", + "d42c3c7c4bc3d7dfa80d1df4aa110368e6592254e6e6753bfa27a590b1374984", + "8380fc35c16081ffbb6a023c989a9ad4d9644de4c8073d39e5d43b479f96e2cb", + "444dd409031ddfa0b3ffcf92bee7e9f08bda1b9c193a755cdfc3954878d715d8", + "a29a64be3e5f6948b1fea4fcf5d7d4ffd418b0dd18419536f453d95e79bd919c", + "a27765c77fd9511f89728c7099c563c7da1ffba7e42c1ae674aae82985804721", + "28d0d8925ad4b4df33a2fc7f6f21e39873a8f9d76a9a636ed2ad327259f7e5fc", + "48b2c6ad69a190a2d95b22a2169f768502f15c936ba06f748e9777b8086a8963", + "6182bb04e8eb98b629378998e01fa8c47629f27f134e14d7397efda6c74f88ac", + "ae40b0bd14003a588df523c2837f0b05a4e0cc8157be010e4109b7526b34dd95", + "34de8bdb2b03958ef3d81a744db7aa3f7f95cd112d0f53d63143f45f6de8bfe4", + "23b55d16e7c48d0502d428e07574ff7ea928e44bf1442bca732b5927db2370a5", + 
"9008838391ee77748d96ceebce05b02182aba985963a461f985d34ebefc0e9b8", + "40d860046e0ac1163ce27af02fcf5b2280f7dbfb23eec29300d2ba4b9c9c693b", + "a638f5c833734bd1a185c613c1559a5bb87b62dee5a4ada713a7e8c48bb39930", + "e31a5f0e938b587742348ff003fe635e31f6113030d9e8f30d4e22ddc89144cd", + "5cf6572465f3cfe67cb431e588cc4246f79f245001188dda15c4d85b4801321d", + "6dc0bf6130add1e40bfe6311951a1a2fe602c8796d234a86b6aff4985ee27feb", + "8a3b3dbcc386361f76841bbcdead026afa2a6c568e8e97c156fb849b6a465dc4", + "320daeb9ccea9f34ee629136fbd55b07f222b9b3ce1f94987a7e20fed17206e5", + "2f49861cd2e01b26c5a84ce4cee2438327761afee50a48c89d6d7b49018b7fcf", + "e69c8cf076852b36e45e5be83218a3accf29094e4e9895b36aa56f9a10d284b4", + "a8566fa6c2fec3f79f2611c1db9dbb25b856fedc9b3856008397b0f0178778b1", + "b9200a2a81d114b953e3563d9856822753ba4b7703943b0df920352e4ad300d4", + "31e319dcc4c3ec0f089e371d39a790b7e967896917c26926aac256954cbc52f9", + "fbd3031cef8c0c8330f83153e9753313edfb899a170c20531d06ea9a45217856", + "e563c3665885e9254e14440b42e3c2bf0eae51180571c3e2a2f8c99bc8a8fd83", + "07288958348e0cc68880849818d806834e84b6541a622d1e952ed0a85efa2d98", + "1fcd0987e064964e8796c3e297d6b8d6477ccca8dd37175bf3d0935161baf2b6", + "df3f905163ab3d87e9867b18dec20fdc7301e1312761470fe7827f3c7a3db96a", + "9de32735e458f45ebdcbb307321f693781b3f005cc375570ba5eb9bdad3cafb6", + "a7fbca77957e7ae4b135dc04daced4b319bfe100bdb612b0d6c8fe330ca876c8", + "9b2e6addb9131b2aa8d9d01bb4a3684a9980fa59ef8eb6ddbf9c8804b041f4ce", + "a9481433389c5a2acdcab26e600e500c3709249dc14c01b7025de2719fdb68e2", + "09dcb87aaca6537622c5c8125f39df290bd8343cd89b4c497046992634c147da", + "537102a29f389fca9edf41239c62b332fe92a71527588182e893d46b5f2ae7cc", + "c004039ccbe8bfa4625372d3c9754eb16e814a2a370c7e8bdf6021aeb9633963", + "b1186a4e8caea3d1d0c06f4c00ee9fa386ae3f3b8a8dd92d1adae8aaf6462b1f", + "88f4aca157bee6712046cf5efee0dec950c966ac32e68c4e51328411f48ddc42", + "1e2688c6aff721fde920d60a838b5462dd4447e251581e7ca97a4fd7564be63a", + 
"e716d47275fcec2ca23863880f9c21ff53fe74a87c30c23943e2b1b468352566", + "ea0ee24c027c79c5a9fd9b47ae556e6aaddfe2b97e00fd855e91aee45cd064fd", + "b7adfa74a8d03dc963202c231fa527ea6f9168afa6b6f0c8cc4fe0ec0f7baacd", + "3807d25698eed478f6802982ad5bdbee3083bfa07253305f96e76bea6d9f5d7c", + "9d04e40577c3ac00b0f66f4cbba694f01ddb0e56ca303160a8acce6b649675d3", + "2c47612ef589a6f2b82ce23672f581aecb065640201df8d02a185e2de15635d0", + "adf24254e91a47236003315e0b8c25fa5f86d5bbe5d35bd083a488e2b89a8c7a", + "cca693215a23955ee155ff41fa4027523b05dd2e1afade1878ab171daed11078", + "8692daae3b0608de881e01c86d8c1e45cba1f32996869ce8e6cf3b8f793a41f0", + "398f15b36141ed45f0724b8e1060d3a67f28e4d99ae2c064d4d0fa8e8c720bf7", + "d570289b50e3e8f57e70ed955b7b87992ffdb16f3bc4908a9a9425d0210a14d8", + "5ae446d92abc358e0077b932494baf9983a31a27f548268390a2094e98985222", + "16f423a7835fe2e2cfb869494e5bd43b25660452f62b377b69884a28b5a323c8", + "88331376b5428f50a91b84af5edf56e438e59ebd2a83cd840c20f0ff966fbfff", + "af9d12d94e4c3ae112f5cabeba7f6eba5105e1ba2c4f71f4fd90cb0f3f671ebc", + "a3debcaa4d4b6d950822b0fb612a436278d23b305128972e9737aac0c703097f", + "03753f72efd39aec75a3840a4acef16830ff9d25f41ddbc6d13c1dbf5cdb71ef", + "17f1ae9c3247e556215929dcb383ba18aaac2ae9b6f3784ea7d738beff46cce5", + "a0c43101293a4183f74a575b10384341e9a3e5106c21e3882dd42ba792f6beb6", + "dccf54f2eb6e441304cbc4ceb659b936cd36fa75c70609bf31a4287b40d0df8a", + "e9c9204781d9bf34edcd52eb222ae469f310c37b45e9d6b8e86017a81b54a889", + "9f84dc532aef6865b04161a61bcaaa0371ccb22db5e92366f314e67d46b020ef", + "e282255e252194f1fe94ea32e063020ad0ed7471ba3d959d71841251f545ec50", + "8ee17d97bc69a166939da3a10815d207843861c949a64795a276b3b7e6d76cee", + "51792977284ef54f288a9cb144ed0b0dab129146c5206e7c56c3e5b94058b1ba", + "63c8831e07ce87543117ab11e4fda4ac7d10dcb2dbe0ed9a69e0035c3f010ded", + "bc87d9b6d2dcd9d2ac6e86aacc394853f349214ba910d63588924232088d7a53", + "ad9b8e4446a0326cea32e64346803c15595cdaa7eb48660b981db6ef3eafd168", + 
"2e724245446b50e8e94adb0c7d6dac108324c7063646a07080c670e0842291f1", + "938628f96d7e1bf5fada66b1681c8cf3731463451a66a9a792e099ca09f59910", + "58484171dbe293667f81b76c6f0d493928a92543bf46076cc62de3d0d1ce02cf", + "0863180da779d530ff5184908bc1ddd85910a5c643d1317fe0f3796f47eba414", + "27273069347a23a4e5fee10346ab67813f99b8e09c3d31d4b1c4384fa6bed588", + "f872f706976de4ebf97429946b83d7c1e0480496f26442d4253440a0f462ba82", + "949c7243483d52b85e6ef058fe8814b5fd6b307a529fd34c07daa8eae5759770", + "7a1001461506e3b5a4de3c3de74a1a838ab6b6d1530042ff9bd802e6bda90e91" + ].into_iter().map(Txid::from_str).map(Result::unwrap).collect() + }; + + static ref TESTNET_SKIPPED_TXS: HashSet = { + [ + "6c8eaa51142b069305e07aea0ee247a467f1fc7d1378428005b8034e069e8cda", + "9dbd8e2f2f331e0d314968baedaf49065e45fbd0316c95e8e1e02548437a76f4", + "4f5f620484e5359eafb9fe799b568a59ca75202bfe6aa1546c54820c99889437", + "b22fca714f70f68e45bef2babc5d7f0a1c81fd892ddfd347edb43cc80fc31db2", + "08e864402d066cb7e0f65f587c2450c03f982f04b06728f455df0930a1f062dc", + "a20faada80dc028a821cf5c3889071442fe7d5f1487e669e907512f060da86e7", + "97cb98df139d608a79e277fe6edd4439867be032aa6463c13cab18de12632db3", + "b90b85e05f58d73d339be4563bcbf6c9fd041a1ddf175538135fae96937453df", + "af264140932d5ec7049639ae1d8d3453a307d6ed3272a7d12ead4d09370cca9c", + "a65a1c2e37461c01e19f99675e448eb23b64d0dd1234547a30b45ebf5e9e7ba2", + "dfd54905c6a323d92fd923a2fcc592e005fa8488e2f3774cb585f7fd33068433", + "f5166734c9fe55d46325c2406454daddc698ff9d76491dff8e6553027d53878f", + "e30ad732c4ec80946a9aef649e3eef521bf408f29539c0ff254b67272cc908ea" + ].into_iter().map(Txid::from_str).map(Result::unwrap).collect() + }; +} + +pub fn is_skipped_tx(tx: &Txid) -> bool { + MAINNET_SKIPPED_TXS.contains(tx) || TESTNET_SKIPPED_TXS.contains(tx) +} diff --git a/lib/ain-dftx/src/types/common.rs b/lib/ain-dftx/src/types/common.rs index a6360a2a18c..32acc7d7302 100644 --- a/lib/ain-dftx/src/types/common.rs +++ b/lib/ain-dftx/src/types/common.rs @@ -1,3 +1,5 @@ +use 
std::slice::Iter; + use bitcoin::{ consensus::{Decodable, Encodable}, io::{self, ErrorKind}, @@ -169,3 +171,9 @@ impl Decodable for VarInt { Ok(VarInt(n)) } } + +impl CompactVec { + pub fn iter(&self) -> Iter<'_, T> { + self.0.iter() + } +} diff --git a/lib/ain-dftx/src/types/mod.rs b/lib/ain-dftx/src/types/mod.rs index fa7a10c4b5f..69d09799eba 100644 --- a/lib/ain-dftx/src/types/mod.rs +++ b/lib/ain-dftx/src/types/mod.rs @@ -24,6 +24,10 @@ use self::{ }; use crate::custom_tx::CustomTxType; +pub type Token = String; +pub type Currency = String; +pub type Weightage = u8; + #[derive(Debug, PartialEq, Eq)] pub enum DfTx { AccountToAccount(AccountToAccount), @@ -50,7 +54,7 @@ pub enum DfTx { PaybackLoanV2(PaybackLoanV2), PlaceAuctionBid(PlaceAuctionBid), PoolAddLiquidity(PoolAddLiquidity), - PoolCreatePair(PoolCreatePair), + CreatePoolPair(CreatePoolPair), PoolRemoveLiquidity(PoolRemoveLiquidity), PoolSwap(PoolSwap), PoolUpdatePair(PoolUpdatePair), @@ -130,7 +134,7 @@ impl DfTx { DfTx::UpdateMasternode(_) => b'm', DfTx::UpdateTokenAny(_) => b'n', DfTx::AppointOracle(_) => b'o', - DfTx::PoolCreatePair(_) => b'p', + DfTx::CreatePoolPair(_) => b'p', DfTx::PoolRemoveLiquidity(_) => b'r', DfTx::PoolSwap(_) => b's', DfTx::UpdateOracle(_) => b't', @@ -181,7 +185,7 @@ impl Decodable for DfTx { DfTx::CreateMasternode(CreateMasternode::consensus_decode(r)?) } CustomTxType::CreatePoolPair => { - DfTx::PoolCreatePair(PoolCreatePair::consensus_decode(r)?) + DfTx::CreatePoolPair(CreatePoolPair::consensus_decode(r)?) 
} CustomTxType::CreateToken => DfTx::CreateToken(CreateToken::consensus_decode(r)?), CustomTxType::CreateVoc => DfTx::CreateVoc(CreateProposal::consensus_decode(r)?), @@ -287,7 +291,7 @@ impl Encodable for DfTx { DfTx::BurnToken(data) => data.consensus_encode(w), DfTx::CloseVault(data) => data.consensus_encode(w), DfTx::CreateMasternode(data) => data.consensus_encode(w), - DfTx::PoolCreatePair(data) => data.consensus_encode(w), + DfTx::CreatePoolPair(data) => data.consensus_encode(w), DfTx::CreateCfp(data) => data.consensus_encode(w), DfTx::CreateToken(data) => data.consensus_encode(w), DfTx::CreateVoc(data) => data.consensus_encode(w), diff --git a/lib/ain-dftx/src/types/oracles.rs b/lib/ain-dftx/src/types/oracles.rs index 79480367afe..45e6cd0116c 100644 --- a/lib/ain-dftx/src/types/oracles.rs +++ b/lib/ain-dftx/src/types/oracles.rs @@ -4,6 +4,7 @@ use bitcoin::{hash_types::Txid, io, ScriptBuf}; use super::{ common::CompactVec, price::{CurrencyPair, TokenPrice}, + Weightage, }; #[derive(ConsensusEncoding, Debug, PartialEq, Eq)] @@ -21,7 +22,7 @@ pub struct RemoveOracle { #[derive(ConsensusEncoding, Debug, PartialEq, Eq)] pub struct AppointOracle { pub script: ScriptBuf, - pub weightage: u8, + pub weightage: Weightage, pub price_feeds: CompactVec, } @@ -29,6 +30,6 @@ pub struct AppointOracle { pub struct UpdateOracle { pub oracle_id: Txid, pub script: ScriptBuf, - pub weightage: u8, + pub weightage: Weightage, pub price_feeds: CompactVec, } diff --git a/lib/ain-dftx/src/types/pool.rs b/lib/ain-dftx/src/types/pool.rs index 6997f0945e4..d9550fef9be 100644 --- a/lib/ain-dftx/src/types/pool.rs +++ b/lib/ain-dftx/src/types/pool.rs @@ -1,3 +1,5 @@ +use std::fmt; + use ain_macros::ConsensusEncoding; use bitcoin::{io, ScriptBuf}; @@ -47,7 +49,7 @@ pub struct PoolRemoveLiquidity { } #[derive(ConsensusEncoding, Debug, PartialEq, Eq)] -pub struct PoolCreatePair { +pub struct CreatePoolPair { pub token_a: VarInt, pub token_b: VarInt, pub commission: i64, @@ -65,3 +67,8 @@ pub 
struct PoolUpdatePair { pub owner_address: ScriptBuf, pub custom_rewards: Maybe>, } +impl fmt::Display for PoolId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "PoolId: {:?}", self.id) + } +} diff --git a/lib/ain-evm/Cargo.toml b/lib/ain-evm/Cargo.toml index 591b611f5b3..2a323605960 100644 --- a/lib/ain-evm/Cargo.toml +++ b/lib/ain-evm/Cargo.toml @@ -58,5 +58,4 @@ substrate-bn.workspace = true rocksdb = { workspace = true, default-features = false } [dev-dependencies] -tempdir.workspace = true once_cell.workspace = true diff --git a/lib/ain-evm/src/services.rs b/lib/ain-evm/src/services.rs index a3cdb26633a..88291d631a6 100644 --- a/lib/ain-evm/src/services.rs +++ b/lib/ain-evm/src/services.rs @@ -3,16 +3,13 @@ use std::{ atomic::{AtomicBool, Ordering}, Arc, }, - thread::{self, JoinHandle}, + time::Duration, }; -use anyhow::{format_err, Result}; +use anyhow::Result; use jsonrpsee_server::ServerHandle; -use parking_lot::Mutex; -use tokio::{ - runtime::{Builder, Handle as AsyncHandle}, - sync::mpsc::{self, Sender}, -}; +use parking_lot::{Mutex, RwLock}; +use tokio::runtime::Runtime; use crate::{evm::EVMServices, storage::traits::FlushableStorage}; @@ -36,11 +33,10 @@ lazy_static::lazy_static! 
{ } pub struct Services { - pub tokio_runtime: AsyncHandle, - pub tokio_runtime_channel_tx: Sender<()>, - pub tokio_worker: Mutex>>, + tokio_runtime: RwLock>, pub json_rpc_handles: Mutex>, pub websocket_handles: Mutex>, + pub ocean_handle: Mutex>>, pub evm: Arc, } @@ -52,56 +48,46 @@ impl Default for Services { impl Services { pub fn new() -> Self { - let r = Builder::new_multi_thread().enable_all().build().unwrap(); - let (tx, mut rx) = mpsc::channel(1); + let runtime = Runtime::new().expect("Failed to create Tokio runtime"); Services { - tokio_runtime_channel_tx: tx, - tokio_runtime: r.handle().clone(), - tokio_worker: Mutex::new(Some(thread::spawn(move || { - log::info!("Starting tokio waiter"); - r.block_on(async move { - rx.recv().await; - }); - }))), + tokio_runtime: RwLock::new(Some(runtime)), json_rpc_handles: Mutex::new(vec![]), websocket_handles: Mutex::new(vec![]), + ocean_handle: Mutex::new(None), evm: Arc::new(EVMServices::new().expect("Error initializing handlers")), } } + pub fn runtime(&self) -> impl std::ops::Deref + '_ { + parking_lot::RwLockReadGuard::map(self.tokio_runtime.read(), |opt| { + opt.as_ref().expect("Runtime has been shut down") + }) + } pub fn stop_network(&self) -> Result<()> { - { - let json_rpc_handles = self.json_rpc_handles.lock(); - for server in &*json_rpc_handles { + for handles in [&self.json_rpc_handles, &self.websocket_handles] { + let mut handles = handles.lock(); + for server in handles.drain(..) { server.stop()?; } } - { - let websocket_handles = self.websocket_handles.lock(); - for server in &*websocket_handles { - server.stop()?; - } + if let Some(handle) = self.ocean_handle.lock().take() { + handle.abort(); } + Ok(()) } pub fn stop(&self) -> Result<()> { - let _ = self.tokio_runtime_channel_tx.blocking_send(()); - - self.tokio_worker - .lock() - .take() - .ok_or(format_err!( - "failed to stop tokio runtime, early termination" - ))? 
- .join() - .map_err(|_| format_err!("failed to stop tokio runtime"))?; - // Persist EVM State to disk self.evm.core.flush()?; self.evm.storage.flush()?; + + if let Some(runtime) = self.tokio_runtime.write().take() { + runtime.shutdown_timeout(Duration::from_secs(10)); + } + Ok(()) } } diff --git a/lib/ain-evm/src/storage/block_store.rs b/lib/ain-evm/src/storage/block_store.rs index 86fb4f709fc..91f40e342c0 100644 --- a/lib/ain-evm/src/storage/block_store.rs +++ b/lib/ain-evm/src/storage/block_store.rs @@ -1,5 +1,3 @@ -use ain_db::version::{DBVersionControl, Migration}; -use ain_db::{Column, ColumnName, DBError, LedgerColumn, Rocks, TypedColumn}; use anyhow::format_err; use ethereum::{BlockAny, TransactionV2}; use ethereum_types::{H160, H256, U256}; @@ -9,6 +7,11 @@ use std::{ time::Instant, }; +use ain_db::{ + version::{DBVersionControl, Migration}, + Column, ColumnName, DBError, LedgerColumn, Result as DBResult, Rocks, TypedColumn, +}; + use super::{ migration::MigrationV1, traits::{BlockStorage, FlushableStorage, ReceiptStorage, Rollback, TransactionStorage}, @@ -22,7 +25,6 @@ use crate::{ }, EVMError, Result, }; -use ain_db::Result as DBResult; #[derive(Debug, Clone)] pub struct BlockStore(Arc); @@ -31,7 +33,13 @@ impl BlockStore { pub fn new(path: &Path) -> Result { let path = path.join("indexes"); fs::create_dir_all(&path)?; - let backend = Arc::new(Rocks::open(&path, &COLUMN_NAMES, None)?); + + let cf_with_opts = COLUMN_NAMES + .into_iter() + .zip(std::iter::repeat(None)) + .collect::>(); + + let backend = Arc::new(Rocks::open(&path, cf_with_opts, None)?); let store = Self(backend); store.startup()?; Ok(store) diff --git a/lib/ain-evm/src/storage/migration.rs b/lib/ain-evm/src/storage/migration.rs index aeba029607b..1f88c92c251 100644 --- a/lib/ain-evm/src/storage/migration.rs +++ b/lib/ain-evm/src/storage/migration.rs @@ -1,10 +1,9 @@ -use ain_db::{version::Migration, DBError}; +use ain_db::{version::Migration, DBError, Result as DBResult}; use 
anyhow::format_err; use rayon::prelude::*; use super::{block_store::BlockStore, db::columns}; use crate::Result; -use ain_db::Result as DBResult; /// Migration for version 1. /// Context: diff --git a/lib/ain-grpc/Cargo.toml b/lib/ain-grpc/Cargo.toml index 9f149398aa7..04534b59b79 100644 --- a/lib/ain-grpc/Cargo.toml +++ b/lib/ain-grpc/Cargo.toml @@ -6,7 +6,8 @@ build = "build.rs" [dependencies] ain-evm = { path = "../ain-evm" } -ain-cpp-imports = { path = "../ain-cpp-imports" } +ain-cpp-imports.workspace = true +ain-ocean = { path = "../ain-ocean" } cxx.workspace = true env_logger.workspace = true evm = { workspace = true, default-features = false, features = ["with-serde"] } @@ -36,6 +37,8 @@ tower-http.workspace = true tower.workspace = true hyper.workspace = true rand.workspace = true +axum.workspace = true +defichain-rpc.workspace = true [build-dependencies] heck.workspace = true diff --git a/lib/ain-grpc/build.rs b/lib/ain-grpc/build.rs index 2594bf2e396..878131905a3 100644 --- a/lib/ain-grpc/build.rs +++ b/lib/ain-grpc/build.rs @@ -266,6 +266,9 @@ fn compile_proto_and_generate_services( // There's no way to compile protos using custom generator in tonic, // so we're left with creating a prost config and using that for codegen. 
let mut config = Config::new(); + println!("protoc_include : {:?}", protoc_include); + println!("dir : {:?}", dir); + config.protoc_arg("--experimental_allow_proto3_optional"); config.out_dir(out_dir); config.service_generator(Box::new(gen)); config diff --git a/lib/ain-grpc/src/lib.rs b/lib/ain-grpc/src/lib.rs index 215347feac2..8adec6475c0 100644 --- a/lib/ain-grpc/src/lib.rs +++ b/lib/ain-grpc/src/lib.rs @@ -18,6 +18,7 @@ mod trace; mod transaction; mod transaction_request; mod utils; +use defichain_rpc::{Auth, Client}; #[cfg(test)] mod tests; @@ -29,6 +30,7 @@ use std::{ }; use ain_evm::services::{IS_SERVICES_INIT_CALL, SERVICES}; +use ain_ocean::SERVICES as OCEAN_SERVICES; use anyhow::{format_err, Result}; use hyper::{header::HeaderValue, Method}; use jsonrpsee::core::server::rpc_module::Methods; @@ -78,7 +80,7 @@ pub fn init_network_json_rpc_service(addr: String) -> Result<()> { let addr = addr.as_str().parse::()?; let max_connections = ain_cpp_imports::get_max_connections(); let max_response_size = ain_cpp_imports::get_max_response_byte_size(); - let runtime = &SERVICES; + let services = &SERVICES; let middleware = if !ain_cpp_imports::get_cors_allowed_origin().is_empty() { let origin = ain_cpp_imports::get_cors_allowed_origin(); @@ -94,13 +96,12 @@ pub fn init_network_json_rpc_service(addr: String) -> Result<()> { tower::ServiceBuilder::new().layer(CorsLayer::new()) }; - let handle = runtime.tokio_runtime.clone(); - let server = runtime.tokio_runtime.block_on( + let server = services.runtime().block_on( ServerBuilder::default() .set_middleware(middleware) .max_connections(max_connections) .max_response_body_size(max_response_size) - .custom_tokio_runtime(handle) + .custom_tokio_runtime(services.runtime().handle().clone()) .build(addr), )?; @@ -109,15 +110,55 @@ pub fn init_network_json_rpc_service(addr: String) -> Result<()> { ain_cpp_imports::print_port_usage(2, local_addr.port()); let mut methods: Methods = Methods::new(); - 
methods.merge(MetachainRPCModule::new(Arc::clone(&runtime.evm)).into_rpc())?; - methods.merge(MetachainDebugRPCModule::new(Arc::clone(&runtime.evm)).into_rpc())?; - methods.merge(MetachainNetRPCModule::new(Arc::clone(&runtime.evm)).into_rpc())?; - methods.merge(MetachainWeb3RPCModule::new(Arc::clone(&runtime.evm)).into_rpc())?; + methods.merge(MetachainRPCModule::new(Arc::clone(&services.evm)).into_rpc())?; + methods.merge(MetachainDebugRPCModule::new(Arc::clone(&services.evm)).into_rpc())?; + methods.merge(MetachainNetRPCModule::new(Arc::clone(&services.evm)).into_rpc())?; + methods.merge(MetachainWeb3RPCModule::new(Arc::clone(&services.evm)).into_rpc())?; - runtime.json_rpc_handles.lock().push(server.start(methods)?); + services + .json_rpc_handles + .lock() + .push(server.start(methods)?); Ok(()) } +pub async fn init_ocean_server(addr: String) -> Result<()> { + let addr = addr.parse::()?; + let services = &SERVICES; + + let listener = tokio::net::TcpListener::bind(addr).await?; + + let local_addr = listener.local_addr()?; + info!("Starting ocean server at {}", local_addr); + ain_cpp_imports::print_port_usage(4, local_addr.port()); + + let (user, pass) = ain_cpp_imports::get_rpc_auth().map_err(|e| format_err!("{e}"))?; + let client = Arc::new( + Client::new( + &format!("http://127.0.0.1:{}", ain_cpp_imports::get_rpc_port()), + Auth::UserPass(user, pass), + ) + .await?, + ); + let network = ain_cpp_imports::get_network(); + + let ocean_router = ain_ocean::ocean_router(&OCEAN_SERVICES, client, network).await?; + + let server_handle = services.runtime().spawn(async move { + if let Err(e) = axum::serve(listener, ocean_router).await { + log::error!("Server encountered an error: {}", e); + } + }); + + *services.ocean_handle.lock() = Some(server_handle); + Ok(()) +} + +pub fn init_network_rest_ocean(addr: String) -> Result<()> { + info!("Starting REST Ocean server at {}", addr); + SERVICES.runtime().block_on(init_ocean_server(addr)) +} + pub fn 
init_network_grpc_service(_addr: String) -> Result<()> { // log::info!("Starting gRPC server at {}", addr); // Commented out for now as nothing to serve @@ -134,14 +175,13 @@ pub fn init_network_subscriptions_service(addr: String) -> Result<()> { let addr = addr.as_str().parse::()?; let max_connections = ain_cpp_imports::get_max_connections(); let max_response_size = ain_cpp_imports::get_max_response_byte_size(); - let runtime = &SERVICES; + let services = &SERVICES; - let handle = runtime.tokio_runtime.clone(); - let server = runtime.tokio_runtime.block_on( + let server = services.runtime().block_on( ServerBuilder::default() .max_subscriptions_per_connection(max_connections) .max_response_body_size(max_response_size) - .custom_tokio_runtime(handle) + .custom_tokio_runtime(services.runtime().handle().clone()) .set_id_provider(MetachainSubIdProvider) .build(addr), )?; @@ -152,11 +192,14 @@ pub fn init_network_subscriptions_service(addr: String) -> Result<()> { let mut methods: Methods = Methods::new(); methods.merge( - MetachainPubSubModule::new(Arc::clone(&runtime.evm), runtime.tokio_runtime.clone()) - .into_rpc(), + MetachainPubSubModule::new( + Arc::clone(&services.evm), + services.runtime().handle().clone(), + ) + .into_rpc(), )?; - runtime + services .websocket_handles .lock() .push(server.start(methods)?); diff --git a/lib/ain-macros/src/lib.rs b/lib/ain-macros/src/lib.rs index 510624d3b01..958c3a28c13 100644 --- a/lib/ain-macros/src/lib.rs +++ b/lib/ain-macros/src/lib.rs @@ -61,6 +61,60 @@ pub fn ffi_fallible(_attr: TokenStream, item: TokenStream) -> TokenStream { TokenStream::from(expanded) } +#[proc_macro_attribute] +pub fn ocean_endpoint(_attr: TokenStream, item: TokenStream) -> TokenStream { + let input = parse_macro_input!(item as ItemFn); + let inputs = &input.sig.inputs; + + let name = &input.sig.ident; + + let output = &input.sig.output; + let inner_type = match output { + ReturnType::Type(_, type_box) => match &**type_box { + Type::Path(type_path) => 
type_path.path.segments.last().and_then(|pair| { + if let syn::PathArguments::AngleBracketed(angle_bracketed_args) = &pair.arguments { + angle_bracketed_args.args.first() + } else { + None + } + }), + _ => None, + }, + _ => None, + }; + + let param_names: Vec<_> = inputs + .iter() + .filter_map(|arg| { + if let syn::FnArg::Typed(pat_type) = arg { + Some(&pat_type.pat) + } else { + None + } + }) + .collect(); + + let expanded = quote! { + pub async fn #name(axum::extract::OriginalUri(uri): axum::extract::OriginalUri, #inputs) -> std::result::Result, ApiError> { + #input + + match #name(#(#param_names),*).await { + Err(e) => { + log::debug!("[ocean_endpoint] error : {e:?}"); + let (status, message) = e.into_code_and_message(); + Err(ApiError::new( + status, + message, + uri.to_string() + )) + }, + Ok(v) => Ok(axum::Json(v)) + } + } + }; + + TokenStream::from(expanded) +} #[proc_macro_derive(ConsensusEncoding)] pub fn consensus_encoding_derive(input: TokenStream) -> TokenStream { let input = parse_macro_input!(input as DeriveInput); diff --git a/lib/ain-ocean/Cargo.toml b/lib/ain-ocean/Cargo.toml new file mode 100644 index 00000000000..b781c645935 --- /dev/null +++ b/lib/ain-ocean/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "ain-ocean" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[profile.test] +debug = true + +[profile.dev] +opt-level = 0 +debug = true + +[dependencies] +ain-cpp-imports.workspace = true +ain-db.workspace = true +ain-macros.workspace = true + +axum.workspace = true +hyper.workspace = true +log.workspace = true +serde.workspace = true +serde_with.workspace = true +hex.workspace = true +ain-dftx.workspace = true +bitcoin = { workspace = true, features = ["serde"] } +tokio = { workspace = true, features = ["full"] } +serde_json = "1.0" +futures = "0.3.29" +jsonrpsee.workspace = true +rocksdb.workspace = true +cached.workspace = true +lazy_static.workspace = 
true +bincode.workspace = true +defichain-rpc.workspace = true +jsonrpc-async = "2.0.2" +serde_urlencoded = "0.7" +rust_decimal = { version = "1.34", features = ["serde", "serde-float", "serde-with-str"] } +rust_decimal_macros = "1.34" +num_cpus.workspace = true +petgraph = { version = "0.6.4", features = ["serde-1"] } +parking_lot.workspace = true +indexmap.workspace = true +sha2.workspace = true +snafu = { version = "0.8.4" } + +[dev-dependencies] +tempfile = "3.8.1" diff --git a/lib/ain-ocean/README.md b/lib/ain-ocean/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/lib/ain-ocean/src/api/address.rs b/lib/ain-ocean/src/api/address.rs new file mode 100644 index 00000000000..d80bd5dd6f3 --- /dev/null +++ b/lib/ain-ocean/src/api/address.rs @@ -0,0 +1,567 @@ +use std::{collections::BTreeMap, str::FromStr, sync::Arc}; + +use ain_macros::ocean_endpoint; +use axum::{routing::get, Extension, Router}; +use bitcoin::{hashes::Hash, hex::DisplayHex, BlockHash, Txid}; +use defichain_rpc::{ + json::{account::AccountHistory, vault::ListVaultOptions}, + AccountRPC, RpcApi, +}; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use serde_with::skip_serializing_none; + +use super::{ + cache::get_token_cached, + common::{address_to_hid, parse_display_symbol}, + loan::{get_all_vaults, VaultResponse}, + path::Path, + query::{PaginationQuery, Query}, + response::{ApiPagedResponse, Response}, + AppContext, +}; +use crate::{ + error::ApiError, + model::{ + BlockContext, ScriptActivity, ScriptActivityTypeHex, ScriptAggregation, ScriptUnspent, + }, + storage::{RepositoryOps, SortOrder}, + Error, Result, +}; + +#[derive(Deserialize)] +struct Address { + address: String, +} + +#[derive(Deserialize)] +struct History { + address: String, + height: u32, + txno: u32, +} + +#[derive(Debug, Serialize)] +struct AddressHistory { + owner: String, + txid: Option, + txn: Option, + r#type: String, + amounts: Vec, + block: AddressHistoryBlock, +} + +impl From for 
AddressHistory { + fn from(history: AccountHistory) -> Self { + Self { + owner: history.owner, + txid: history.txid, + txn: history.txn, + r#type: history.r#type, + amounts: history.amounts, + block: AddressHistoryBlock { + height: history.block_height, + hash: history.block_hash, + time: history.block_time, + }, + } + } +} + +#[skip_serializing_none] +#[derive(Debug, Serialize)] +struct AddressHistoryBlock { + height: u64, + hash: Option, + time: Option, +} + +#[ocean_endpoint] +async fn get_account_history( + Path(History { + address, + height, + txno, + }): Path, + Extension(ctx): Extension>, +) -> Result> { + let res = ctx + .client + .get_account_history(&address, height, txno) + .await + .map_err(|_| Error::Other { + msg: "Record not found".to_string(), + })?; + + Ok(Response::new(res.into())) +} + +// NOTE(canonbrother): deprecated its never being used +// due to unfriendly complicated pagination handling internally +// async fn list_account_history(Path(Address { address }): Path
) -> String { +// format!("List account history for address {}", address) +// } + +#[derive(Debug, Serialize, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ScriptAggregationResponse { + pub id: String, + pub hid: String, + pub block: BlockContext, + pub script: ScriptAggregationScriptResponse, + pub statistic: ScriptAggregationStatisticResponse, + pub amount: ScriptAggregationAmountResponse, +} + +impl From for ScriptAggregationResponse { + fn from(v: ScriptAggregation) -> Self { + Self { + id: format!( + "{}{}", + hex::encode(v.block.height.to_be_bytes()), + hex::encode(v.hid) + ), + hid: hex::encode(v.hid), + block: v.block, + script: ScriptAggregationScriptResponse { + r#type: v.script.r#type, + hex: v.script.hex.as_hex().to_string(), + }, + statistic: ScriptAggregationStatisticResponse { + tx_count: v.statistic.tx_count, + tx_in_count: v.statistic.tx_in_count, + tx_out_count: v.statistic.tx_out_count, + }, + amount: ScriptAggregationAmountResponse { + tx_in: format!("{:.8}", v.amount.tx_in), + tx_out: format!("{:.8}", v.amount.tx_out), + unspent: format!("{:.8}", v.amount.unspent), + }, + } + } +} + +#[derive(Debug, Serialize, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ScriptAggregationScriptResponse { + pub r#type: String, + pub hex: String, +} + +#[derive(Debug, Serialize, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ScriptAggregationStatisticResponse { + pub tx_count: i32, + pub tx_in_count: i32, + pub tx_out_count: i32, +} + +#[derive(Debug, Serialize, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ScriptAggregationAmountResponse { + pub tx_in: String, + pub tx_out: String, + pub unspent: String, +} + +fn get_latest_aggregation( + ctx: &Arc, + hid: [u8; 32], +) -> Result> { + let latest = ctx + .services + .script_aggregation + .by_id + .list(Some((hid, u32::MAX)), SortOrder::Descending)? 
+ .take(1) + .take_while(|item| match item { + Ok(((v, _), _)) => v == &hid, + _ => true, + }) + .map(|item| { + let (_, v) = item?; + let res = v.into(); + Ok(res) + }) + .collect::>>()?; + + Ok(latest.first().cloned()) +} + +#[ocean_endpoint] +async fn get_balance( + Path(Address { address }): Path
, + Extension(ctx): Extension>, +) -> Result> { + let hid = address_to_hid(&address, ctx.network)?; + let Some(aggregation) = get_latest_aggregation(&ctx, hid)? else { + return Ok(Response::new("0.00000000".to_string())); + }; + + Ok(Response::new(aggregation.amount.unspent)) +} + +#[ocean_endpoint] +async fn get_aggregation( + Path(Address { address }): Path
, + Extension(ctx): Extension>, +) -> Result>> { + let hid = address_to_hid(&address, ctx.network)?; + let aggregation = get_latest_aggregation(&ctx, hid)?; + Ok(Response::new(aggregation)) +} + +#[ocean_endpoint] +async fn list_vaults( + Path(Address { address }): Path
, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let options = ListVaultOptions { + verbose: Some(true), + owner_address: Some(address), + loan_scheme_id: None, + state: None, + }; + let vaults = get_all_vaults(&ctx, options, &query).await?; + + Ok(ApiPagedResponse::of( + vaults, + query.size, + |each| match each { + VaultResponse::Active(vault) => vault.vault_id.clone(), + VaultResponse::Liquidated(vault) => vault.vault_id.clone(), + }, + )) +} + +#[skip_serializing_none] +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ScriptActivityResponse { + pub id: String, + pub hid: String, + pub r#type: String, + pub type_hex: String, + pub txid: Txid, + pub block: BlockContext, + pub script: ScriptActivityScriptResponse, + pub vin: Option, + pub vout: Option, + pub value: String, + pub token_id: Option, +} + +impl From for ScriptActivityResponse { + fn from(v: ScriptActivity) -> Self { + let id = match v.type_hex { + ScriptActivityTypeHex::Vin => { + // TODO put vin instead ScriptActivityType + let vin = v.vin.as_ref().unwrap(); + format!( + "{}{}{}{}", + hex::encode(v.block.height.to_be_bytes()), + ScriptActivityTypeHex::Vin, + vin.txid, + hex::encode(vin.n.to_be_bytes()) + ) + } + ScriptActivityTypeHex::Vout => { + let vout = v.vout.as_ref().unwrap(); + format!( + "{}{}{}{}", + hex::encode(v.block.height.to_be_bytes()), + ScriptActivityTypeHex::Vout, + v.txid, + hex::encode(vout.n.to_be_bytes()) + ) + } + }; + Self { + id, + hid: hex::encode(v.hid), + r#type: v.r#type.to_string(), + type_hex: v.type_hex.to_string(), + txid: v.txid, + block: v.block, + script: ScriptActivityScriptResponse { + r#type: v.script.r#type, + hex: v.script.hex.to_lower_hex_string(), + }, + vin: v.vin.map(|vin| ScriptActivityVinVoutResponse { + txid: vin.txid, + n: vin.n, + }), + vout: v.vout.map(|vout| ScriptActivityVinVoutResponse { + txid: vout.txid, + n: vout.n, + }), + value: format!("{:.8}", v.value), + token_id: v.token_id, + } + } +} + 
+#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ScriptActivityScriptResponse { + pub r#type: String, + pub hex: String, +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ScriptActivityVinVoutResponse { + pub txid: Txid, + pub n: usize, +} + +#[ocean_endpoint] +async fn list_transactions( + Path(Address { address }): Path
, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let hid = address_to_hid(&address, ctx.network)?; + let next = query + .next + .as_ref() + .map(|next| { + let height = &next[0..8]; + let vin_vout_type = &next[8..8 + 2]; + let txid = &next[8 + 2..64 + 8 + 2]; + let n = &next[64 + 8 + 2..]; + + let decoded_height = hex::decode(height)?; + let height = decoded_height.try_into().map_err(|_| Error::Other { + msg: format!("Invalid height: {}", height), + })?; + let vin_vout_type = match vin_vout_type { + "00" => ScriptActivityTypeHex::Vin, + _ => ScriptActivityTypeHex::Vout, + }; + let txid = Txid::from_str(txid)?; + let n = n.parse::()?; + Ok::<([u8; 4], ScriptActivityTypeHex, Txid, usize), Error>(( + height, + vin_vout_type, + txid, + n, + )) + }) + .transpose()? + .unwrap_or(( + [u8::MAX, u8::MAX, u8::MAX, u8::MAX], + ScriptActivityTypeHex::Vout, + Txid::from_byte_array([0xffu8; 32]), + usize::MAX, + )); + + let res = ctx + .services + .script_activity + .by_id + .list( + Some((hid, next.0, next.1, next.2, next.3)), + SortOrder::Descending, + )? 
+ .skip(usize::from(query.next.is_some())) + .take(query.size) + .take_while(|item| match item { + Ok((k, _)) => k.0 == hid, + _ => true, + }) + .map(|item| { + let (_, v) = item?; + Ok(v.into()) + }) + .collect::>>()?; + + Ok(ApiPagedResponse::of(res, query.size, |item| { + item.id.clone() + })) +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ScriptUnspentResponse { + pub id: String, + pub hid: String, + pub sort: String, + pub block: BlockContext, + pub script: ScriptUnspentScriptResponse, + pub vout: ScriptUnspentVoutResponse, +} + +impl From for ScriptUnspentResponse { + fn from(v: ScriptUnspent) -> Self { + Self { + id: format!("{}{}", v.id.0, hex::encode(v.id.1)), + hid: hex::encode(v.hid), + sort: format!( + "{}{}{}", + hex::encode(v.block.height.to_be_bytes()), + v.vout.txid, + hex::encode(v.vout.n.to_be_bytes()) + ), + block: v.block, + script: ScriptUnspentScriptResponse { + r#type: v.script.r#type, + hex: v.script.hex.to_lower_hex_string(), + }, + vout: ScriptUnspentVoutResponse { + txid: v.vout.txid, + n: v.vout.n, + value: format!("{:.8}", v.vout.value), + token_id: v.vout.token_id, + }, + } + } +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ScriptUnspentScriptResponse { + pub r#type: String, + pub hex: String, +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ScriptUnspentVoutResponse { + pub txid: Txid, + pub n: usize, + pub value: String, + pub token_id: Option, +} + +#[ocean_endpoint] +async fn list_transaction_unspent( + Path(Address { address }): Path
, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let hid = address_to_hid(&address, ctx.network)?; + + let next = query + .next + .as_ref() + .map(|next| { + let height = &next[0..8]; + let txid = &next[8..64 + 8]; + let n = &next[64 + 8..]; + + let decoded_height = hex::decode(height)?; + let height = decoded_height.try_into().map_err(|_| Error::Other { + msg: format!("Invalid height: {}", height), + })?; + let txid = Txid::from_str(txid)?; + let n = n.parse::()?; + Ok::<([u8; 4], Txid, usize), Error>((height, txid, n)) + }) + .transpose()? + .unwrap_or(( + [0u8, 0u8, 0u8, 0u8], + Txid::from_byte_array([0x00u8; 32]), + usize::default(), + )); + + let res = ctx + .services + .script_unspent + .by_id + .list(Some((hid, next.0, next.1, next.2)), SortOrder::Ascending)? + .skip(usize::from(query.next.is_some())) + .take(query.size) + .take_while(|item| match item { + Ok((k, _)) => k.0 == hid, + _ => true, + }) + .map(|item| { + let (_, v) = item?; + let res = v.into(); + Ok(res) + }) + .collect::>>()?; + + Ok(ApiPagedResponse::of(res, query.size, |item| { + item.sort.clone() + })) +} + +// Tokens owned by an address +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +struct AddressToken { + id: String, + amount: String, + symbol: String, + display_symbol: String, + symbol_key: String, + name: String, + #[serde(rename = "isDAT")] + is_dat: bool, + #[serde(rename = "isLPS")] + is_lps: bool, + is_loan_token: bool, +} + +#[ocean_endpoint] +async fn list_tokens( + Path(Address { address }): Path
, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let account: BTreeMap = ctx.client.call( + "getaccount", + &[ + address.into(), + json!({ + "limit": query.size, + "start": query.next.as_ref().and_then(|n| n.parse::().ok()).unwrap_or_default(), + "including_start": query.next.is_none() + }), + true.into(), + ], + ).await?; + + let mut vec = Vec::new(); + for (k, v) in account { + let Some((id, info)) = get_token_cached(&ctx, &k).await? else { + continue; + }; + + let address_token = AddressToken { + id, + amount: format!("{v:.8}"), + display_symbol: parse_display_symbol(&info), + symbol: info.symbol, + symbol_key: info.symbol_key, + name: info.name, + is_dat: info.is_dat, + is_lps: info.is_lps, + is_loan_token: info.is_loan_token, + }; + vec.push(address_token); + } + + Ok(ApiPagedResponse::of(vec, query.size, |item| { + item.id.clone() + })) +} + +pub fn router(ctx: Arc) -> Router { + Router::new() + .route("/:address/history/:height/:txno", get(get_account_history)) + // .route("/history", get(list_account_history)) + .route("/:address/balance", get(get_balance)) + .route("/:address/aggregation", get(get_aggregation)) + .route("/:address/tokens", get(list_tokens)) + .route("/:address/vaults", get(list_vaults)) + .route("/:address/transactions", get(list_transactions)) + .route( + "/:address/transactions/unspent", + get(list_transaction_unspent), + ) + .layer(Extension(ctx)) +} diff --git a/lib/ain-ocean/src/api/block.rs b/lib/ain-ocean/src/api/block.rs new file mode 100644 index 00000000000..c887bfd014b --- /dev/null +++ b/lib/ain-ocean/src/api/block.rs @@ -0,0 +1,177 @@ +use std::sync::Arc; + +use ain_macros::ocean_endpoint; +use axum::{routing::get, Extension, Router}; +use bitcoin::{BlockHash, Txid}; +use rust_decimal::Decimal; +use serde::{Deserialize, Deserializer, Serialize}; +use serde_with::serde_as; + +use super::{ + path::Path, + query::{PaginationQuery, Query}, + response::{ApiPagedResponse, Response}, + AppContext, +}; +use 
crate::{ + api::common::Paginate, + error::{ApiError, Error}, + model::{Block, BlockContext, Transaction}, + storage::{ + InitialKeyProvider, RepositoryOps, SecondaryIndex, SortOrder, TransactionByBlockHash, + }, + Result, +}; + +pub enum HashOrHeight { + Height(u32), + Id(BlockHash), +} + +impl<'de> Deserialize<'de> for HashOrHeight { + fn deserialize(deserializer: D) -> std::result::Result + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + if let Ok(height) = s.parse::() { + Ok(Self::Height(height)) + } else if let Ok(id) = s.parse::() { + Ok(Self::Id(id)) + } else { + Err(serde::de::Error::custom("Error parsing HashOrHeight")) + } + } +} + +#[serde_as] +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct TransactionResponse { + pub id: Txid, + pub txid: Txid, + pub order: usize, // tx order + pub block: BlockContext, + pub hash: String, + pub version: u32, + pub size: u64, + pub v_size: u64, + pub weight: u64, + #[serde(with = "rust_decimal::serde::str")] + pub total_vout_value: Decimal, + pub lock_time: u64, + pub vin_count: usize, + pub vout_count: usize, +} + +impl From for TransactionResponse { + fn from(v: Transaction) -> Self { + Self { + id: v.id, + txid: v.id, + order: v.order, + block: v.block, + hash: v.hash, + version: v.version, + size: v.size, + v_size: v.v_size, + weight: v.weight, + total_vout_value: v.total_vout_value, + lock_time: v.lock_time, + vin_count: v.vin_count, + vout_count: v.vout_count, + } + } +} + +#[ocean_endpoint] +async fn list_blocks( + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let next = query + .next + .as_ref() + .map(|q| { + let height = q.parse::()?; + Ok::(height) + }) + .transpose()?; + + let repository = &ctx.services.block.by_height; + let blocks = repository + .list(next, SortOrder::Descending)? 
+ .paginate(&query) + .map(|e| repository.retrieve_primary_value(e)) + .collect::>>()?; + + Ok(ApiPagedResponse::of(blocks, query.size, |block| { + block.clone().height + })) +} + +#[ocean_endpoint] +async fn get_block( + Path(id): Path, + Extension(ctx): Extension>, +) -> Result>> { + let block = if let Some(id) = match id { + HashOrHeight::Height(n) => ctx.services.block.by_height.get(&n)?, + HashOrHeight::Id(id) => Some(id), + } { + ctx.services.block.by_id.get(&id)? + } else { + None + }; + + Ok(Response::new(block)) +} + +#[ocean_endpoint] +async fn get_transactions( + Path(hash): Path, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let repository = &ctx.services.transaction.by_block_hash; + + let next = query + .next + .as_ref() + .map_or(Ok(TransactionByBlockHash::initial_key(hash)), |q| { + let height: usize = q.parse::()?; + Ok::<(BlockHash, usize), Error>((hash, height)) + })?; + + let txs = repository + .list(Some(next), SortOrder::Ascending)? + .paginate(&query) + .take_while(|item| match item { + Ok(((h, _), _)) => h == &hash, + _ => true, + }) + .map(|el| repository.retrieve_primary_value(el)) + .map(|v| v.map(TransactionResponse::from)) + .collect::>>()?; + + Ok(ApiPagedResponse::of(txs, query.size, |tx| tx.order)) +} + +// Get highest indexed block +#[ocean_endpoint] +async fn get_highest( + Extension(ctx): Extension>, +) -> Result>> { + let block = ctx.services.block.by_height.get_highest()?; + + Ok(Response::new(block)) +} + +pub fn router(ctx: Arc) -> Router { + Router::new() + .route("/", get(list_blocks)) + .route("/highest", get(get_highest)) + .route("/:id", get(get_block)) + .route("/:hash/transactions", get(get_transactions)) + .layer(Extension(ctx)) +} diff --git a/lib/ain-ocean/src/api/cache.rs b/lib/ain-ocean/src/api/cache.rs new file mode 100644 index 00000000000..c026ad05335 --- /dev/null +++ b/lib/ain-ocean/src/api/cache.rs @@ -0,0 +1,174 @@ +use std::{collections::HashMap, sync::Arc}; + +use 
cached::proc_macro::cached; +use defichain_rpc::{ + json::{ + loan::LoanSchemeResult, + poolpair::{PoolPairInfo, PoolPairPagination, PoolPairsResult}, + token::{TokenInfo, TokenPagination, TokenResult}, + GetNetworkInfoResult, + }, + jsonrpc_async::error::{Error as JsonRpcError, RpcError}, + Error, LoanRPC, MasternodeRPC, PoolPairRPC, RpcApi, TokenRPC, +}; + +use super::AppContext; +use crate::Result; + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("gettoken{symbol}") }"# +)] +pub async fn get_token_cached( + ctx: &Arc, + symbol: &str, +) -> Result> { + let res = ctx.client.get_token(symbol).await; + + let is_err = res.as_ref().is_err_and(|err| { + // allow `Token not found` err + err.to_string() + != Error::JsonRpc(JsonRpcError::Rpc(RpcError { + code: -5, + message: "Token not found".to_string(), + data: None, + })) + .to_string() + }); + if is_err { + return Err(res.unwrap_err().into()); + }; + + let res = res.ok(); + + let token = if let Some(res) = res { + res.0.into_iter().next() + } else { + None + }; + + Ok(token) +} + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("listtokens") }"# +)] +pub async fn list_tokens_cached(ctx: &Arc) -> Result { + let tokens = ctx + .client + .list_tokens( + Some(TokenPagination { + start: 0, + including_start: true, + limit: 1000, + }), + Some(true), + ) + .await?; + + Ok(tokens) +} + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("getpoolpair{id}") }"# +)] +pub async fn get_pool_pair_cached( + ctx: &Arc, + id: String, +) -> Result> { + let res = ctx.client.get_pool_pair(id, Some(true)).await; + + let is_err = res.as_ref().is_err_and(|err| { + // allow `Pool not found` err + err.to_string() + != Error::JsonRpc(JsonRpcError::Rpc(RpcError { + code: -5, + message: "Pool not found".to_string(), + data: None, + })) + .to_string() + }); + if is_err { + return Err(res.unwrap_err().into()); + }; + + let res = res.ok(); + + 
let pool_pair = if let Some(res) = res { + res.0.into_iter().next() + } else { + None + }; + + Ok(pool_pair) +} + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("listpoolpairs{limit:?}{next:?}") }"# +)] +pub async fn list_pool_pairs_cached( + ctx: &Arc, + limit: Option, + next: Option, +) -> Result { + let pool_pairs = ctx + .client + .list_pool_pairs( + Some(PoolPairPagination { + start: next.unwrap_or_default(), + including_start: next.is_none(), + limit: limit.unwrap_or(1000), + }), + Some(true), + ) + .await?; + Ok(pool_pairs) +} + +#[cached( + result = true, + time = 600, + key = "String", + sync_writes = true, + convert = r#"{ format!("getgov{id}") }"# +)] +pub async fn get_gov_cached( + ctx: &Arc, + id: String, +) -> Result> { + let gov = ctx.client.get_gov(id).await?; + Ok(gov) +} + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("getloanscheme{id}") }"# +)] +pub async fn get_loan_scheme_cached(ctx: &Arc, id: String) -> Result { + let loan_scheme = ctx.client.get_loan_scheme(id).await?; + Ok(loan_scheme) +} + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("getnetworkinfo") }"# +)] +pub async fn get_network_info_cached(ctx: &Arc) -> Result { + let info = ctx.client.get_network_info().await?; + Ok(info) +} diff --git a/lib/ain-ocean/src/api/common.rs b/lib/ain-ocean/src/api/common.rs new file mode 100644 index 00000000000..e6912d918e2 --- /dev/null +++ b/lib/ain-ocean/src/api/common.rs @@ -0,0 +1,279 @@ +use std::str::FromStr; + +use ain_dftx::{Currency, Token}; +use bitcoin::{Address, ScriptBuf}; +use defichain_rpc::json::token::TokenInfo; +use rust_decimal::Decimal; +use rust_decimal_macros::dec; +use snafu::OptionExt; + +use super::query::PaginationQuery; +use crate::{ + error::{ + InvalidAmountSnafu, InvalidFixedIntervalPriceSnafu, InvalidPoolPairSymbolSnafu, + InvalidTokenCurrencySnafu, + }, + hex_encoder::as_sha256, + network::Network, + 
Result, +}; + +#[must_use] +pub fn parse_display_symbol(token_info: &TokenInfo) -> String { + if token_info.is_lps { + let tokens: Vec<&str> = token_info.symbol.split('-').collect(); + if tokens.len() == 2 { + return format!( + "{}-{}", + parse_dat_symbol(tokens[0]), + parse_dat_symbol(tokens[1]) + ); + } + } else if token_info.is_dat { + return parse_dat_symbol(&token_info.symbol); + } + + token_info.symbol.clone() +} + +#[must_use] +pub fn parse_dat_symbol(symbol: &str) -> String { + let special_symbols = ["DUSD", "DFI", "csETH"]; + + if special_symbols.contains(&symbol) { + symbol.to_string() + } else { + format!("d{symbol}") + } +} + +pub fn parse_pool_pair_symbol(item: &str) -> Result<(String, String)> { + let mut parts = item.split('-'); + let a = parts + .next() + .context(InvalidPoolPairSymbolSnafu { item })? + .to_string(); + let b = parts + .next() + .context(InvalidPoolPairSymbolSnafu { item })? + .to_string(); + + Ok((a, b)) +} + +pub fn parse_token_currency(item: &str) -> Result<(Token, Currency)> { + let mut parts = item.split('-'); + let token = parts + .next() + .context(InvalidTokenCurrencySnafu { item })? + .to_string(); + let currency = parts + .next() + .context(InvalidTokenCurrencySnafu { item })? + .to_string(); + + Ok((token, currency)) +} + +pub fn parse_fixed_interval_price(item: &str) -> Result<(Token, Currency)> { + let mut parts = item.split('/'); + let token = parts + .next() + .context(InvalidFixedIntervalPriceSnafu { item })? + .to_string(); + let currency = parts + .next() + .context(InvalidFixedIntervalPriceSnafu { item })? + .to_string(); + + Ok((token, currency)) +} + +pub fn parse_amount(item: &str) -> Result<(String, String)> { + let mut parts = item.split('@'); + let amount = parts + .next() + .context(InvalidAmountSnafu { item })? + .to_string(); + let symbol = parts + .next() + .context(InvalidAmountSnafu { item })? 
+ .to_string(); + + Ok((amount, symbol)) +} + +pub fn parse_query_height_txno(item: &str) -> Result<(u32, usize)> { + let mut parts = item.split('-'); + let height = parts.next().context(InvalidAmountSnafu { item })?; + let txno = parts.next().context(InvalidAmountSnafu { item })?; + + let height = height.parse::()?; + let txno = txno.parse::()?; + + Ok((height, txno)) +} + +#[must_use] +pub fn format_number(v: Decimal) -> String { + if v == dec!(0) { + "0".to_string() + } else { + format!("{v:.8}") + } +} + +pub fn from_script(script: &ScriptBuf, network: Network) -> Result { + Ok(Address::from_script(script, network.into())?.to_string()) +} + +#[test] +fn test_from_script() { + // OP_0 { type: 'OP_0', code: 0 }, + // OP_PUSHDATA { + // type: 'OP_PUSHDATA', + // hex: '05768f2d17f0016b5720bb49859cbb065041716f' + // } + let script = ScriptBuf::from_hex("001405768f2d17f0016b5720bb49859cbb065041716f").unwrap(); + let addr = from_script(&script, Network::Mainnet).unwrap(); + assert_eq!( + addr, + "df1qq4mg7tgh7qqkk4eqhdyct89mqegyzut0jjz8rg".to_string() + ) +} + +pub fn to_script(address: &str, network: Network) -> Result { + let addr = Address::from_str(address)?.require_network(network.into())?; + Ok(ScriptBuf::from(addr)) +} + +pub fn address_to_hid(address: &str, network: Network) -> Result<[u8; 32]> { + let script = to_script(address, network)?; + let bytes = script.to_bytes(); + Ok(as_sha256(&bytes)) +} + +/// Finds the balance of a specified token symbol within a list of token strings. +/// +/// This function iterates through a vector of token strings, where each string +/// represents an amount followed by a token symbol in the format "amount@symbol". +/// It searches for the specified token symbol and returns the corresponding balance. +/// If the token symbol is not found or if there are any parsing errors, it returns 0. +/// +/// # Arguments +/// +/// * `tokens` - A vector of strings representing token amounts with their symbols. 
+/// * `symbol` - A reference to a string representing the token symbol to find the balance for. +/// +/// # Examples +/// +/// ``` +/// use rust_decimal::Decimal; +/// use rust_decimal_macros::dec; +/// use ain_ocean::api::{common::find_token_balance}; +/// +/// let tokens = vec![ +/// "557.35080849@DFI".to_string(), +/// "9.98000000@BTC".to_string(), +/// "421.46947098@DUSD".to_string() +/// ]; +/// let balance = find_token_balance(tokens, "DFI"); +/// assert_eq!(balance, dec!(557.35080849)); +/// ``` +/// +/// # Returns +/// +/// The balance of the specified token symbol if found; otherwise, returns 0. +#[must_use] +pub fn find_token_balance(tokens: Vec, symbol: &str) -> Decimal { + tokens + .iter() + .find_map(|t| { + t.ends_with(symbol) + .then(|| t.split('@').next().and_then(|v| v.parse::().ok())) + .flatten() + }) + .unwrap_or_default() +} + +/// Provides a simulated pagination mechanism for iterators where native pagination is not available. +/// +/// This trait extends any Rust iterator to include a `paginate` method, allowing for pseudo-pagination +/// based on custom logic. It's should only be used to query defid list* RPC that don't implement native pagination +/// +/// # Warning +/// +/// This method should be used cautiously, as it involves retrieving all data from the data source +/// before applying pagination. This can lead to significant performance and resource usage issues, +/// especially with large datasets. It is recommended to use this approach only defid does not accept +/// any pagination parameter. +/// +/// # Parameters +/// +/// - `query`: A reference to `PaginationQuery` +/// - `skip_while`: A closure that determines the starting point of data to consider, mimicking the +/// 'start' parameter in traditional pagination. Once an item fails this condition, pagination starts. +/// +/// # Example +/// +/// This example is illustrative only and should not be executed directly as it involves API calls. 
+/// +/// ```rust,ignore +/// use ain_ocean::api::common::Paginate; +/// +/// let query = PaginationQuery { +/// next: Some(1) +/// limit: 3 +/// }; +/// +/// let skip_while = |el: &LoanSchemeResult| match &query.next { +/// None => false, +/// Some(v) => v != &el.id, +/// }; +/// let res: Vec<_> = ctx +/// .client +/// .list_loan_schemes() +/// .await? +/// .into_iter() +/// .fake_paginate(&query, skip_while) +/// .collect(); +/// +/// assert!(res.len() <= query.size, "The result should not contain more items than the specified limit"); +/// assert!(res[0].id > query.next.unwrap(), "The result should start after the requested start id"); +/// ``` +pub trait Paginate<'a, T>: Iterator + Sized { + fn fake_paginate( + self, + query: &PaginationQuery, + skip_while: F, + ) -> Box + 'a> + where + F: FnMut(&T) -> bool + 'a; + fn paginate(self, query: &PaginationQuery) -> Box + 'a>; +} + +impl<'a, T, I> Paginate<'a, T> for I +where + I: Iterator + 'a, +{ + fn fake_paginate( + self, + query: &PaginationQuery, + skip_while: F, + ) -> Box + 'a> + where + F: FnMut(&T) -> bool + 'a, + { + Box::new( + self.skip_while(skip_while) + .skip(usize::from(query.next.is_some())) + .take(query.size), + ) + } + fn paginate(self, query: &PaginationQuery) -> Box + 'a> { + Box::new( + self.skip(usize::from(query.next.is_some())) + .take(query.size), + ) + } +} diff --git a/lib/ain-ocean/src/api/debug.rs b/lib/ain-ocean/src/api/debug.rs new file mode 100644 index 00000000000..9123d8a53ba --- /dev/null +++ b/lib/ain-ocean/src/api/debug.rs @@ -0,0 +1,17 @@ +use ain_macros::ocean_endpoint; +use axum::{routing::get, Extension, Router}; +use std::sync::Arc; + +use super::AppContext; +use crate::{error::ApiError, Result}; + +#[ocean_endpoint] +async fn dump_tables(Extension(ctx): Extension>) -> Result<()> { + ctx.services.store.dump_table_sizes() +} + +pub fn router(ctx: Arc) -> Router { + Router::new() + .route("/dumptables", get(dump_tables)) + .layer(Extension(ctx)) +} diff --git 
a/lib/ain-ocean/src/api/fee.rs b/lib/ain-ocean/src/api/fee.rs new file mode 100644 index 00000000000..024c890acec --- /dev/null +++ b/lib/ain-ocean/src/api/fee.rs @@ -0,0 +1,39 @@ +use std::sync::Arc; + +use ain_macros::ocean_endpoint; +use axum::{routing::get, Extension, Router}; +use defichain_rpc::{json::mining::SmartFeeEstimation, RpcApi}; +use serde::Deserialize; + +use super::{query::Query, response::Response, AppContext}; +use crate::{error::ApiError, Result}; + +#[derive(Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct EstimateQuery { + confirmation_target: i32, +} + +#[ocean_endpoint] +async fn estimate_fee( + Query(EstimateQuery { + confirmation_target, + }): Query, + Extension(ctx): Extension>, +) -> Result> { + let estimation: SmartFeeEstimation = ctx + .client + .call( + "estimatesmartfee", + &[confirmation_target.into(), "CONSERVATIVE".into()], + ) + .await?; + + Ok(Response::new(estimation.feerate.unwrap_or(0.000_050_00))) +} + +pub fn router(ctx: Arc) -> Router { + Router::new() + .route("/estimate", get(estimate_fee)) + .layer(Extension(ctx)) +} diff --git a/lib/ain-ocean/src/api/governance.rs b/lib/ain-ocean/src/api/governance.rs new file mode 100644 index 00000000000..0529b69c8fc --- /dev/null +++ b/lib/ain-ocean/src/api/governance.rs @@ -0,0 +1,118 @@ +use std::sync::Arc; + +use ain_macros::ocean_endpoint; +use axum::{routing::get, Extension, Router}; +use bitcoin::Txid; +use defichain_rpc::{json::governance::*, GovernanceRPC}; +use serde::Deserialize; + +use super::{ + path::Path, + query::{PaginationQuery, Query}, + response::{ApiPagedResponse, Response}, + AppContext, +}; +use crate::{error::ApiError, model::ApiProposalInfo, Result}; + +#[derive(Deserialize, Default)] +pub struct GovernanceQuery { + #[serde(flatten)] + pub pagination: PaginationQuery, + pub status: Option, + pub r#type: Option, + pub cycle: Option, + pub all: Option, + pub masternode: Option, +} + +#[ocean_endpoint] +async fn list_gov_proposals( + 
Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let size = match query.all { + Some(true) => 0, + _ => query.pagination.size, + }; + + let opts = ListProposalsOptions { + pagination: Some(ListProposalsPagination { + limit: Some(size), + start: query.pagination.next.clone(), + ..ListProposalsPagination::default() + }), + status: query.status, + r#type: query.r#type, + cycle: query.cycle, + }; + let proposals = ctx.client.list_gov_proposals(Some(opts)).await?; + let mut proposals_with_string_amount: Vec = + proposals.into_iter().map(ApiProposalInfo::from).collect(); + proposals_with_string_amount.sort_by(|a, b| a.creation_height.cmp(&b.creation_height)); + Ok(ApiPagedResponse::of( + proposals_with_string_amount, + size, + |proposal| proposal.proposal_id.to_string(), + )) +} + +#[ocean_endpoint] +async fn get_gov_proposal( + Extension(ctx): Extension>, + Path(proposal_id): Path, +) -> Result> { + let txid: Txid = proposal_id.parse()?; + + let proposal = ctx.client.get_gov_proposal(txid).await?; + Ok(Response::new(proposal.into())) +} + +#[ocean_endpoint] +async fn list_gov_proposal_votes( + Path(proposal_id): Path, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let proposal_id: Txid = proposal_id.parse()?; + + let size = match query.all { + Some(true) => 0, + _ => query.pagination.size, + }; + + let start = query + .pagination + .next + .map(|v| v.parse::()) + .transpose()?; + + let opts = ListGovProposalVotesOptions { + proposal_id: Some(proposal_id), + masternode: query.masternode, + pagination: Some(ListGovProposalVotesPagination { + limit: Some(size), + start, + ..ListGovProposalVotesPagination::default() + }), + cycle: query.cycle, + aggregate: None, + valid: None, + }; + let votes = ctx.client.list_gov_proposal_votes(Some(opts)).await?; + let len = votes.len(); + Ok(ApiPagedResponse::of(votes, size, |_| { + if let Some(next) = start { + next + len + } else { + len - 1 + } + })) +} + +pub fn router(ctx: Arc) -> Router 
{ + Router::new() + .route("/proposals", get(list_gov_proposals)) + .route("/proposals/:id", get(get_gov_proposal)) + .route("/proposals/:id/votes", get(list_gov_proposal_votes)) + .layer(Extension(ctx)) +} diff --git a/lib/ain-ocean/src/api/loan.rs b/lib/ain-ocean/src/api/loan.rs new file mode 100644 index 00000000000..e58281bac7d --- /dev/null +++ b/lib/ain-ocean/src/api/loan.rs @@ -0,0 +1,771 @@ +use std::{str::FromStr, sync::Arc}; + +use ain_macros::ocean_endpoint; +use axum::{routing::get, Extension, Router}; +use bitcoin::{hashes::Hash, Txid}; +use defichain_rpc::{ + defichain_rpc_json::{ + loan::{CollateralTokenDetail, LoanSchemeResult}, + token::TokenInfo, + vault::{VaultActive, VaultLiquidationBatch}, + }, + json::vault::{ + AuctionPagination, AuctionPaginationStart, ListVaultOptions, VaultLiquidation, + VaultPagination, VaultResult, VaultState, + }, + LoanRPC, VaultRPC, +}; +use futures::future::try_join_all; +use log::trace; +use serde::{Serialize, Serializer}; +use snafu::OptionExt; + +use super::{ + cache::{get_loan_scheme_cached, get_token_cached}, + common::{ + from_script, parse_amount, parse_display_symbol, parse_fixed_interval_price, + parse_query_height_txno, Paginate, + }, + path::Path, + query::{PaginationQuery, Query}, + response::{ApiPagedResponse, Response}, + tokens::TokenData, + AppContext, +}; +use crate::{ + error::{ApiError, Error, NotFoundKind, NotFoundSnafu}, + model::{OraclePriceActive, VaultAuctionBatchHistory}, + storage::{RepositoryOps, SecondaryIndex, SortOrder}, + Result, +}; + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +pub struct LoanSchemeData { + id: String, + min_col_ratio: String, + interest_rate: String, +} + +impl From for LoanSchemeData { + fn from(value: LoanSchemeResult) -> Self { + Self { + id: value.id, + min_col_ratio: format!("{}", value.mincolratio), + interest_rate: format!("{}", value.interestrate), + } + } +} + +#[ocean_endpoint] +async fn list_scheme( + Query(query): Query, + Extension(ctx): 
Extension>, +) -> Result> { + let skip_while = |el: &LoanSchemeResult| match &query.next { + None => false, + Some(v) => v != &el.id, + }; + + let res = ctx + .client + .list_loan_schemes() + .await? + .into_iter() + .fake_paginate(&query, skip_while) + .map(Into::into) + .collect(); + Ok(ApiPagedResponse::of(res, query.size, |loan_scheme| { + loan_scheme.id.clone() + })) +} + +#[ocean_endpoint] +async fn get_scheme( + Path(scheme_id): Path, + Extension(ctx): Extension>, +) -> Result> { + let scheme = ctx + .client + .get_loan_scheme(scheme_id) + .await + .map_err(|_| Error::NotFound { + kind: NotFoundKind::Scheme, + })?; + Ok(Response::new(scheme.into())) +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +pub struct CollateralToken { + token_id: String, + token: TokenData, + factor: String, + activate_after_block: u32, + fixed_interval_price_id: String, + active_price: Option, +} + +impl CollateralToken { + fn from_with_id( + id: String, + detail: CollateralTokenDetail, + info: TokenInfo, + active_price: Option, + ) -> Self { + Self { + token_id: detail.token_id, + factor: format!("{}", detail.factor), + activate_after_block: 0, + fixed_interval_price_id: detail.fixed_interval_price_id, + token: TokenData::from_with_id(id, info), + active_price, + } + } +} + +async fn get_active_price( + ctx: &Arc, + fixed_interval_price_id: String, +) -> Result> { + let (token, currency) = parse_fixed_interval_price(&fixed_interval_price_id)?; + let repo = &ctx.services.oracle_price_active; + let keys = repo + .by_key + .list(Some((token, currency)), SortOrder::Descending)? 
+ .take(1) + .flatten() + .collect::>(); + + if keys.is_empty() { + return Ok(None); + } + + let Some((_, id)) = keys.first() else { + return Ok(None); + }; + + let price = repo.by_id.get(id)?; + + let Some(price) = price else { + return Ok(None); + }; + + Ok(Some(price)) +} + +#[ocean_endpoint] +async fn list_collateral_token( + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let skip_while = |el: &CollateralTokenDetail| match &query.next { + None => false, + Some(v) => v != &el.token_id, + }; + + let tokens = ctx.client.list_collateral_tokens().await?; + + let fut = tokens + .into_iter() + .fake_paginate(&query, skip_while) + .map(|v| async { + let (id, info) = get_token_cached(&ctx, &v.token_id) + .await? + .context(NotFoundSnafu { + kind: NotFoundKind::Token { + id: v.token_id.clone(), + }, + })?; + let active_price = get_active_price(&ctx, v.fixed_interval_price_id.clone()).await?; + Ok::(CollateralToken::from_with_id(id, v, info, active_price)) + }) + .collect::>(); + + let res = try_join_all(fut).await?; + + Ok(ApiPagedResponse::of(res, query.size, |loan_scheme| { + loan_scheme.token_id.clone() + })) +} + +#[ocean_endpoint] +async fn get_collateral_token( + Path(token_id): Path, + Extension(ctx): Extension>, +) -> Result> { + let collateral_token = ctx + .client + .get_collateral_token(token_id) + .await + .map_err(|_| Error::NotFound { + kind: NotFoundKind::CollateralToken, + })?; + let (id, info) = get_token_cached(&ctx, &collateral_token.token_id) + .await? 
+ .context(NotFoundSnafu { + kind: NotFoundKind::Token { + id: collateral_token.token_id.clone(), + }, + })?; + let active_price = + get_active_price(&ctx, collateral_token.fixed_interval_price_id.clone()).await?; + + Ok(Response::new(CollateralToken::from_with_id( + id, + collateral_token, + info, + active_price, + ))) +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct LoanToken { + token_id: String, + token: TokenData, + interest: String, + fixed_interval_price_id: String, + active_price: Option, +} + +#[ocean_endpoint] +async fn list_loan_token( + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let tokens = ctx.client.list_loan_tokens().await?; + + struct FlattenToken { + id: String, + data: TokenInfo, + fixed_interval_price_id: String, + interest: f64, + } + + let res = tokens + .into_iter() + .filter_map(|el| { + el.token + .0 + .into_iter() + .next() // Should always get a Hashmap with single entry here. + .map(|(id, data)| FlattenToken { + id, + data, + fixed_interval_price_id: el.fixed_interval_price_id, + interest: el.interest, + }) + }) + .fake_paginate(&query, |token| match &query.next { + None => false, + Some(v) => v != &token.data.creation_tx, + }) + .map(|flatten_token| { + let fixed_interval_price_id = flatten_token.fixed_interval_price_id.clone(); + let (token, currency) = parse_fixed_interval_price(&fixed_interval_price_id)?; + + let repo = &ctx.services.oracle_price_active; + let key = repo.by_key.get(&(token, currency))?; + let active_price = if let Some(key) = key { + repo.by_id.get(&key)? 
+ } else { + None + }; + + let token = LoanToken { + token_id: flatten_token.data.creation_tx.clone(), + token: TokenData::from_with_id(flatten_token.id, flatten_token.data), + interest: format!("{:.2}", flatten_token.interest), + fixed_interval_price_id, + active_price, + }; + Ok::(token) + }) + .collect::>>()?; + + Ok(ApiPagedResponse::of(res, query.size, |loan_scheme| { + loan_scheme.token_id.clone() + })) +} + +#[ocean_endpoint] +async fn get_loan_token( + Path(token_id): Path, + Extension(ctx): Extension>, +) -> Result> { + let loan_token_result = ctx + .client + .get_loan_token(token_id.clone()) + .await + .map_err(|_| Error::NotFound { + kind: NotFoundKind::LoanToken, + })?; + let Some(token) = loan_token_result + .token + .0 + .into_iter() + .next() + .map(|(id, info)| { + let fixed_interval_price_id = loan_token_result.fixed_interval_price_id.clone(); + let (token, currency) = parse_fixed_interval_price(&fixed_interval_price_id)?; + + let repo = &ctx.services.oracle_price_active; + let key = repo.by_key.get(&(token, currency))?; + let active_price = if let Some(key) = key { + repo.by_id.get(&key)? + } else { + None + }; + + Ok::(LoanToken { + token_id: info.creation_tx.clone(), + token: TokenData::from_with_id(id, info), + interest: format!("{:.2}", loan_token_result.interest), + fixed_interval_price_id, + active_price, + }) + }) + .transpose()? 
+ else { + return Err(Error::NotFound { + kind: NotFoundKind::LoanToken, + }); + }; + + Ok(Response::new(token)) +} + +pub async fn get_all_vaults( + ctx: &Arc, + options: ListVaultOptions, + query: &PaginationQuery, +) -> Result> { + let pagination = VaultPagination { + start: query.next.clone(), + including_start: None, + limit: if query.size > 30 { + Some(30) + } else { + Some(query.size) + }, + }; + + let vaults = ctx.client.list_vaults(options, pagination).await?; + let mut list = Vec::new(); + for vault in vaults { + let each = match vault { + VaultResult::VaultActive(vault) => { + VaultResponse::Active(map_vault_active(ctx, vault).await?) + } + VaultResult::VaultLiquidation(vault) => { + VaultResponse::Liquidated(map_vault_liquidation(ctx, vault).await?) + } + }; + list.push(each); + } + + Ok(list) +} + +#[ocean_endpoint] +async fn list_vaults( + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let options = ListVaultOptions { + verbose: Some(true), + owner_address: None, + loan_scheme_id: None, + state: None, + }; + let list = get_all_vaults(&ctx, options, &query).await?; + + Ok(ApiPagedResponse::of(list, query.size, |each| match each { + VaultResponse::Active(vault) => vault.vault_id.clone(), + VaultResponse::Liquidated(vault) => vault.vault_id.clone(), + })) +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +struct LoanScheme { + id: String, + min_col_ratio: String, + interest_rate: String, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +pub struct VaultActiveResponse { + pub vault_id: String, + loan_scheme: LoanScheme, + owner_address: String, + #[serde(default = "VaultState::Active")] + state: VaultState, + informative_ratio: String, + collateral_ratio: String, + collateral_value: String, + loan_value: String, + interest_value: String, + collateral_amounts: Vec, + loan_amounts: Vec, + interest_amounts: Vec, +} + +async fn map_loan_scheme(ctx: &Arc, id: String) -> Result { + let loan_scheme = 
get_loan_scheme_cached(ctx, id).await?; + Ok(LoanScheme { + id: loan_scheme.id, + min_col_ratio: loan_scheme.mincolratio.to_string(), + interest_rate: loan_scheme.interestrate.to_string(), + }) +} + +async fn map_vault_active( + ctx: &Arc, + vault: VaultActive, +) -> Result { + Ok(VaultActiveResponse { + vault_id: vault.vault_id, + loan_scheme: map_loan_scheme(ctx, vault.loan_scheme_id).await?, + owner_address: vault.owner_address, + state: VaultState::Active, + informative_ratio: vault.informative_ratio.to_string(), + collateral_ratio: vault.collateral_ratio.to_string(), + collateral_value: vault.collateral_value.to_string(), + loan_value: vault.loan_value.to_string(), + interest_value: vault.interest_value.to_string(), + collateral_amounts: map_token_amounts(ctx, vault.collateral_amounts).await?, + loan_amounts: map_token_amounts(ctx, vault.loan_amounts).await?, + interest_amounts: map_token_amounts(ctx, vault.interest_amounts).await?, + }) +} + +async fn map_vault_liquidation( + ctx: &Arc, + vault: VaultLiquidation, +) -> Result { + let loan_scheme = get_loan_scheme_cached(ctx, vault.loan_scheme_id).await?; + Ok(VaultLiquidatedResponse { + batches: map_liquidation_batches(ctx, &vault.vault_id, vault.batches).await?, + vault_id: vault.vault_id, + loan_scheme, + owner_address: vault.owner_address, + state: vault.state, + liquidation_height: vault.liquidation_height, + liquidation_penalty: vault.liquidation_penalty, + batch_count: vault.batch_count, + }) +} + +pub enum VaultResponse { + Active(VaultActiveResponse), + Liquidated(VaultLiquidatedResponse), +} + +impl Serialize for VaultResponse { + fn serialize(&self, serializer: S) -> std::result::Result + where + S: Serializer, + { + match self { + Self::Active(v) => v.serialize(serializer), + Self::Liquidated(v) => v.serialize(serializer), + } + } +} + +#[ocean_endpoint] +async fn get_vault( + Path(vault_id): Path, + Extension(ctx): Extension>, +) -> Result> { + let vault = ctx + .client + .get_vault(vault_id, 
Some(false)) + .await + .map_err(|_| Error::NotFound { + kind: NotFoundKind::Vault, + })?; + let res = match vault { + VaultResult::VaultActive(vault) => { + VaultResponse::Active(map_vault_active(&ctx, vault).await?) + } + VaultResult::VaultLiquidation(vault) => { + VaultResponse::Liquidated(map_vault_liquidation(&ctx, vault).await?) + } + }; + + Ok(Response::new(res)) +} + +#[ocean_endpoint] +async fn list_vault_auction_history( + Path((vault_id, height, batch_index)): Path<(Txid, u32, u32)>, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + trace!( + "Auction history for vault id {}, height {}, batch index {}", + vault_id, + height, + batch_index + ); + let next = query + .next + .map(|q| { + let (height, txno) = parse_query_height_txno(&q)?; + Ok::<(u32, usize), Error>((height, txno)) + }) + .transpose()? + .unwrap_or_default(); + + let size = if query.size > 0 { query.size } else { 20 }; + + let auctions = ctx + .services + .auction + .by_height + .list( + Some((vault_id, batch_index, next.0, next.1)), + SortOrder::Descending, + )? + .take(size) + .take_while(|item| match item { + Ok((k, _)) => k.0 == vault_id && k.1 == batch_index, + _ => true, + }) + .map(|item| { + let (_, id) = item?; + + let auction = ctx + .services + .auction + .by_id + .get(&id)? 
+ .context(NotFoundSnafu { + kind: NotFoundKind::Auction, + })?; + + Ok(auction) + }) + .collect::>>()?; + + Ok(ApiPagedResponse::of(auctions, query.size, |auction| { + auction.sort.to_string() + })) +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct VaultLiquidatedResponse { + pub vault_id: String, + pub loan_scheme: LoanSchemeResult, + pub owner_address: String, + #[serde(default = "VaultState::in_liquidation")] + pub state: VaultState, + pub liquidation_height: u64, + pub liquidation_penalty: f64, + pub batch_count: usize, + pub batches: Vec, +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct HighestBidResponse { + pub owner: String, + pub amount: Option, +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct VaultLiquidatedBatchResponse { + index: u32, + collaterals: Vec, + loan: Option, + highest_bid: Option, + froms: Vec, +} + +#[derive(Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct VaultTokenAmountResponse { + pub id: String, + pub amount: String, + pub symbol: String, + pub display_symbol: String, + pub symbol_key: String, + pub name: String, + pub active_price: Option, +} + +async fn map_liquidation_batches( + ctx: &Arc, + vault_id: &str, + batches: Vec, +) -> Result> { + let repo = &ctx.services.auction; + let mut vec = Vec::new(); + for batch in batches { + let highest_bid = if let Some(bid) = batch.highest_bid { + let amount = map_token_amounts(ctx, vec![bid.amount]).await?; + let res = HighestBidResponse { + owner: bid.owner, + amount: amount.first().cloned(), + }; + Some(res) + } else { + None + }; + let id = ( + Txid::from_str(vault_id)?, + batch.index, + Txid::from_byte_array([0xffu8; 32]), + ); + let bids = repo + .by_id + .list(Some(id), SortOrder::Descending)? 
+ .take_while(|item| match item { + Ok(((vid, bindex, _), _)) => vid.to_string() == vault_id && bindex == &batch.index, + _ => true, + }) + .collect::>(); + let froms = bids + .into_iter() + .map(|bid| { + let (_, v) = bid?; + let from_addr = from_script(&v.from, ctx.network)?; + Ok::(from_addr) + }) + .collect::>>()?; + vec.push(VaultLiquidatedBatchResponse { + index: batch.index, + collaterals: map_token_amounts(ctx, batch.collaterals).await?, + loan: map_token_amounts(ctx, vec![batch.loan]) + .await? + .first() + .cloned(), + froms, + highest_bid, + }); + } + Ok(vec) +} + +async fn map_token_amounts( + ctx: &Arc, + amounts: Vec, +) -> Result> { + if amounts.is_empty() { + return Ok(Vec::new()); + } + let amount_token_symbols = amounts + .into_iter() + .map(|amount| { + let (amount, token_symbol) = parse_amount(&amount)?; + Ok::<[String; 2], Error>([amount, token_symbol]) + }) + .collect::>>()?; + + let mut vault_token_amounts = Vec::new(); + for [amount, token_symbol] in amount_token_symbols { + let Some((id, token_info)) = get_token_cached(ctx, &token_symbol).await? else { + log::error!("Token {token_symbol} not found"); + continue; + }; + let repo = &ctx.services.oracle_price_active; + + let keys = repo + .by_key + .list(None, SortOrder::Descending)? + .collect::>(); + log::trace!("list_auctions keys: {:?}, token_id: {:?}", keys, id); + let active_price = repo + .by_key + .list(None, SortOrder::Descending)? 
+ .take(1) + .take_while(|item| match item { + Ok((k, _)) => k.0 == id, + _ => true, + }) + .map(|el| repo.by_key.retrieve_primary_value(el)) + .collect::>>()?; + + vault_token_amounts.push(VaultTokenAmountResponse { + id, + display_symbol: parse_display_symbol(&token_info), + amount: amount.to_string(), + symbol: token_info.symbol, + symbol_key: token_info.symbol_key, + name: token_info.name, + active_price: active_price.first().cloned(), + }); + } + + Ok(vault_token_amounts) +} + +#[ocean_endpoint] +async fn list_auctions( + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let start = query.next.as_ref().map(|next| { + let vault_id = &next[0..64]; + let height = &next[64..]; + AuctionPaginationStart { + vault_id: vault_id.to_string(), + height: height.parse::().unwrap_or_default(), + } + }); + + let pagination = AuctionPagination { + start, + including_start: None, + limit: if query.size > 30 { + Some(30) + } else { + Some(query.size) + }, + }; + + let mut vaults = Vec::new(); + let liquidation_vaults = ctx.client.list_auctions(Some(pagination)).await?; + for vault in liquidation_vaults { + let loan_scheme = get_loan_scheme_cached(&ctx, vault.loan_scheme_id).await?; + let res = VaultLiquidatedResponse { + batches: map_liquidation_batches(&ctx, &vault.vault_id, vault.batches).await?, + vault_id: vault.vault_id, + loan_scheme, + owner_address: vault.owner_address, + state: vault.state, + liquidation_height: vault.liquidation_height, + liquidation_penalty: vault.liquidation_penalty, + batch_count: vault.batch_count, + }; + vaults.push(res); + } + + Ok(ApiPagedResponse::of(vaults, query.size, |auction| { + format!("{}{}", auction.vault_id.clone(), auction.liquidation_height) + })) +} + +pub fn router(ctx: Arc) -> Router { + Router::new() + .route("/schemes", get(list_scheme)) + .route("/schemes/:id", get(get_scheme)) + .route("/collaterals", get(list_collateral_token)) + .route("/collaterals/:id", get(get_collateral_token)) + .route("/tokens", 
get(list_loan_token)) + .route("/tokens/:id", get(get_loan_token)) + .route("/vaults", get(list_vaults)) + .route("/vaults/:id", get(get_vault)) + .route( + "/vaults/:id/auctions/:height/batches/:batchIndex/history", + get(list_vault_auction_history), + ) + .route("/auctions", get(list_auctions)) + .layer(Extension(ctx)) +} diff --git a/lib/ain-ocean/src/api/masternode/mod.rs b/lib/ain-ocean/src/api/masternode/mod.rs new file mode 100644 index 00000000000..c77f4b07d00 --- /dev/null +++ b/lib/ain-ocean/src/api/masternode/mod.rs @@ -0,0 +1,167 @@ +use std::sync::Arc; + +mod state; + +use ain_macros::ocean_endpoint; +use axum::{ + extract::{Path, Query}, + routing::get, + Extension, Router, +}; +use bitcoin::Txid; +use serde::{Deserialize, Serialize}; +use snafu::OptionExt; + +use self::state::{MasternodeService, MasternodeState}; +use super::{ + query::PaginationQuery, + response::{ApiPagedResponse, Response}, + AppContext, +}; +use crate::{ + api::common::Paginate, + error::{ApiError, Error, NotFoundKind, NotFoundSnafu}, + model::Masternode, + storage::{RepositoryOps, SortOrder}, + Result, SecondaryIndex, +}; + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct MasternodeOwner { + pub address: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct MasternodeOperator { + pub address: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct MasternodeCreation { + pub height: u32, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct MasternodeResign { + pub tx: Txid, + pub height: i64, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct MasternodeData { + pub id: String, + pub sort: String, + pub state: MasternodeState, + pub minted_blocks: i32, + pub owner: MasternodeOwner, + pub operator: MasternodeOperator, + pub creation: MasternodeCreation, + pub resign: Option, + pub timelock: u16, +} + +impl MasternodeData { + fn from_with_state(v: Masternode, state: 
MasternodeState) -> Self { + Self { + id: v.id.to_string(), + sort: format!("{:08x}{}", v.block.height, v.id), + state, + minted_blocks: v.minted_blocks, + owner: MasternodeOwner { + address: v.owner_address, + }, + operator: MasternodeOperator { + address: v.operator_address, + }, + creation: MasternodeCreation { + height: v.creation_height, + }, + resign: v.resign_tx.map(|tx| MasternodeResign { + tx, + height: match v.resign_height { + None => -1, + Some(v) => i64::from(v), + }, + }), + timelock: v.timelock, + } + } +} + +#[ocean_endpoint] +async fn list_masternodes( + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let repository = &ctx.services.masternode.by_height; + let next = query + .next + .as_ref() + .map(|q| { + let height = q[0..8].parse::()?; + let txid = q[8..].parse::()?; + + Ok::<(u32, bitcoin::Txid), Error>((height, txid)) + }) + .transpose()?; + + let height = ctx + .services + .block + .by_height + .get_highest()? + .map_or(0, |b| b.height); + + let masternodes = repository + .list(next, SortOrder::Descending)? + .paginate(&query) + .map(|el| repository.retrieve_primary_value(el)) + .map(|v| { + let mn = v?; + let state = MasternodeService::new(ctx.network).get_masternode_state(&mn, height); + Ok(MasternodeData::from_with_state(mn, state)) + }) + .collect::>>()?; + + Ok(ApiPagedResponse::of( + masternodes, + query.size, + |masternode| masternode.sort.to_string(), + )) +} + +#[ocean_endpoint] +async fn get_masternode( + Path(masternode_id): Path, + Extension(ctx): Extension>, +) -> Result> { + let height = ctx + .services + .block + .by_height + .get_highest()? + .map_or(0, |b| b.height); + + let mn = ctx + .services + .masternode + .by_id + .get(&masternode_id)? 
+ .map(|mn| { + let state = MasternodeService::new(ctx.network).get_masternode_state(&mn, height); + MasternodeData::from_with_state(mn, state) + }) + .context(NotFoundSnafu { + kind: NotFoundKind::Masternode, + })?; + + Ok(Response::new(mn)) +} + +pub fn router(ctx: Arc) -> Router { + Router::new() + .route("/", get(list_masternodes)) + .route("/:id", get(get_masternode)) + .layer(Extension(ctx)) +} diff --git a/lib/ain-ocean/src/api/masternode/state.rs b/lib/ain-ocean/src/api/masternode/state.rs new file mode 100644 index 00000000000..e43ab47da2d --- /dev/null +++ b/lib/ain-ocean/src/api/masternode/state.rs @@ -0,0 +1,64 @@ +use serde::{Deserialize, Serialize}; + +use crate::{model::Masternode, network::Network}; + +#[derive(Serialize, Deserialize, Debug, Default, Clone)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +pub enum MasternodeState { + PreEnabled, + Enabled, + PreResigned, + Resigned, + PreBanned, + Banned, + #[default] + Unknown, +} + +pub struct MasternodeService { + network: Network, +} + +impl MasternodeService { + pub fn new(network: Network) -> Self { + Self { network } + } + + fn get_mn_activation_delay(&self, height: u32) -> u32 { + let eunos_height = self.network.fork_heights().df8_eunos_height; + if height < eunos_height { + self.network.params().activation_delay + } else { + self.network.params().new_activation_delay + } + } + + fn get_mn_resign_delay(&self, height: u32) -> u32 { + let eunos_height = self.network.fork_heights().df8_eunos_height; + if height < eunos_height { + self.network.params().resign_delay + } else { + self.network.params().new_resign_delay + } + } + + pub fn get_masternode_state(&self, masternode: &Masternode, height: u32) -> MasternodeState { + if let Some(resign_height) = masternode.resign_height { + let resign_delay = self.get_mn_resign_delay(resign_height); + if height < resign_height + resign_delay { + MasternodeState::PreResigned + } else { + MasternodeState::Resigned + } + } else { + let activation_delay = 
self.get_mn_activation_delay(masternode.creation_height); + if masternode.creation_height == 0 + || height >= masternode.creation_height + activation_delay + { + MasternodeState::Enabled + } else { + MasternodeState::PreEnabled + } + } + } +} diff --git a/lib/ain-ocean/src/api/mod.rs b/lib/ain-ocean/src/api/mod.rs new file mode 100644 index 00000000000..a68638145b4 --- /dev/null +++ b/lib/ain-ocean/src/api/mod.rs @@ -0,0 +1,168 @@ +use std::{net::SocketAddr, str::FromStr, sync::Arc}; + +use axum::{ + body::Body, + extract::{ConnectInfo, Request}, + http::{HeaderValue, StatusCode}, + middleware::{from_fn, Next}, + response::{IntoResponse, Response}, + Json, Router, +}; + +mod address; +mod block; +mod cache; +pub mod common; +mod debug; +mod fee; +mod governance; +mod loan; +mod masternode; +mod oracle; +mod path; +mod pool_pair; +pub mod prices; +mod query; +mod rawtx; +mod response; +mod stats; +mod tokens; +mod transactions; + +use defichain_rpc::Client; +use serde::{Deserialize, Serialize}; + +use crate::{network::Network, Result, Services}; + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +struct NotFound { + status_code: u16, + message: String, + error: &'static str, +} + +async fn not_found(req: Request) -> impl IntoResponse { + let method = req.method().clone(); + let path = req.uri().path().to_string(); + + let message = format!("Cannot {method} {path}"); + ( + StatusCode::NOT_FOUND, + Json(NotFound { + status_code: StatusCode::NOT_FOUND.as_u16(), + message, + error: "Not found", + }), + ) +} + +pub struct AppContext { + services: Arc, + client: Arc, + network: Network, +} + +// NOTE(canonbrother): manually scratch cors since CorsLayer + Axum can only be supported in `tower-http 0.5` +async fn cors(request: Request, next: Next) -> Response { + let mut response = next.run(request).await; + + response + .headers_mut() + .append("Access-Control-Allow-Origin", HeaderValue::from_static("*")); + response.headers_mut().append( 
+ "Access-Control-Allow-Methods", + HeaderValue::from_static("GET"), + ); + response.headers_mut().append( + "Access-Control-Allow-Methods", + HeaderValue::from_static("POST"), + ); + response.headers_mut().append( + "Access-Control-Allow-Methods", + HeaderValue::from_static("PUT"), + ); + response.headers_mut().append( + "Access-Control-Allow-Methods", + HeaderValue::from_static("DELETE"), + ); + response.headers_mut().append( + "Access-Control-Allow-Headers", + HeaderValue::from_static("Content-Type"), + ); + response + .headers_mut() + .append("Access-Control-Max-Age", HeaderValue::from_static("10080")); // 60 * 24 * 7 + + response +} + +pub async fn ocean_router( + services: &Arc, + client: Arc, + network: String, +) -> Result { + let context = Arc::new(AppContext { + client, + services: services.clone(), + network: Network::from_str(&network)?, + }); + let main_router = Router::new() + .nest("/address/", address::router(Arc::clone(&context))) + .nest("/governance", governance::router(Arc::clone(&context))) + .nest("/loans", loan::router(Arc::clone(&context))) + .nest("/fee", fee::router(Arc::clone(&context))) + .nest("/masternodes", masternode::router(Arc::clone(&context))) + .nest("/oracles", oracle::router(Arc::clone(&context))) + .nest("/poolpairs", pool_pair::router(Arc::clone(&context))) + .nest("/prices", prices::router(Arc::clone(&context))) + .nest("/rawtx", rawtx::router(Arc::clone(&context))) + .nest("/stats", stats::router(Arc::clone(&context))) + .nest("/tokens", tokens::router(Arc::clone(&context))) + .nest("/transactions", transactions::router(Arc::clone(&context))) + .nest("/blocks", block::router(Arc::clone(&context))) + .fallback(not_found); + + let debug_router = Router::new() + .nest("/debug", debug::router(Arc::clone(&context))) + .layer(from_fn(localhost_only)); + + Ok(Router::new() + .nest( + format!("/v0/{}", context.network).as_str(), + main_router.merge(debug_router), + ) + .layer(from_fn(cors))) +} + +async fn localhost_only( + req: 
Request, + next: Next, +) -> std::result::Result { + let is_localhost = req + .extensions() + .get::>() + .map(|connect_info| connect_info.ip().is_loopback()) + .unwrap_or_else(|| { + req.headers() + .get("X-Forwarded-For") + .and_then(|addr| addr.to_str().ok()) + .map(|addr| addr.split(',').next().unwrap_or("").trim() == "127.0.0.1") + .or_else(|| { + req.headers() + .get("Host") + .and_then(|host| host.to_str().ok()) + .map(|host| { + host.starts_with("localhost:") || host.starts_with("127.0.0.1:") + }) + }) + .unwrap_or(false) + }); + + if is_localhost { + Ok(next.run(req).await) + } else { + println!("Access denied: Request is not from localhost"); + Err(StatusCode::FORBIDDEN) + } +} diff --git a/lib/ain-ocean/src/api/oracle.rs b/lib/ain-ocean/src/api/oracle.rs new file mode 100644 index 00000000000..e0ed721606e --- /dev/null +++ b/lib/ain-ocean/src/api/oracle.rs @@ -0,0 +1,164 @@ +use std::{str::FromStr, sync::Arc}; + +use ain_dftx::{Currency, Token, Weightage, COIN}; +use ain_macros::ocean_endpoint; +use axum::{ + extract::{Path, Query}, + routing::get, + Extension, Router, +}; +use bitcoin::Txid; +use rust_decimal::Decimal; +use serde::{Deserialize, Serialize}; + +use super::{ + common::parse_token_currency, + query::PaginationQuery, + response::{ApiPagedResponse, Response}, + AppContext, +}; +use crate::{ + api::common::Paginate, + error::ApiError, + model::{BlockContext, Oracle, PriceFeed}, + storage::{RepositoryOps, SortOrder}, + Result, +}; + +#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct OracleResponse { + pub id: String, + pub owner_address: String, + pub weightage: Weightage, + pub price_feeds: Vec, + pub block: BlockContext, +} + +impl OracleResponse { + fn from_with_id(id: String, v: Oracle) -> Self { + Self { + id, + owner_address: v.owner_address, + weightage: v.weightage, + price_feeds: v.price_feeds, + block: v.block, + } + } +} + +#[ocean_endpoint] +async fn list_oracles( + Query(query): Query, + 
Extension(ctx): Extension>, +) -> Result> { + let oracles = ctx + .services + .oracle + .by_id + .list(None, SortOrder::Descending)? + .take(query.size) + .map(|item| { + let (id, v) = item?; + Ok(OracleResponse::from_with_id(id.to_string(), v)) + }) + .collect::>>()?; + + Ok(ApiPagedResponse::of(oracles, query.size, |oracle| { + oracle.id.clone() + })) +} + +#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct OraclePriceFeedResponse { + pub id: String, + pub key: String, + pub sort: String, + pub token: Token, + pub currency: Currency, + pub oracle_id: Txid, + pub txid: Txid, + pub time: i32, + pub amount: String, + pub block: BlockContext, +} + +#[ocean_endpoint] +async fn get_feed( + Path((oracle_id, key)): Path<(String, String)>, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let txid = Txid::from_str(&oracle_id)?; + + let (token, currency) = parse_token_currency(&key)?; + + let key = (token, currency, txid); + + let price_feed_list = ctx + .services + .oracle_price_feed + .by_id + .list(None, SortOrder::Descending)? 
+ .paginate(&query) + .flatten() + .collect::>(); + + let mut oracle_price_feeds = Vec::new(); + + for ((token, currency, oracle_id, _), feed) in &price_feed_list { + if key.0.eq(token) && key.1.eq(currency) && key.2.eq(oracle_id) { + let amount = Decimal::from(feed.amount) / Decimal::from(COIN); + oracle_price_feeds.push(OraclePriceFeedResponse { + id: format!("{}-{}-{}-{}", token, currency, oracle_id, feed.txid), + key: format!("{}-{}-{}", token, currency, oracle_id), + sort: hex::encode(feed.block.height.to_string() + &feed.txid.to_string()), + token: token.to_owned(), + currency: currency.to_owned(), + oracle_id: oracle_id.to_owned(), + txid: feed.txid, + time: feed.time, + amount: amount.normalize().to_string(), + block: feed.block.clone(), + }); + } + } + + Ok(ApiPagedResponse::of( + oracle_price_feeds, + query.size, + |price_feed| price_feed.sort.clone(), + )) +} + +#[ocean_endpoint] +async fn get_oracle_by_address( + Path(address): Path, + Extension(ctx): Extension>, +) -> Result>> { + let oracle = ctx + .services + .oracle + .by_id + .list(None, SortOrder::Descending)? 
+ .flatten() + .filter_map(|(id, oracle)| { + if oracle.owner_address == address { + let res = OracleResponse::from_with_id(id.to_string(), oracle); + return Some(res); + } + None + }) + .next(); + + Ok(Response::new(oracle)) +} + +pub fn router(ctx: Arc) -> Router { + Router::new() + .route("/", get(list_oracles)) + .route("/:oracleId/:key/feed", get(get_feed)) + .route("/:address", get(get_oracle_by_address)) + .layer(Extension(ctx)) +} diff --git a/lib/ain-ocean/src/api/path.rs b/lib/ain-ocean/src/api/path.rs new file mode 100644 index 00000000000..26142753f50 --- /dev/null +++ b/lib/ain-ocean/src/api/path.rs @@ -0,0 +1,100 @@ +use axum::{ + async_trait, + extract::{path::ErrorKind, rejection::PathRejection, FromRequestParts}, + http::{request::Parts, StatusCode}, +}; +use serde::de::DeserializeOwned; + +use crate::error::ApiError; + +// We define our own `Path` extractor that customizes the error from `axum::extract::Path` +#[derive(Debug)] +pub struct Path(pub T); + +#[async_trait] +impl FromRequestParts for Path +where + // these trait bounds are copied from `impl FromRequest for axum::extract::path::Path` + T: DeserializeOwned + Send, + S: Send + Sync, +{ + type Rejection = ApiError; + + async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { + match axum::extract::Path::::from_request_parts(parts, state).await { + Ok(value) => Ok(Self(value.0)), + Err(rejection) => { + let error = match rejection { + PathRejection::FailedToDeserializePathParams(inner) => { + let kind = inner.into_kind(); + match &kind { + ErrorKind::WrongNumberOfParameters { .. } => ApiError::new( + StatusCode::BAD_REQUEST, + kind.to_string(), + parts.uri.to_string(), + ), + + ErrorKind::ParseErrorAtKey { key, .. } => ApiError::new( + StatusCode::BAD_REQUEST, + format!("key: {key}, {kind}"), + parts.uri.to_string(), + ), + + ErrorKind::ParseErrorAtIndex { index, .. 
} => ApiError::new( + StatusCode::BAD_REQUEST, + format!("index: {index}, {kind}"), + parts.uri.to_string(), + ), + + ErrorKind::ParseError { .. } => ApiError::new( + StatusCode::BAD_REQUEST, + kind.to_string(), + parts.uri.to_string(), + ), + + ErrorKind::InvalidUtf8InPathParam { key } => ApiError::new( + StatusCode::BAD_REQUEST, + format!("key: {key}, {kind}"), + parts.uri.to_string(), + ), + + ErrorKind::UnsupportedType { .. } => { + // this error is caused by the programmer using an unsupported type + // (such as nested maps) so respond with `500` instead + ApiError::new( + StatusCode::INTERNAL_SERVER_ERROR, + kind.to_string(), + parts.uri.to_string(), + ) + } + + ErrorKind::Message(msg) => ApiError::new( + StatusCode::BAD_REQUEST, + msg.clone(), + parts.uri.to_string(), + ), + + _ => ApiError::new( + StatusCode::BAD_REQUEST, + format!("Unhandled deserialization error: {kind}"), + parts.uri.to_string(), + ), + } + } + PathRejection::MissingPathParams(error) => ApiError::new( + StatusCode::INTERNAL_SERVER_ERROR, + error.to_string(), + parts.uri.to_string(), + ), + _ => ApiError::new( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Unhandled path rejection: {rejection}"), + parts.uri.to_string(), + ), + }; + + Err(error) + } + } + } +} diff --git a/lib/ain-ocean/src/api/pool_pair/mod.rs b/lib/ain-ocean/src/api/pool_pair/mod.rs new file mode 100644 index 00000000000..1fa71345332 --- /dev/null +++ b/lib/ain-ocean/src/api/pool_pair/mod.rs @@ -0,0 +1,626 @@ +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; + +use ain_macros::ocean_endpoint; +use axum::{routing::get, Extension, Router}; +use defichain_rpc::{json::poolpair::PoolPairInfo, RpcApi}; +use futures::future::try_join_all; +use path::{ + get_all_swap_paths, get_token_identifier, sync_token_graph_if_empty, BestSwapPathResponse, + SwapPathsResponse, +}; +use petgraph::graphmap::UnGraphMap; +use price::DexPriceResponse; +use rust_decimal::Decimal; +use serde::{Deserialize, Serialize}; +use 
serde_json::json; +use serde_with::skip_serializing_none; +use service::{ + check_swap_type, find_swap_from, find_swap_to, get_aggregated_in_usd, get_apr, + get_total_liquidity_usd, get_usd_volume, PoolPairVolumeResponse, PoolSwapFromToData, SwapType, +}; + +use super::{ + cache::{get_pool_pair_cached, list_pool_pairs_cached}, + common::{parse_dat_symbol, parse_pool_pair_symbol, parse_query_height_txno}, + path::Path, + query::{PaginationQuery, Query}, + response::{ApiPagedResponse, Response}, + AppContext, +}; +use crate::{ + error::{ApiError, Error, NotFoundKind}, + model::{BlockContext, PoolSwap, PoolSwapAggregated, PoolSwapAggregatedId}, + storage::{InitialKeyProvider, RepositoryOps, SortOrder}, + PoolSwap as PoolSwapRepository, Result, TokenIdentifier, +}; + +pub mod path; +pub mod price; +pub mod service; + +#[derive(Deserialize)] +struct SwapAggregate { + id: String, + interval: u32, +} + +#[derive(Debug, Deserialize, Default)] +struct DexPrices { + denomination: String, +} + +#[skip_serializing_none] +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PoolSwapVerboseResponse { + id: String, + sort: String, + txid: String, + txno: usize, + pool_pair_id: String, + from_amount: String, + from_token_id: u64, + block: BlockContext, + from: Option, + to: Option, + r#type: Option, +} + +impl PoolSwapVerboseResponse { + fn map( + v: PoolSwap, + from: Option, + to: Option, + swap_type: Option, + ) -> Self { + Self { + id: format!("{}-{}", v.pool_id, v.txid), + sort: format!( + "{}{}", + hex::encode(v.block.height.to_be_bytes()), + hex::encode(v.txno.to_be_bytes()), + ), + txid: v.txid.to_string(), + txno: v.txno, + pool_pair_id: v.pool_id.to_string(), + from_amount: Decimal::new(v.from_amount, 8).to_string(), + from_token_id: v.from_token_id, + from, + to, + block: v.block, + r#type: swap_type, + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PoolSwapResponse { 
+ id: String, + sort: String, + txid: String, + txno: usize, + pool_pair_id: String, + from_amount: String, + from_token_id: u64, + block: BlockContext, +} + +impl From for PoolSwapResponse { + fn from(v: PoolSwap) -> Self { + Self { + id: format!("{}-{}", v.pool_id, v.txid), + sort: format!( + "{}{}", + hex::encode(v.block.height.to_be_bytes()), + hex::encode(v.txno.to_be_bytes()), + ), + txid: v.txid.to_string(), + txno: v.txno, + pool_pair_id: v.pool_id.to_string(), + from_amount: Decimal::new(v.from_amount, 8).to_string(), + from_token_id: v.from_token_id, + block: v.block, + } + } +} + +#[derive(Serialize, Debug, Clone, Default)] +#[serde(rename_all = "camelCase")] +struct PoolPairFeeResponse { + pct: Option, + in_pct: Option, + out_pct: Option, +} + +#[derive(Serialize, Debug, Clone, Default)] +#[serde(rename_all = "camelCase")] +struct PoolPairTokenResponse { + id: String, + name: String, + symbol: String, + display_symbol: String, + reserve: String, + block_commission: String, + fee: Option, +} + +#[derive(Serialize, Debug, Clone, Default)] +struct PoolPairPriceRatioResponse { + ab: String, + ba: String, +} + +#[derive(Serialize, Debug, Clone, Default)] +struct PoolPairTotalLiquidityResponse { + token: Option, + usd: Option, +} + +#[derive(Serialize, Debug, Clone, Default)] +struct PoolPairCreationResponse { + tx: String, + height: i64, +} + +#[derive(Serialize, Debug, Clone, Default)] +pub struct PoolPairAprResponse { + pub total: Decimal, + pub reward: Decimal, + pub commission: Decimal, +} + +#[derive(Serialize, Debug, Clone, Default)] +#[serde(rename_all = "camelCase")] +pub struct PoolPairResponse { + id: String, + symbol: String, + display_symbol: String, + name: String, + status: bool, + token_a: PoolPairTokenResponse, + token_b: PoolPairTokenResponse, + price_ratio: PoolPairPriceRatioResponse, + commission: String, + total_liquidity: PoolPairTotalLiquidityResponse, + trade_enabled: bool, + owner_address: String, + reward_pct: String, + 
reward_loan_pct: String, + custom_rewards: Option>, + creation: PoolPairCreationResponse, + apr: PoolPairAprResponse, + volume: PoolPairVolumeResponse, +} + +impl PoolPairResponse { + pub fn from_with_id( + id: String, + p: PoolPairInfo, + a_token_name: String, + b_token_name: String, + total_liquidity_usd: Decimal, + apr: PoolPairAprResponse, + volume: PoolPairVolumeResponse, + ) -> Result { + let (a, b) = parse_pool_pair_symbol(&p.symbol)?; + let a_parsed = parse_dat_symbol(&a); + let b_parsed = parse_dat_symbol(&b); + + Ok(Self { + id, + symbol: p.symbol.clone(), + display_symbol: format!("{a_parsed}-{b_parsed}"), + name: p.name, + status: p.status, + token_a: PoolPairTokenResponse { + symbol: a, + display_symbol: a_parsed, + id: p.id_token_a, + name: a_token_name, + reserve: p.reserve_a.to_string(), + block_commission: p.block_commission_a.to_string(), + fee: p.dex_fee_pct_token_a.map(|_| PoolPairFeeResponse { + pct: p.dex_fee_pct_token_a.map(|fee| fee.to_string()), + in_pct: p.dex_fee_in_pct_token_a.map(|fee| fee.to_string()), + out_pct: p.dex_fee_out_pct_token_a.map(|fee| fee.to_string()), + }), + }, + token_b: PoolPairTokenResponse { + symbol: b, + display_symbol: b_parsed, + id: p.id_token_b, + name: b_token_name, + reserve: p.reserve_b.to_string(), + block_commission: p.block_commission_b.to_string(), + fee: p.dex_fee_pct_token_b.map(|_| PoolPairFeeResponse { + pct: p.dex_fee_pct_token_b.map(|fee| fee.to_string()), + in_pct: p.dex_fee_in_pct_token_b.map(|fee| fee.to_string()), + out_pct: p.dex_fee_out_pct_token_b.map(|fee| fee.to_string()), + }), + }, + price_ratio: PoolPairPriceRatioResponse { + ab: p.reserve_a_reserve_b.to_string(), + ba: p.reserve_b_reserve_a.to_string(), + }, + commission: p.commission.to_string(), + total_liquidity: PoolPairTotalLiquidityResponse { + token: Some(p.total_liquidity.to_string()), + usd: Some(total_liquidity_usd.to_string()), + }, + trade_enabled: p.trade_enabled, + owner_address: p.owner_address, + reward_pct: 
p.reward_pct.to_string(), + reward_loan_pct: p.reward_loan_pct.to_string(), + custom_rewards: p.custom_rewards, + creation: PoolPairCreationResponse { + tx: p.creation_tx, + height: p.creation_height, + }, + apr, + volume, + }) + } +} + +async fn map_pool_pair_response( + ctx: &Arc, + id: String, + p: PoolPairInfo, +) -> Result { + let a_token = ain_cpp_imports::get_dst_token(p.id_token_a.clone()); + if a_token.is_null() { + return Err(Error::NotFound { + kind: NotFoundKind::Token { id: p.id_token_a }, + }); + } + let b_token = ain_cpp_imports::get_dst_token(p.id_token_b.clone()); + if b_token.is_null() { + return Err(Error::NotFound { + kind: NotFoundKind::Token { id: p.id_token_b }, + }); + } + + let total_liquidity_usd = get_total_liquidity_usd(ctx, &p).await?; + let apr = get_apr(ctx, &id, &p).await?; + let volume = get_usd_volume(ctx, &id).await?; + let res = PoolPairResponse::from_with_id( + id, + p, + a_token.name.to_owned(), + b_token.name.to_owned(), + total_liquidity_usd, + apr, + volume, + )?; + + Ok(res) +} + +#[ocean_endpoint] +async fn list_pool_pairs( + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let next = query.next.map(|n| n.parse::()).transpose()?; + + let mut pools = list_pool_pairs_cached(&ctx, Some(query.size as u64), next).await?; + if pools.0.is_empty() { + pools = ctx + .client + .call( + "listpoolpairs", + &[ + json!({ + "limit": query.size, + "start": next.unwrap_or_default(), + "including_start": next.is_none() + }), + true.into(), + ], + ) + .await?; + } + + let fut = pools + .0 + .into_iter() + .filter(|(_, p)| !p.symbol.starts_with("BURN-")) + .map(|(id, p)| async { + let res = map_pool_pair_response(&ctx, id, p).await?; + Ok::(res) + }) + .collect::>(); + + let mut res = try_join_all(fut).await?; + + res.sort_by(|a, b| { + let a_num = a.id.parse::().unwrap_or(0); + let b_num = b.id.parse::().unwrap_or(0); + a_num.cmp(&b_num) + }); + + Ok(ApiPagedResponse::of(res, query.size, |pool| { + pool.id.clone() + })) 
+} + +#[ocean_endpoint] +async fn get_pool_pair( + Path(id): Path, + Extension(ctx): Extension>, +) -> Result>> { + if let Some((id, p)) = get_pool_pair_cached(&ctx, id).await? { + let res = map_pool_pair_response(&ctx, id, p).await?; + return Ok(Response::new(Some(res))); + }; + + Err(Error::NotFound { + kind: NotFoundKind::PoolPair, + }) +} + +#[ocean_endpoint] +async fn list_pool_swaps( + Path(id): Path, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let next = query + .next + .as_ref() + .map(|q| { + let (height, txno) = parse_query_height_txno(q)?; + Ok::<(u32, u32, usize), Error>((id, height, txno)) + }) + .transpose()? + .unwrap_or(PoolSwapRepository::initial_key(id)); + + let size = if query.size > 200 { 200 } else { query.size }; + + let swaps = ctx + .services + .pool + .by_id + .list(Some(next), SortOrder::Descending)? + .take(size) + .take_while(|item| match item { + Ok((k, _)) => k.0 == id, + _ => true, + }) + .map(|item| { + let (_, swap) = item?; + Ok(PoolSwapResponse::from(swap)) + }) + .collect::>>()?; + + Ok(ApiPagedResponse::of(swaps, query.size, |swap| { + swap.sort.to_string() + })) +} + +#[ocean_endpoint] +async fn list_pool_swaps_verbose( + Path(id): Path, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let next = query + .next + .as_ref() + .map(|q| { + let (height, txno) = parse_query_height_txno(q)?; + Ok::<(u32, u32, usize), Error>((id, height, txno)) + }) + .transpose()? + .unwrap_or(PoolSwapRepository::initial_key(id)); + + let size = if query.size > 20 { 20 } else { query.size }; + + let fut = ctx + .services + .pool + .by_id + .list(Some(next), SortOrder::Descending)? 
+ .take(size) + .take_while(|item| match item { + Ok((k, _)) => k.0 == id, + _ => true, + }) + .map(|item| async { + let (_, swap) = item?; + let from = find_swap_from(&ctx, &swap).await?; + let to = find_swap_to(&ctx, &swap).await?; + + let swap_type = check_swap_type(&ctx, &swap).await?; + + let res = PoolSwapVerboseResponse::map(swap, from, to, swap_type); + Ok::(res) + }) + .collect::>(); + + let swaps = try_join_all(fut).await?; + + Ok(ApiPagedResponse::of(swaps, query.size, |swap| { + swap.sort.to_string() + })) +} + +#[derive(Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +struct PoolSwapAggregatedAggregatedResponse { + amounts: HashMap, + usd: Decimal, +} + +#[derive(Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +struct PoolSwapAggregatedResponse { + id: String, + key: String, + bucket: i64, + aggregated: PoolSwapAggregatedAggregatedResponse, + block: BlockContext, +} + +impl PoolSwapAggregatedResponse { + fn with_usd(id: PoolSwapAggregatedId, p: PoolSwapAggregated, usd: Decimal) -> Self { + Self { + id: format!("{}-{}-{}", id.0, id.1, id.2), + key: format!("{}-{}", id.0, id.1), + bucket: p.bucket, + aggregated: PoolSwapAggregatedAggregatedResponse { + amounts: p.aggregated.amounts, + usd, + }, + block: p.block, + } + } +} + +#[ocean_endpoint] +async fn list_pool_swap_aggregates( + Path(SwapAggregate { id, interval }): Path, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let pool_id = id.parse::()?; + + // bucket + let next = query + .next + .map(|bucket| { + let bucket = bucket.parse::()?; + Ok::(bucket) + }) + .transpose()? + .unwrap_or(i64::MAX); + + let repo = &ctx.services.pool_swap_aggregated; + let key_ids = repo + .by_key + .list(Some((pool_id, interval, next)), SortOrder::Descending)? 
+ .take(query.size) + .take_while(|item| match item { + Ok((k, _)) => k.0 == pool_id && k.1 == interval, + _ => true, + }) + .flatten() + .collect::>(); + + let mut aggregated_usd = Vec::::new(); + for (_, id) in key_ids { + let aggregated = repo.by_id.get(&id)?; + let Some(aggregated) = aggregated else { + continue; + }; + let usd = get_aggregated_in_usd(&ctx, &aggregated.aggregated).await?; + let aggregate_with_usd = PoolSwapAggregatedResponse::with_usd(id, aggregated, usd); + aggregated_usd.push(aggregate_with_usd); + } + + Ok(ApiPagedResponse::of( + aggregated_usd, + query.size, + |aggregated| aggregated.bucket, + )) +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +struct AllSwappableTokensResponse { + from_token: TokenIdentifier, + swappable_tokens: Vec, +} + +#[ocean_endpoint] +async fn get_swappable_tokens( + Path(token_id): Path, + Extension(ctx): Extension>, +) -> Result> { + sync_token_graph_if_empty(&ctx).await?; + + let mut token_ids: HashSet = HashSet::new(); + + fn recur(graph: &UnGraphMap, token_ids: &mut HashSet, token_id: u32) { + if token_ids.contains(&token_id) { + return; + }; + token_ids.insert(token_id); + let edges = graph.edges(token_id).collect::>(); + for edge in edges { + recur(graph, token_ids, edge.0); + recur(graph, token_ids, edge.1); + } + } + + { + let graph = ctx.services.token_graph.lock().clone(); + recur(&graph, &mut token_ids, token_id.parse::()?); + } + + token_ids.remove(&token_id.parse::()?); + + let mut swappable_tokens = Vec::new(); + for id in token_ids { + let token = get_token_identifier(&ctx, &id.to_string()).await?; + swappable_tokens.push(token); + } + + Ok(Response::new(AllSwappableTokensResponse { + from_token: get_token_identifier(&ctx, &token_id).await?, + swappable_tokens, + })) +} + +#[ocean_endpoint] +async fn list_paths( + Path((token_id, to_token_id)): Path<(String, String)>, + Extension(ctx): Extension>, +) -> Result> { + let res = get_all_swap_paths(&ctx, &token_id, 
&to_token_id).await?; + + Ok(Response::new(res)) +} + +#[ocean_endpoint] +async fn get_best_path( + Path((from_token_id, to_token_id)): Path<(String, String)>, + Extension(ctx): Extension>, +) -> Result> { + let res = path::get_best_path(&ctx, &from_token_id, &to_token_id).await?; + Ok(Response::new(res)) +} + +#[ocean_endpoint] +async fn list_dex_prices( + Query(DexPrices { denomination }): Query, + Extension(ctx): Extension>, +) -> Result> { + let prices = price::list_dex_prices(&ctx, denomination).await?; + + Ok(Response::new(prices)) +} + +pub fn router(ctx: Arc) -> Router { + Router::new() + .route("/", get(list_pool_pairs)) + .route("/:id", get(get_pool_pair)) + .route("/:id/swaps", get(list_pool_swaps)) + .route("/:id/swaps/verbose", get(list_pool_swaps_verbose)) + .route("/paths/from/:fromTokenId/to/:toTokenId", get(list_paths)) + .route( + "/paths/best/from/:fromTokenId/to/:toTokenId", + get(get_best_path), + ) + .route( + "/:id/swaps/aggregate/:interval", + get(list_pool_swap_aggregates), + ) + .route("/paths/swappable/:tokenId", get(get_swappable_tokens)) + .route("/dexprices", get(list_dex_prices)) + .layer(Extension(ctx)) +} diff --git a/lib/ain-ocean/src/api/pool_pair/path.rs b/lib/ain-ocean/src/api/pool_pair/path.rs new file mode 100644 index 00000000000..fc2d8979e3e --- /dev/null +++ b/lib/ain-ocean/src/api/pool_pair/path.rs @@ -0,0 +1,542 @@ +use std::{collections::HashSet, str::FromStr, sync::Arc, time::Duration}; + +use defichain_rpc::json::poolpair::PoolPairInfo; +use rust_decimal::{prelude::FromPrimitive, Decimal, RoundingStrategy}; +use rust_decimal_macros::dec; +use serde::Serialize; +use snafu::OptionExt; + +use super::AppContext; +use crate::{ + api::{ + cache::{get_pool_pair_cached, get_token_cached, list_pool_pairs_cached}, + common::{format_number, parse_dat_symbol}, + }, + error::{ + ArithmeticOverflowSnafu, ArithmeticUnderflowSnafu, NotFoundKind, NotFoundSnafu, OtherSnafu, + }, + network::Network, + Error, Result, TokenIdentifier, +}; 
+ +enum TokenDirection { + In, + Out, +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +struct PriceRatio { + ab: String, + ba: String, +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +struct EstimatedDexFeesInPct { + ab: String, + ba: String, +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct SwapPathPoolPair { + pool_pair_id: String, + symbol: String, + token_a: TokenIdentifier, + token_b: TokenIdentifier, + price_ratio: PriceRatio, + commission_fee_in_pct: String, + #[serde(skip_serializing_if = "Option::is_none")] + estimated_dex_fees_in_pct: Option, +} + +#[derive(Debug)] +pub struct EstimatedLessDexFeeInfo { + pub estimated_return: Decimal, + pub estimated_return_less_dex_fees: Decimal, +} + +#[derive(Debug)] +struct StackSet { + set: HashSet, + stack: Vec, + size: usize, +} + +impl StackSet { + fn new() -> Self { + Self { + set: HashSet::new(), + stack: Vec::new(), + size: 0, + } + } + + fn has(&self, value: &u32) -> bool { + self.set.contains(value) + } + + fn push(&mut self, value: u32) { + self.stack.push(value); + self.set.insert(value); + self.size += 1; + } + + fn pop(&mut self) { + if let Some(value) = self.stack.pop() { + self.set.remove(&value); + self.size -= 1; + } + } + + fn path(&self, value: u32) -> Vec { + let mut path = self.stack.clone(); + path.push(value); + path + } + + fn of(value: u32, is_cycle: bool) -> Self { + let mut set = Self::new(); + if !is_cycle { + set.push(value); + } else { + set.stack.push(value); + } + set + } +} + +pub async fn get_token_identifier(ctx: &Arc, id: &str) -> Result { + let (id, token) = get_token_cached(ctx, id).await?.context(NotFoundSnafu { + kind: NotFoundKind::Token { id: id.to_string() }, + })?; + Ok(TokenIdentifier { + id, + display_symbol: parse_dat_symbol(&token.symbol), + name: token.name, + symbol: token.symbol, + }) +} + +pub async fn get_all_swap_paths( + ctx: &Arc, + from_token_id: &String, + to_token_id: &String, +) -> 
Result { + sync_token_graph_if_empty(ctx).await?; + + if from_token_id == to_token_id { + return Err(Error::Other { + msg: "Invalid tokens: fromToken must be different from toToken".to_string(), + }); + } + + let mut res = SwapPathsResponse { + from_token: get_token_identifier(ctx, from_token_id).await?, + to_token: get_token_identifier(ctx, to_token_id).await?, + paths: vec![], + }; + + if !ctx + .services + .token_graph + .lock() + .contains_node(from_token_id.parse::()?) + || !ctx + .services + .token_graph + .lock() + .contains_node(to_token_id.parse::()?) + { + return Ok(res); + } + + res.paths = compute_paths_between_tokens(ctx, from_token_id, to_token_id).await?; + + Ok(res) +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct SwapPathsResponse { + pub from_token: TokenIdentifier, + pub to_token: TokenIdentifier, + pub paths: Vec>, +} + +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +pub struct BestSwapPathResponse { + pub from_token: TokenIdentifier, + pub to_token: TokenIdentifier, + pub best_path: Vec, + #[serde(with = "rust_decimal::serde::str")] + pub estimated_return: Decimal, + #[serde(with = "rust_decimal::serde::str")] + pub estimated_return_less_dex_fees: Decimal, +} + +pub async fn get_best_path( + ctx: &Arc, + from_token_id: &String, + to_token_id: &String, +) -> Result { + let SwapPathsResponse { + from_token, + to_token, + paths, + } = get_all_swap_paths(ctx, from_token_id, to_token_id).await?; + + let mut best_path = Vec::::new(); + let mut best_return = dec!(0); + let mut best_return_less_dex_fees = dec!(0); + + for path in paths { + let path_len = path.len(); + let EstimatedLessDexFeeInfo { + estimated_return, + estimated_return_less_dex_fees, + } = compute_return_less_dex_fees_in_destination_token(&path, from_token_id).await?; + + if path_len == 1 { + return Ok(BestSwapPathResponse { + from_token, + to_token, + best_path: path, + estimated_return: estimated_return + .round_dp_with_strategy(8, 
RoundingStrategy::AwayFromZero), + estimated_return_less_dex_fees: estimated_return_less_dex_fees + .round_dp_with_strategy(8, RoundingStrategy::AwayFromZero), + }); + }; + + if estimated_return > best_return { + best_return = estimated_return; + } + + if estimated_return_less_dex_fees > best_return_less_dex_fees { + best_return_less_dex_fees = estimated_return_less_dex_fees; + best_path = path; + }; + } + + Ok(BestSwapPathResponse { + from_token, + to_token, + best_path, + estimated_return: best_return.round_dp_with_strategy(8, RoundingStrategy::AwayFromZero), + estimated_return_less_dex_fees: best_return_less_dex_fees + .round_dp_with_strategy(8, RoundingStrategy::AwayFromZero), + }) +} + +fn all_simple_paths( + ctx: &Arc, + from_token_id: &str, + to_token_id: &str, +) -> Result>> { + let from_token_id = from_token_id.parse::()?; + let to_token_id = to_token_id.parse::()?; + + let graph = &ctx.services.token_graph; + if !graph.lock().contains_node(from_token_id) { + return Err(Error::Other { + msg: format!("from_token_id not found: {from_token_id:?}"), + }); + } + if !graph.lock().contains_node(to_token_id) { + return Err(Error::Other { + msg: format!("to_token_id not found: {to_token_id:?}"), + }); + } + + let is_cycle = from_token_id == to_token_id; + + let mut stack = vec![graph + .lock() + .neighbors_directed(from_token_id, petgraph::Direction::Outgoing) + .collect::>()]; + let mut visited = StackSet::of(from_token_id, is_cycle); + + let mut paths: Vec> = Vec::new(); + while !stack.is_empty() { + let child = stack.last_mut().and_then(std::vec::Vec::pop); + if let Some(child) = child { + if visited.has(&child) { + continue; + } + if child == to_token_id { + let mut p = visited.path(child); + if is_cycle { + p[0] = from_token_id; + } + paths.push(p); + } + visited.push(child); + if !visited.has(&to_token_id) { + stack.push( + graph + .lock() + .neighbors_directed(child, petgraph::Direction::Outgoing) + .collect::>(), + ); + } else { + visited.pop(); + } + } 
else { + stack.pop(); + visited.pop(); + } + } + + Ok(paths) +} + +fn get_dex_fees_pct( + pool_pair_info: PoolPairInfo, + from_token_id: &String, + to_token_id: &String, +) -> Option { + let PoolPairInfo { + id_token_a, + id_token_b, + dex_fee_in_pct_token_a, + dex_fee_out_pct_token_a, + dex_fee_in_pct_token_b, + dex_fee_out_pct_token_b, + .. + } = pool_pair_info; + + let token_a_direction = if id_token_a == *from_token_id { + TokenDirection::In + } else { + TokenDirection::Out + }; + + let token_b_direction = if id_token_b == *to_token_id { + TokenDirection::Out + } else { + TokenDirection::In + }; + + if dex_fee_in_pct_token_a.is_none() + && dex_fee_out_pct_token_a.is_none() + && dex_fee_in_pct_token_b.is_none() + && dex_fee_out_pct_token_b.is_none() + { + return None; + } + + Some(EstimatedDexFeesInPct { + ba: match token_a_direction { + TokenDirection::In => format!("{:.8}", dex_fee_in_pct_token_a.unwrap_or_default()), + TokenDirection::Out => format!("{:.8}", dex_fee_out_pct_token_a.unwrap_or_default()), + }, + ab: match token_b_direction { + TokenDirection::In => format!("{:.8}", dex_fee_in_pct_token_b.unwrap_or_default()), + TokenDirection::Out => format!("{:.8}", dex_fee_out_pct_token_b.unwrap_or_default()), + }, + }) +} + +pub async fn compute_paths_between_tokens( + ctx: &Arc, + from_token_id: &String, + to_token_id: &String, +) -> Result>> { + let mut pool_pair_paths = Vec::new(); + + let graph = &ctx.services.token_graph; + + let paths = all_simple_paths(ctx, from_token_id, to_token_id)?; + + for path in paths { + if path.len() > 4 { + continue; + } + + let mut pool_pairs = Vec::new(); + + for i in 1..path.len() { + let token_a = path[i - 1]; + let token_b = path[i]; + + let pool_pair_id = graph + .lock() + .edge_weight(token_a, token_b) + .context(OtherSnafu { + msg: format!( + "Unexpected error encountered during path finding - could not find edge between {token_a} and {token_b}" + ) + })? 
+ .to_string(); + + let Some((_, pool_pair_info)) = get_pool_pair_cached(ctx, pool_pair_id.clone()).await? + else { + return Err(Error::Other { + msg: format!("Pool pair by id {pool_pair_id} not found"), + }); + }; + + let estimated_dex_fees_in_pct = + get_dex_fees_pct(pool_pair_info.clone(), from_token_id, to_token_id); + + let PoolPairInfo { + symbol, + id_token_a, + id_token_b, + reserve_a_reserve_b: ab, + reserve_b_reserve_a: ba, + commission, + .. + } = pool_pair_info; + + let swap_path_pool_pair = SwapPathPoolPair { + pool_pair_id, + symbol, + token_a: get_token_identifier(ctx, &id_token_a).await?, + token_b: get_token_identifier(ctx, &id_token_b).await?, + price_ratio: PriceRatio { + ab: format_number(Decimal::from_f64(ab).unwrap_or_default()), + ba: format_number(Decimal::from_f64(ba).unwrap_or_default()), + }, + commission_fee_in_pct: format_number( + Decimal::from_f64(commission).unwrap_or_default(), + ), + estimated_dex_fees_in_pct, + }; + + pool_pairs.push(swap_path_pool_pair); + } + + pool_pair_paths.push(pool_pairs); + } + + Ok(pool_pair_paths) +} + +pub async fn compute_return_less_dex_fees_in_destination_token( + path: &Vec, + from_token_id: &String, +) -> Result { + let mut estimated_return_less_dex_fees = dec!(1); + let mut estimated_return = dec!(1); + + let mut from_token_id = from_token_id.to_owned(); + let mut price_ratio; + let mut from_token_fee_pct; + let mut to_token_fee_pct; + + for pool in path { + if from_token_id == pool.token_a.id { + pool.token_b.id.clone_into(&mut from_token_id); + price_ratio = Decimal::from_str(pool.price_ratio.ba.as_str())?; + (from_token_fee_pct, to_token_fee_pct) = + if let Some(estimated_dex_fees_in_pct) = &pool.estimated_dex_fees_in_pct { + let ba = Decimal::from_str(estimated_dex_fees_in_pct.ba.as_str())?; + let ab = Decimal::from_str(estimated_dex_fees_in_pct.ab.as_str())?; + (Some(ba), Some(ab)) + } else { + (None, None) + }; + } else { + pool.token_a.id.clone_into(&mut from_token_id); + price_ratio = 
Decimal::from_str(pool.price_ratio.ab.as_str())?; + (from_token_fee_pct, to_token_fee_pct) = + if let Some(estimated_dex_fees_in_pct) = &pool.estimated_dex_fees_in_pct { + let ab = Decimal::from_str(estimated_dex_fees_in_pct.ab.as_str())?; + let ba = Decimal::from_str(estimated_dex_fees_in_pct.ba.as_str())?; + (Some(ab), Some(ba)) + } else { + (None, None) + }; + }; + + estimated_return = estimated_return + .checked_mul(price_ratio) + .context(ArithmeticOverflowSnafu)?; + + // less commission fee + let commission_fee_in_pct = Decimal::from_str(pool.commission_fee_in_pct.as_str())?; + let commission_fee = estimated_return_less_dex_fees + .checked_mul(commission_fee_in_pct) + .context(ArithmeticOverflowSnafu)?; + estimated_return_less_dex_fees = estimated_return_less_dex_fees + .checked_sub(commission_fee) + .context(ArithmeticUnderflowSnafu)?; + + // less dex fee from_token + let from_token_estimated_dex_fee = from_token_fee_pct + .unwrap_or_default() + .checked_mul(estimated_return_less_dex_fees) + .context(ArithmeticOverflowSnafu)?; + + estimated_return_less_dex_fees = estimated_return_less_dex_fees + .checked_sub(from_token_estimated_dex_fee) + .context(ArithmeticUnderflowSnafu)?; + + // convert to to_token + let from_token_estimated_return_less_dex_fee = estimated_return_less_dex_fees + .checked_mul(price_ratio) + .context(ArithmeticOverflowSnafu)?; + let to_token_estimated_dex_fee = to_token_fee_pct + .unwrap_or_default() + .checked_mul(from_token_estimated_return_less_dex_fee) + .context(ArithmeticOverflowSnafu)?; + + // less dex fee to_token + estimated_return_less_dex_fees = from_token_estimated_return_less_dex_fee + .checked_sub(to_token_estimated_dex_fee) + .context(ArithmeticUnderflowSnafu)?; + } + + Ok(EstimatedLessDexFeeInfo { + estimated_return, + estimated_return_less_dex_fees, + }) +} + +pub async fn sync_token_graph(ctx: &Arc) -> Result<()> { + let mut interval = tokio::time::interval(Duration::from_secs(120)); + + loop { + let pools = 
list_pool_pairs_cached(ctx, None, None).await?; + + // addTokensAndConnectionsToGraph + for (k, v) in pools.0 { + // isPoolPairIgnored + if !v.status { + continue; + } + // skip mainnet BURN-DFI pool + if ctx.network == Network::Mainnet && k == "48" { + continue; + } + let id_token_a = v.id_token_a.parse::()?; + let id_token_b = v.id_token_b.parse::()?; + let graph = &ctx.services.token_graph; + if !graph.lock().contains_node(id_token_a) { + graph.lock().add_node(id_token_a); + } + if !graph.lock().contains_node(id_token_b) { + graph.lock().add_node(id_token_b); + } + if !graph.lock().contains_edge(id_token_a, id_token_b) { + graph.lock().add_edge(id_token_a, id_token_b, k); + } + } + + // wait 120s + interval.tick().await; + } // end of loop +} + +pub async fn sync_token_graph_if_empty(ctx: &Arc) -> Result<()> { + if ctx.services.token_graph.lock().edge_count() == 0 { + let ctx_cloned = ctx.clone(); + tokio::spawn(async move { sync_token_graph(&ctx_cloned).await }); + return Ok(()); + }; + Ok(()) +} diff --git a/lib/ain-ocean/src/api/pool_pair/price.rs b/lib/ain-ocean/src/api/pool_pair/price.rs new file mode 100644 index 00000000000..e2e70d87e46 --- /dev/null +++ b/lib/ain-ocean/src/api/pool_pair/price.rs @@ -0,0 +1,92 @@ +use std::{collections::HashMap, sync::Arc}; + +use defichain_rpc::json::token::TokenInfo; +use rust_decimal::Decimal; +use serde::Serialize; +use snafu::OptionExt; + +use super::{path::get_best_path, AppContext}; +use crate::{ + api::{ + cache::{get_token_cached, list_tokens_cached}, + common::parse_display_symbol, + }, + error::{Error, NotFoundKind, NotFoundSnafu}, + Result, TokenIdentifier, +}; + +#[derive(Clone, Debug, Serialize, Eq, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct DexPrice { + pub token: TokenIdentifier, + #[serde(with = "rust_decimal::serde::str")] + pub denomination_price: Decimal, +} + +#[derive(Clone, Debug, Serialize, Eq, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct DexPriceResponse { + pub 
denomination: TokenIdentifier, + pub dex_prices: HashMap, +} + +fn is_untradable_token(token: &TokenInfo) -> bool { + token.is_lps || !token.is_dat || token.symbol == *"BURN" || !token.tradeable +} + +pub async fn list_dex_prices(ctx: &Arc, symbol: String) -> Result { + let (denomination_token_id, denomination_token_info) = get_token_cached(ctx, &symbol) + .await? + .context(NotFoundSnafu { + kind: NotFoundKind::Token { id: symbol }, + })?; + + if is_untradable_token(&denomination_token_info) { + return Err(Error::Other { + msg: format!( + "Token \"{}\" is invalid as it is not tradeable", + denomination_token_info.symbol + ), + }); + }; + + let tokens = list_tokens_cached(ctx) + .await? + .0 + .into_iter() + .filter(|(_, info)| !is_untradable_token(info)) + .collect::>(); + + let mut dex_prices = HashMap::::new(); + + // For every token available, compute estimated return in denomination token + for (id, info) in tokens { + if id == denomination_token_id { + continue; + } + let best_path = get_best_path(ctx, &id, &denomination_token_id).await?; + + dex_prices.insert( + info.clone().symbol, + DexPrice { + token: TokenIdentifier { + id, + display_symbol: parse_display_symbol(&info), + name: info.name, + symbol: info.symbol, + }, + denomination_price: best_path.estimated_return, + }, + ); + } + + Ok(DexPriceResponse { + denomination: TokenIdentifier { + id: denomination_token_id, + display_symbol: parse_display_symbol(&denomination_token_info), + name: denomination_token_info.name, + symbol: denomination_token_info.symbol, + }, + dex_prices, + }) +} diff --git a/lib/ain-ocean/src/api/pool_pair/service.rs b/lib/ain-ocean/src/api/pool_pair/service.rs new file mode 100644 index 00000000000..9042eaaa701 --- /dev/null +++ b/lib/ain-ocean/src/api/pool_pair/service.rs @@ -0,0 +1,749 @@ +use std::{collections::HashMap, str::FromStr, sync::Arc}; + +use ain_dftx::{deserialize, pool::CompositeSwap, DfTx, Stack}; +use bitcoin::Txid; +use cached::proc_macro::cached; +use 
defichain_rpc::{json::poolpair::PoolPairInfo, BlockchainRPC}; +use rust_decimal::{prelude::FromPrimitive, Decimal}; +use rust_decimal_macros::dec; +use serde::{Deserialize, Serialize}; +use snafu::OptionExt; + +use super::{AppContext, PoolPairAprResponse}; +use crate::{ + api::{ + cache::{get_gov_cached, get_pool_pair_cached, get_token_cached}, + common::{from_script, parse_amount, parse_display_symbol, parse_pool_pair_symbol}, + pool_pair::path::{get_best_path, BestSwapPathResponse}, + }, + error::{ + ArithmeticOverflowSnafu, ArithmeticUnderflowSnafu, DecimalConversionSnafu, Error, + NotFoundKind, OtherSnafu, + }, + indexer::PoolSwapAggregatedInterval, + model::{PoolSwap, PoolSwapAggregatedAggregated}, + storage::{RepositoryOps, SecondaryIndex, SortOrder}, + Result, +}; + +#[allow(clippy::upper_case_acronyms)] +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum SwapType { + BUY, + SELL, +} + +#[derive(Serialize, Debug, Clone, Default)] +pub struct PoolPairVolumeResponse { + pub d30: Decimal, + pub h24: Decimal, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PoolSwapFromToData { + pub address: String, + pub amount: String, + pub symbol: String, + pub display_symbol: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PoolSwapFromTo { + pub from: Option, + pub to: Option, +} + +#[cached( + result = true, + time = 600, + key = "String", + sync_writes = true, + convert = r#"{ format!("getusdperdfi") }"# +)] +pub async fn get_usd_per_dfi(ctx: &Arc) -> Result { + let mut usdt = get_pool_pair_cached(ctx, "USDT-DFI".to_string()).await?; + if usdt.is_none() { + usdt = get_pool_pair_cached(ctx, "DFI-USDT".to_string()).await?; + } + + let mut usdc = get_pool_pair_cached(ctx, "USDC-DFI".to_string()).await?; + if usdc.is_none() { + usdc = get_pool_pair_cached(ctx, "DFI-USDC".to_string()).await?; + } + + let mut total_usd = dec!(0); + let mut total_dfi = 
dec!(0); + + fn add( + p: PoolPairInfo, + mut total_usd: Decimal, + mut total_dfi: Decimal, + ) -> Result<(Decimal, Decimal)> { + let reserve_a = Decimal::from_f64(p.reserve_a).unwrap_or_default(); + let reserve_b = Decimal::from_f64(p.reserve_b).unwrap_or_default(); + if p.id_token_a == "0" { + total_usd = total_usd + .checked_add(reserve_b) + .context(ArithmeticOverflowSnafu)?; + total_dfi = total_dfi + .checked_add(reserve_a) + .context(ArithmeticOverflowSnafu)?; + } else if p.id_token_b == "0" { + total_usd = total_usd + .checked_add(reserve_a) + .context(ArithmeticOverflowSnafu)?; + total_dfi = total_dfi + .checked_add(reserve_b) + .context(ArithmeticOverflowSnafu)?; + } + Ok((total_usd, total_dfi)) + } + + if let Some((_, usdt)) = usdt { + (total_usd, total_dfi) = add(usdt, total_usd, total_dfi)?; + }; + + if let Some((_, usdc)) = usdc { + (total_usd, total_dfi) = add(usdc, total_usd, total_dfi)?; + }; + + if !total_usd.is_zero() { + let res = total_usd + .checked_div(total_dfi) + .context(ArithmeticUnderflowSnafu)?; + return Ok(res); + }; + + Ok(dec!(0)) +} + +async fn get_total_liquidity_usd_by_best_path( + ctx: &Arc, + p: &PoolPairInfo, +) -> Result { + let token = ain_cpp_imports::get_dst_token("USDT".to_string()); + if token.is_null() { + return Ok(dec!(0)); + } + let usdt_id = token.id.to_string(); + + let mut a_token_rate = dec!(1); + let mut b_token_rate = dec!(1); + + if p.id_token_a != usdt_id { + let BestSwapPathResponse { + estimated_return, .. + } = get_best_path(ctx, &p.id_token_a, &usdt_id).await?; + a_token_rate = estimated_return; + } + + if p.id_token_a != usdt_id { + let BestSwapPathResponse { + estimated_return, .. 
+ } = get_best_path(ctx, &p.id_token_b, &usdt_id).await?; + b_token_rate = estimated_return; + } + + let reserve_a = Decimal::from_f64(p.reserve_a).unwrap_or_default(); + let reserve_b = Decimal::from_f64(p.reserve_b).unwrap_or_default(); + + let a = a_token_rate + .checked_mul(reserve_a) + .context(ArithmeticOverflowSnafu)?; + + let b = b_token_rate + .checked_mul(reserve_b) + .context(ArithmeticOverflowSnafu)?; + + let res = a.checked_add(b).context(ArithmeticOverflowSnafu)?; + + Ok(res) +} + +pub async fn get_total_liquidity_usd(ctx: &Arc, p: &PoolPairInfo) -> Result { + let (a, b) = parse_pool_pair_symbol(&p.symbol)?; + + let reserve_a = Decimal::from_f64(p.reserve_a).unwrap_or_default(); + let reserve_b = Decimal::from_f64(p.reserve_b).unwrap_or_default(); + + if ["DUSD", "USDT", "USDC"].contains(&a.as_str()) { + return reserve_a + .checked_mul(dec!(2)) + .context(ArithmeticOverflowSnafu); + }; + + if ["DUSD", "USDT", "USDC"].contains(&b.as_str()) { + return reserve_b + .checked_mul(dec!(2)) + .context(ArithmeticOverflowSnafu); + }; + + let usdt_per_dfi = get_usd_per_dfi(ctx).await?; + if usdt_per_dfi.is_zero() { + return Ok(usdt_per_dfi); + }; + + if a == "DFI" { + return reserve_a + .checked_mul(dec!(2)) + .context(ArithmeticOverflowSnafu)? + .checked_mul(usdt_per_dfi) + .context(ArithmeticOverflowSnafu); + }; + + if b == "DFI" { + return reserve_b + .checked_mul(dec!(2)) + .context(ArithmeticOverflowSnafu)? + .checked_mul(usdt_per_dfi) + .context(ArithmeticOverflowSnafu); + }; + + let res = get_total_liquidity_usd_by_best_path(ctx, p).await?; + Ok(res) +} + +fn calculate_rewards(accounts: &[String], dfi_price_usdt: Decimal) -> Result { + let rewards = accounts.iter().try_fold(dec!(0), |accumulate, account| { + let (amount, token) = parse_amount(account)?; + + if token != "0" && token != "DFI" { + return Ok(accumulate); + } + + let yearly = Decimal::from_str(&amount)? 
+ .checked_mul(dec!(2880)) + .and_then(|v| v.checked_mul(dec!(365))) + .and_then(|v| v.checked_mul(dfi_price_usdt)) + .context(ArithmeticOverflowSnafu)?; + accumulate + .checked_add(yearly) + .context(ArithmeticOverflowSnafu) + })?; + Ok(rewards) +} + +async fn get_yearly_custom_reward_usd(ctx: &Arc, p: &PoolPairInfo) -> Result { + if p.custom_rewards.is_none() { + return Ok(dec!(0)); + }; + + let dfi_price_usdt = get_usd_per_dfi(ctx).await?; + if dfi_price_usdt.is_zero() { + return Ok(dfi_price_usdt); + }; + + p.custom_rewards.as_ref().map_or(Ok(dec!(0)), |rewards| { + calculate_rewards(rewards, dfi_price_usdt) + }) +} + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("getdailydfireward") }"# +)] +async fn get_daily_dfi_reward(ctx: &Arc) -> Result { + let gov = get_gov_cached(ctx, "LP_DAILY_DFI_REWARD".to_string()).await?; + + let reward = gov + .get("LP_DAILY_DFI_REWARD") + .and_then(serde_json::Value::as_f64) // eg: { "LP_DAILY_DFI_REWARD": 3664.80000000 } + .unwrap_or_default(); + + let daily_dfi_reward = Decimal::from_f64(reward).context(DecimalConversionSnafu)?; + Ok(daily_dfi_reward) +} + +async fn get_loan_token_splits(ctx: &Arc) -> Result> { + let splits = get_gov_cached(ctx, "LP_LOAN_TOKEN_SPLITS".to_string()) + .await? + .get("LP_LOAN_TOKEN_SPLITS") + .cloned(); + Ok(splits) +} + +async fn get_yearly_reward_pct_usd(ctx: &Arc, p: &PoolPairInfo) -> Result { + let dfi_price_usd = get_usd_per_dfi(ctx).await?; + let daily_dfi_reward = get_daily_dfi_reward(ctx).await?; + + let reward_pct = Decimal::from_f64(p.reward_pct).unwrap_or_default(); + reward_pct + .checked_mul(daily_dfi_reward) + .context(ArithmeticOverflowSnafu)? + .checked_mul(dec!(365)) + .context(ArithmeticOverflowSnafu)? 
+ .checked_mul(dfi_price_usd) + .context(ArithmeticOverflowSnafu) +} + +async fn get_block_subsidy(eunos_height: u32, height: u32) -> Result { + let eunos_height = Decimal::from_u32(eunos_height).context(DecimalConversionSnafu)?; + let height = Decimal::from_u32(height).context(DecimalConversionSnafu)?; + let mut block_subsidy = dec!(405.04); + + if height >= eunos_height { + let reduction_amount = dec!(0.01658); // 1.658% + let mut reductions = height + .checked_sub(eunos_height) + .context(ArithmeticUnderflowSnafu)? + .checked_div(dec!(32690)) + .context(ArithmeticUnderflowSnafu)? + .floor(); + + while reductions >= dec!(0) { + let amount = reduction_amount + .checked_mul(block_subsidy) + .context(ArithmeticOverflowSnafu)?; + if amount <= dec!(0.00001) { + return Ok(dec!(0)); + }; + block_subsidy = block_subsidy + .checked_sub(amount) + .context(ArithmeticUnderflowSnafu)?; + reductions = reductions + .checked_sub(dec!(1)) + .context(ArithmeticUnderflowSnafu)?; + } + }; + + Ok(block_subsidy) +} + +#[cached( + result = true, + time = 600, + key = "String", + sync_writes = true, + convert = r#"{ format!("getloanemission") }"# +)] +async fn get_loan_emission(ctx: &Arc) -> Result { + let info = ctx.client.get_blockchain_info().await?; + let eunos_height = info + .softforks + .get("eunos") + .and_then(|eunos| eunos.height) + .context(OtherSnafu { + msg: "BlockchainInfo eunos height field is missing", + })?; + + get_block_subsidy(eunos_height, info.blocks).await +} + +async fn get_yearly_reward_loan_usd(ctx: &Arc, id: &String) -> Result { + let splits = get_loan_token_splits(ctx).await?; + let Some(value) = splits else { + return Ok(dec!(0)); + }; + let split = value + .as_object() + .and_then(|obj| obj.get(id)) + .and_then(serde_json::Value::as_f64) + .unwrap_or_default(); + if split == 0.0 { + return Ok(dec!(0)); + } + let split = Decimal::from_f64(split).context(DecimalConversionSnafu)?; + + let dfi_price_usd = get_usd_per_dfi(ctx).await?; + + let loan_emission = 
get_loan_emission(ctx).await?; + + loan_emission + .checked_mul(split) // 60 * 60 * 24 / 30, 30 seconds = 1 block + .context(ArithmeticOverflowSnafu)? + .checked_mul(dec!(2880)) // 60 * 60 * 24 / 30, 30 seconds = 1 block + .context(ArithmeticOverflowSnafu)? + .checked_mul(dec!(365)) // 60 * 60 * 24 / 30, 30 seconds = 1 block + .context(ArithmeticOverflowSnafu)? + .checked_mul(dfi_price_usd) // 60 * 60 * 24 / 30, 30 seconds = 1 block + .context(ArithmeticOverflowSnafu) +} + +async fn gather_amount( + ctx: &Arc, + pool_id: u32, + interval: u32, + count: usize, +) -> Result { + let repository = &ctx.services.pool_swap_aggregated; + + let swaps = repository + .by_key + .list(Some((pool_id, interval, i64::MAX)), SortOrder::Descending)? + .take(count) + .take_while(|item| match item { + Ok((k, _)) => k.0 == pool_id && k.1 == interval, + _ => true, + }) + .map(|e| repository.by_key.retrieve_primary_value(e)) + .collect::>>()?; + + let mut aggregated = HashMap::::new(); + + for swap in swaps { + let token_ids = swap.aggregated.amounts.keys(); + for token_id in token_ids { + let from_amount = swap + .aggregated + .amounts + .get(token_id) + .map(|amt| Decimal::from_str(amt)) + .transpose()? + .unwrap_or(dec!(0)); + + let amount = if let Some(amount) = aggregated.get(token_id) { + amount + .checked_add(from_amount) + .context(ArithmeticOverflowSnafu)? 
+ } else { + from_amount + }; + + aggregated.insert(*token_id, amount); + } + } + + let mut volume = dec!(0); + + for token_id in aggregated.keys() { + let token_price = get_token_usd_value(ctx, token_id).await?; + let amount = aggregated.get(token_id).copied().unwrap_or(dec!(0)); + volume = volume + .checked_add( + token_price + .checked_mul(amount) + .context(ArithmeticOverflowSnafu)?, + ) + .context(ArithmeticOverflowSnafu)?; + } + + Ok(volume) +} + +#[cached( + result = true, + time = 900, // 15 mins + key = "String", + convert = r#"{ format!("getusdvolume{id}") }"# +)] +pub async fn get_usd_volume(ctx: &Arc, id: &str) -> Result { + let pool_id = id.parse::()?; + Ok(PoolPairVolumeResponse { + h24: gather_amount(ctx, pool_id, PoolSwapAggregatedInterval::OneHour as u32, 24).await?, + d30: gather_amount(ctx, pool_id, PoolSwapAggregatedInterval::OneDay as u32, 30).await?, + }) +} + +/// Estimate yearly commission rate by taking 24 hour commission x 365 days +async fn get_yearly_commission_estimate( + ctx: &Arc, + id: &str, + p: &PoolPairInfo, +) -> Result { + let volume = get_usd_volume(ctx, id).await?; + let commission = Decimal::from_f64(p.commission).unwrap_or_default(); + commission + .checked_mul(volume.h24) + .context(ArithmeticOverflowSnafu)? + .checked_mul(dec!(365)) + .context(ArithmeticOverflowSnafu) +} + +pub async fn get_apr( + ctx: &Arc, + id: &String, + p: &PoolPairInfo, +) -> Result { + let custom_usd = get_yearly_custom_reward_usd(ctx, p).await?; + let pct_usd = get_yearly_reward_pct_usd(ctx, p).await?; + let loan_usd = get_yearly_reward_loan_usd(ctx, id).await?; + let total_liquidity_usd = get_total_liquidity_usd(ctx, p).await?; + + if total_liquidity_usd.is_zero() { + return Ok(PoolPairAprResponse::default()); + } + + let yearly_usd = custom_usd + .checked_add(pct_usd) + .context(ArithmeticOverflowSnafu)? 
+ .checked_add(loan_usd) + .context(ArithmeticOverflowSnafu)?; + + if yearly_usd.is_zero() { + return Ok(PoolPairAprResponse::default()); + }; + + // 1 == 100%, 0.1 = 10% + let reward = yearly_usd + .checked_div(total_liquidity_usd) + .context(ArithmeticUnderflowSnafu)?; + + let yearly_commission = get_yearly_commission_estimate(ctx, id, p).await?; + let commission = yearly_commission + .checked_div(total_liquidity_usd) + .context(ArithmeticUnderflowSnafu)?; + + let total = reward + .checked_add(commission) + .context(ArithmeticOverflowSnafu)?; + + Ok(PoolPairAprResponse { + total, + reward, + commission, + }) +} + +async fn get_pool_pair(ctx: &Arc, a: &str, b: &str) -> Result> { + let ab = get_pool_pair_cached(ctx, format!("{a}-{b}")).await?; + if let Some((_, info)) = ab { + Ok(Some(info)) + } else { + let ba = get_pool_pair_cached(ctx, format!("{b}-{a}")).await?; + if let Some((_, info)) = ba { + Ok(Some(info)) + } else { + Ok(None) + } + } +} + +#[cached( + result = true, + time = 300, // 5 mins + key = "String", + convert = r#"{ format!("gettokenusdvalue{token_id}") }"# +)] +async fn get_token_usd_value(ctx: &Arc, token_id: &u64) -> Result { + let info = ain_cpp_imports::get_dst_token(token_id.to_string()); + if info.is_null() { + return Err(Error::NotFound { + kind: NotFoundKind::Token { + id: token_id.to_string(), + }, + }); + } + + if ["DUSD", "USDT", "USDC"].contains(&info.symbol.as_str()) { + return Ok(dec!(1)); + }; + + let dusd_pool = get_pool_pair(ctx, &info.symbol, "DUSD").await?; + if let Some(p) = dusd_pool { + let (a, _) = parse_pool_pair_symbol(&p.symbol)?; + let reserve_a = Decimal::from_f64(p.reserve_a).context(DecimalConversionSnafu)?; + let reserve_b = Decimal::from_f64(p.reserve_b).context(DecimalConversionSnafu)?; + if a == "DUSD" { + return reserve_a + .checked_div(reserve_b) + .context(ArithmeticUnderflowSnafu); + }; + return reserve_b + .checked_div(reserve_a) + .context(ArithmeticUnderflowSnafu); + } + + let dfi_pool = get_pool_pair(ctx, 
&info.symbol, "DFI").await?; + if let Some(p) = dfi_pool { + let usd_per_dfi = get_usd_per_dfi(ctx).await?; + let reserve_a = Decimal::from_f64(p.reserve_a).context(DecimalConversionSnafu)?; + let reserve_b = Decimal::from_f64(p.reserve_b).context(DecimalConversionSnafu)?; + if p.id_token_a == *"0" { + return reserve_a + .checked_div(reserve_b) + .context(ArithmeticUnderflowSnafu)? + .checked_mul(usd_per_dfi) + .context(ArithmeticOverflowSnafu); + } + return reserve_b + .checked_div(reserve_a) + .context(ArithmeticUnderflowSnafu)? + .checked_mul(usd_per_dfi) + .context(ArithmeticOverflowSnafu); + } + + Ok(dec!(0)) +} + +pub async fn get_aggregated_in_usd( + ctx: &Arc, + aggregated: &PoolSwapAggregatedAggregated, +) -> Result { + let mut value = dec!(0); + + for (token_id, amount) in &aggregated.amounts { + let token_price = get_token_usd_value(ctx, token_id).await?; + let amount = Decimal::from_str(amount)?; + value = value + .checked_add(token_price) + .context(ArithmeticOverflowSnafu)? + .checked_mul(amount) + .context(ArithmeticOverflowSnafu)?; + } + + Ok(value) +} + +fn call_dftx(ctx: &Arc, txid: Txid) -> Result> { + let vout = ctx + .services + .transaction + .vout_by_id + .list(Some((txid, 0)), SortOrder::Ascending)? + .take(1) + .take_while(|item| match item { + Ok((_, vout)) => vout.txid == txid, + _ => true, + }) + .map(|item| { + let (_, v) = item?; + Ok(v) + }) + .collect::>>()?; + + if vout.is_empty() { + return Ok(None); + } + + let bytes = &vout[0].script.hex; + if bytes.len() > 6 && bytes[0] == 0x6a && bytes[1] <= 0x4e { + let offset = 1 + match bytes[1] { + 0x4c => 2, + 0x4d => 3, + 0x4e => 4, + _ => 1, + }; + + let raw_tx = &bytes[offset..]; + let dftx = match deserialize::(raw_tx) { + Ok(stack) => stack.dftx, + Err(e) => return Err(e.into()), + }; + return Ok(Some(dftx)); + }; + + Ok(None) +} + +fn find_composite_swap_dftx(ctx: &Arc, txid: Txid) -> Result> { + let Some(dftx) = call_dftx(ctx, txid)? 
else { + return Ok(None); + }; + + let composite_swap_dftx = match dftx { + DfTx::CompositeSwap(data) => Some(data), + _ => None, + }; + // let pool_swap_dftx = match dftx { + // DfTx::PoolSwap(data) => Some(data), + // DfTx::CompositeSwap(data) => Some(data.pool_swap), + // _ => None, + // };; + + Ok(composite_swap_dftx) +} + +pub async fn find_swap_from( + ctx: &Arc, + swap: &PoolSwap, +) -> Result> { + let PoolSwap { + from, + from_amount, + from_token_id, + .. + } = swap; + let from_address = from_script(from, ctx.network)?; + + let Some((_, from_token)) = get_token_cached(ctx, &from_token_id.to_string()).await? else { + return Ok(None); + }; + + Ok(Some(PoolSwapFromToData { + address: from_address, + amount: Decimal::new(from_amount.to_owned(), 8).to_string(), + display_symbol: parse_display_symbol(&from_token), + symbol: from_token.symbol, + })) +} + +pub async fn find_swap_to( + ctx: &Arc, + swap: &PoolSwap, +) -> Result> { + let PoolSwap { + to, + to_token_id, + to_amount, + .. + } = swap; + let to_address = from_script(to, ctx.network)?; + + let Some((_, to_token)) = get_token_cached(ctx, &to_token_id.to_string()).await? else { + return Ok(None); + }; + + let display_symbol = parse_display_symbol(&to_token); + + Ok(Some(PoolSwapFromToData { + address: to_address, + amount: Decimal::new(to_amount.to_owned(), 8).to_string(), + symbol: to_token.symbol, + display_symbol, + })) +} + +async fn get_pool_swap_type(ctx: &Arc, swap: &PoolSwap) -> Result> { + let Some((_, pool_pair_info)) = get_pool_pair_cached(ctx, swap.pool_id.to_string()).await? + else { + return Ok(None); + }; + + let id_token_a = pool_pair_info.id_token_a.parse::()?; + let swap_type = if id_token_a == swap.from_token_id { + SwapType::SELL + } else { + SwapType::BUY + }; + Ok(Some(swap_type)) +} + +pub async fn check_swap_type(ctx: &Arc, swap: &PoolSwap) -> Result> { + let Some(dftx) = find_composite_swap_dftx(ctx, swap.txid)? 
else { + return get_pool_swap_type(ctx, swap).await; + }; + + if dftx.pools.iter().count() <= 1 { + return get_pool_swap_type(ctx, swap).await; + } + + let mut prev = swap.from_token_id.to_string(); + for pool in dftx.pools.iter() { + let pool_id = pool.id.0.to_string(); + let Some((_, pool_pair_info)) = get_pool_pair_cached(ctx, pool_id.clone()).await? else { + break; + }; + + // if this is current pool pair, if previous token is primary token, indicator = sell + if pool_id == swap.pool_id.to_string() { + let swap_type = if pool_pair_info.id_token_a == prev { + SwapType::SELL + } else { + SwapType::BUY + }; + return Ok(Some(swap_type)); + } + // set previous token as pair swapped out token + prev = if prev == pool_pair_info.id_token_a { + pool_pair_info.id_token_b + } else { + pool_pair_info.id_token_a + } + } + + Ok(None) +} diff --git a/lib/ain-ocean/src/api/prices.rs b/lib/ain-ocean/src/api/prices.rs new file mode 100644 index 00000000000..43340419afe --- /dev/null +++ b/lib/ain-ocean/src/api/prices.rs @@ -0,0 +1,460 @@ +use std::sync::Arc; + +use ain_dftx::{Currency, Token, Weightage, COIN}; +use ain_macros::ocean_endpoint; +use axum::{ + extract::{Path, Query}, + routing::get, + Extension, Router, +}; +use bitcoin::{hashes::Hash, Txid}; +use indexmap::IndexSet; +use rust_decimal::{prelude::ToPrimitive, Decimal}; +use serde::{Deserialize, Serialize}; +use snafu::OptionExt; + +use super::{ + common::parse_token_currency, + oracle::OraclePriceFeedResponse, + query::PaginationQuery, + response::{ApiPagedResponse, Response}, + AppContext, +}; +use crate::{ + error::{ApiError, Error, OtherSnafu}, + model::{ + BlockContext, OracleIntervalSeconds, OraclePriceActive, + OraclePriceAggregatedIntervalAggregated, PriceTicker, + }, + storage::{RepositoryOps, SortOrder}, + Result, +}; + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct OraclePriceAggregatedResponse { + pub id: String, + pub key: String, + pub sort: String, + 
pub token: Token, + pub currency: Currency, + pub aggregated: OraclePriceAggregatedAggregatedResponse, + pub block: BlockContext, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct OraclePriceAggregatedAggregatedResponse { + pub amount: String, + pub weightage: i32, + pub oracles: OraclePriceActiveNextOraclesResponse, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PriceTickerResponse { + pub id: String, //token-currency + pub sort: String, //count-height-token-currency + pub price: OraclePriceAggregatedResponse, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct OraclePriceActiveNextOraclesResponse { + pub active: i32, + pub total: i32, +} + +impl From<((Token, Currency), PriceTicker)> for PriceTickerResponse { + fn from(ticker: ((Token, Currency), PriceTicker)) -> Self { + let token = ticker.0 .0; + let currency = ticker.0 .1; + let price_ticker = ticker.1; + let amount = price_ticker.price.aggregated.amount / Decimal::from(COIN); + Self { + id: format!("{}-{}", token, currency), + sort: format!( + "{}{}{}-{}", + hex::encode(price_ticker.price.aggregated.oracles.total.to_be_bytes()), + hex::encode(price_ticker.price.block.height.to_be_bytes()), + token.clone(), + currency.clone(), + ), + price: OraclePriceAggregatedResponse { + id: format!("{}-{}-{}", token, currency, price_ticker.price.block.height), + key: format!("{}-{}", token, currency), + sort: format!( + "{}{}", + hex::encode(price_ticker.price.block.median_time.to_be_bytes()), + hex::encode(price_ticker.price.block.height.to_be_bytes()), + ), + token, + currency, + aggregated: OraclePriceAggregatedAggregatedResponse { + amount: format!("{:.8}", amount), + weightage: price_ticker + .price + .aggregated + .weightage + .to_i32() + .unwrap_or_default(), + oracles: OraclePriceActiveNextOraclesResponse { + active: price_ticker + .price + .aggregated + 
.oracles + .active + .to_i32() + .unwrap_or_default(), + total: price_ticker.price.aggregated.oracles.total, + }, + }, + block: price_ticker.price.block, + }, + } + } +} + +#[ocean_endpoint] +async fn list_prices( + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let sorted_ids = ctx + .services + .price_ticker + .by_key + .list(None, SortOrder::Descending)? + .map(|item| { + let (_, id) = item?; + Ok(id) + }) + .collect::>>()?; + + // use IndexSet to rm dup without changing order + let mut sorted_ids_set = IndexSet::new(); + for id in sorted_ids { + sorted_ids_set.insert(id); + } + + let prices = sorted_ids_set + .into_iter() + .take(query.size) + .map(|id| { + let price_ticker = ctx + .services + .price_ticker + .by_id + .get(&id)? + .context(OtherSnafu { + msg: "Missing price ticker index", + })?; + + Ok(PriceTickerResponse::from((id, price_ticker))) + }) + .collect::>>()?; + + Ok(ApiPagedResponse::of(prices, query.size, |price| { + price.sort.to_string() + })) +} + +#[ocean_endpoint] +async fn get_price( + Path(key): Path, + Extension(ctx): Extension>, +) -> Result>> { + let (token, currency) = parse_token_currency(&key)?; + + let price_ticker = ctx + .services + .price_ticker + .by_id + .get(&(token.clone(), currency.clone()))?; + + let Some(price_ticker) = price_ticker else { + return Ok(Response::new(None)); + }; + + let res = PriceTickerResponse::from(((token, currency), price_ticker)); + + Ok(Response::new(Some(res))) +} + +#[ocean_endpoint] +async fn get_feed( + Path(key): Path, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let (token, currency) = parse_token_currency(&key)?; + + let repo = &ctx.services.oracle_price_aggregated; + let id = (token.to_string(), currency.to_string(), u32::MAX); + let oracle_aggregated = repo + .by_id + .list(Some(id), SortOrder::Descending)? 
+ .take(query.size) + .take_while(|item| match item { + Ok((k, _)) => k.0 == token.clone() && k.1 == currency.clone(), + _ => true, + }) + .map(|item| { + let (k, v) = item?; + let res = OraclePriceAggregatedResponse { + id: format!("{}-{}-{}", k.0, k.1, k.2), + key: format!("{}-{}", k.0, k.1), + sort: format!( + "{}{}", + hex::encode(v.block.median_time.to_be_bytes()), + hex::encode(v.block.height.to_be_bytes()), + ), + token: token.clone(), + currency: currency.clone(), + aggregated: OraclePriceAggregatedAggregatedResponse { + amount: format!("{:.8}", v.aggregated.amount), + weightage: v.aggregated.weightage.to_i32().unwrap_or_default(), + oracles: OraclePriceActiveNextOraclesResponse { + active: v.aggregated.oracles.active.to_i32().unwrap_or_default(), + total: v.aggregated.oracles.total, + }, + }, + block: v.block, + }; + Ok(res) + }) + .collect::>>()?; + + Ok(ApiPagedResponse::of( + oracle_aggregated, + query.size, + |aggregated| aggregated.sort.clone(), + )) +} + +#[ocean_endpoint] +async fn get_feed_active( + Path(key): Path, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let (token, currency) = parse_token_currency(&key)?; + + let key = (token, currency); + let repo = &ctx.services.oracle_price_active; + let price_active = ctx + .services + .oracle_price_active + .by_key + .list(Some(key), SortOrder::Descending)? 
+ .take(query.size) + .flat_map(|item| { + let (_, id) = item?; + let item = repo.by_id.get(&id)?; + Ok::, Error>(item) + }) + .flatten() + .collect::>(); + + Ok(ApiPagedResponse::of(price_active, query.size, |price| { + price.sort.to_string() + })) +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct OraclePriceAggregatedIntervalResponse { + pub id: String, + pub key: String, + pub sort: String, + pub token: Token, + pub currency: Currency, + pub aggregated: OraclePriceAggregatedIntervalAggregated, + pub block: BlockContext, + /** + * Aggregated interval time range in seconds. + * - Interval that aggregated in seconds + * - Start Time Inclusive + * - End Time Exclusive + */ + time: OraclePriceAggregatedIntervalTime, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct OraclePriceAggregatedIntervalTime { + interval: i64, + start: i64, + end: i64, +} + +#[ocean_endpoint] +async fn get_feed_with_interval( + Path((key, interval)): Path<(String, String)>, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let (token, currency) = parse_token_currency(&key)?; + let interval = interval.parse::()?; + + let interval_type = match interval { + 900 => OracleIntervalSeconds::FifteenMinutes, + 3600 => OracleIntervalSeconds::OneHour, + 86400 => OracleIntervalSeconds::OneDay, + _ => return Err(From::from("Invalid oracle interval")), + }; + let key = (token, currency, interval_type); + let repo = &ctx.services.oracle_price_aggregated_interval; + + let keys = repo + .by_key + .list(Some(key), SortOrder::Descending)? 
+ .take(query.size) + .flatten() + .collect::>(); + + let mut prices = Vec::new(); + for ((token, currency, _), id) in keys { + let item = repo.by_id.get(&id)?; + + let Some(item) = item else { continue }; + + let start = item.block.median_time - (item.block.median_time % interval); + + let price = OraclePriceAggregatedIntervalResponse { + id: format!("{}-{}-{:?}", id.0, id.1, id.2), + key: format!("{}-{}", id.0, id.1), + sort: format!( + "{}{}", + hex::encode(item.block.median_time.to_be_bytes()), + hex::encode(item.block.height.to_be_bytes()), + ), + token, + currency, + aggregated: OraclePriceAggregatedIntervalAggregated { + amount: item.aggregated.amount, + weightage: item.aggregated.weightage, + oracles: item.aggregated.oracles, + count: item.aggregated.count, + }, + block: item.block, + time: OraclePriceAggregatedIntervalTime { + interval, + start, + end: start + interval, + }, + }; + prices.push(price); + } + + Ok(ApiPagedResponse::of(prices, query.size, |item| { + item.sort.clone() + })) +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct PriceOracleResponse { + pub id: String, + pub key: String, + pub token: Token, + pub currency: Currency, + pub oracle_id: String, + pub weightage: Weightage, + pub feed: Option, + pub block: BlockContext, +} + +#[ocean_endpoint] +async fn list_price_oracles( + Path(key): Path, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let (token, currency) = parse_token_currency(&key)?; + + let id = ( + token.clone(), + currency.clone(), + Txid::from_byte_array([0xffu8; 32]), + ); + let token_currencies = ctx + .services + .oracle_token_currency + .by_id + .list(Some(id.clone()), SortOrder::Descending)? 
+ .take(query.size) + .take_while(|item| match item { + Ok((k, _)) => k.0 == id.0 && k.1 == id.1, + _ => true, + }) + .flatten() + .collect::>(); + + let mut prices = Vec::new(); + for ((t, c, oracle_id), token_currency) in token_currencies { + let feed = ctx + .services + .oracle_price_feed + .by_id + .list( + Some(( + token.clone(), + currency.clone(), + oracle_id, + Txid::from_byte_array([0xffu8; 32]), + )), + SortOrder::Descending, + )? + // .take(1) + .take_while(|item| match item { + Ok((k, _)) => k.0 == token && k.1 == currency && k.2 == oracle_id, + _ => true, + }) + .next() + .transpose()?; + + prices.push(PriceOracleResponse { + id: format!("{}-{}-{}", t, c, oracle_id), + key: format!("{}-{}", t, c), + token: t, + currency: c, + oracle_id: oracle_id.to_string(), + weightage: token_currency.weightage, + block: token_currency.block, + feed: feed.map(|(id, f)| { + let token = id.0; + let currency = id.1; + let oracle_id = id.2; + let txid = id.3; + OraclePriceFeedResponse { + id: format!("{}-{}-{}-{}", token, currency, oracle_id, txid), + key: format!("{}-{}-{}", token, currency, oracle_id), + sort: hex::encode(f.block.height.to_string() + &f.txid.to_string()), + token: token.clone(), + currency: currency.clone(), + oracle_id, + txid: f.txid, + time: f.time, + amount: f.amount.to_string(), + block: f.block, + } + }), + }); + } + + Ok(ApiPagedResponse::of(prices, query.size, |price| { + price.oracle_id.to_string() + })) +} + +pub fn router(ctx: Arc) -> Router { + Router::new() + .route("/", get(list_prices)) + .route("/:key", get(get_price)) + .route("/:key/feed/active", get(get_feed_active)) + .route("/:key/feed", get(get_feed)) + .route("/:key/feed/interval/:interval", get(get_feed_with_interval)) + .route("/:key/oracles", get(list_price_oracles)) + .layer(Extension(ctx)) +} diff --git a/lib/ain-ocean/src/api/query.rs b/lib/ain-ocean/src/api/query.rs new file mode 100644 index 00000000000..f6f63a19f36 --- /dev/null +++ b/lib/ain-ocean/src/api/query.rs @@ 
-0,0 +1,75 @@ +use axum::{ + async_trait, + extract::FromRequestParts, + http::{request::Parts, StatusCode}, +}; +use serde::{ + de::{DeserializeOwned, Deserializer}, + Deserialize, +}; +use serde_with::{serde_as, DisplayFromStr}; + +use crate::error::ApiError; + +const DEFAUT_PAGINATION_SIZE: usize = 30; + +pub fn default_pagination_size() -> usize { + DEFAUT_PAGINATION_SIZE +} + +#[serde_as] +#[derive(Deserialize, Debug)] +pub struct PaginationQuery { + #[serde_as(as = "DisplayFromStr")] + #[serde(default = "default_pagination_size")] + pub size: usize, + #[serde(default)] + #[serde(deserialize_with = "undefined_to_none")] + pub next: Option, +} + +impl Default for PaginationQuery { + fn default() -> Self { + Self { + size: DEFAUT_PAGINATION_SIZE, + next: None, + } + } +} + +fn undefined_to_none<'de, D>(d: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let v: Option = Deserialize::deserialize(d)?; + match v { + Some(v) if v.as_str() != "undefined" => Ok(Some(v)), + _ => Ok(None), + } +} + +pub struct Query(pub T); + +#[async_trait] +impl FromRequestParts for Query +where + T: Default + DeserializeOwned, + S: Send + Sync, +{ + type Rejection = ApiError; + + async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { + let query = parts.uri.query().unwrap_or_default(); + if query.is_empty() { + return Ok(Self(T::default())); + } + match serde_urlencoded::from_str(query) { + Ok(v) => Ok(Self(v)), + Err(e) => Err(ApiError::new( + StatusCode::BAD_REQUEST, + format!("Invalid query parameter value for {query}. 
{e}"), + parts.uri.to_string(), + )), + } + } +} diff --git a/lib/ain-ocean/src/api/rawtx.rs b/lib/ain-ocean/src/api/rawtx.rs new file mode 100644 index 00000000000..002f2f9538a --- /dev/null +++ b/lib/ain-ocean/src/api/rawtx.rs @@ -0,0 +1,221 @@ +use std::{result::Result as StdResult, str::FromStr, sync::Arc}; + +use ain_dftx::{deserialize, DfTx, COIN}; +use ain_macros::ocean_endpoint; +use axum::{ + extract::{Json, Path}, + routing::{get, post}, + Extension, Router, +}; +use bitcoin::{Transaction, Txid}; +use defichain_rpc::{PoolPairRPC, RpcApi}; +use log::trace; +use rust_decimal::prelude::ToPrimitive; +use serde::{Deserialize, Serialize, Serializer}; +use snafu::location; + +use super::{query::Query, response::Response, AppContext}; +use crate::{ + error::{ApiError, NotFoundKind}, + model::{default_max_fee_rate, MempoolAcceptResult, RawTransactionResult, RawTxDto}, + Error, Result, +}; + +enum TransactionResponse { + HexString(String), + TransactionDetails(Box), +} + +#[derive(Deserialize, Default)] +struct QueryParams { + verbose: bool, +} + +#[ocean_endpoint] +async fn send_raw_tx( + Extension(ctx): Extension>, + Json(raw_tx_dto): Json, +) -> Result { + validate(ctx.clone(), raw_tx_dto.hex.clone()).await?; + let max_fee = match raw_tx_dto.max_fee_rate { + Some(fee_rate) => { + let fee_in_satoshis = fee_rate.checked_mul(COIN.into()); + match fee_in_satoshis { + Some(value) => Some(value.to_u64().unwrap_or_default()), + None => Some(default_max_fee_rate().to_sat()), + } + } + None => Some(default_max_fee_rate().to_sat()), + }; + match ctx + .client + .send_raw_transaction(raw_tx_dto.hex, max_fee) + .await + { + Ok(tx_hash) => Ok(tx_hash.to_string()), + Err(e) => { + if e.to_string().contains("TX decode failed") { + Err(Error::BadRequest { + msg: "Transaction decode failed".to_string(), + }) + } else { + Err(Error::RpcError { + error: e, + location: location!(), + }) + } + } + } +} +#[ocean_endpoint] +async fn test_raw_tx( + Extension(ctx): Extension>, + 
Json(raw_tx_dto): Json, +) -> Result>> { + let trx = defichain_rpc::RawTx::raw_hex(raw_tx_dto.hex); + let max_fee = match raw_tx_dto.max_fee_rate { + Some(fee_rate) => { + let fee_in_satoshis = fee_rate.checked_mul(COIN.into()); + match fee_in_satoshis { + Some(value) => Some(value.to_u64().unwrap_or_default()), + None => Some(default_max_fee_rate().to_sat()), + } + } + None => Some(default_max_fee_rate().to_sat()), + }; + match ctx.client.test_mempool_accept(&[trx], max_fee).await { + Ok(mempool_tx) => { + let results = mempool_tx + .into_iter() + .map(|tx_result| MempoolAcceptResult { + txid: tx_result.txid, + allowed: tx_result.allowed, + reject_reason: tx_result.reject_reason, + vsize: tx_result.vsize, + fees: tx_result.fees.map(|f| f.base), + }) + .collect::>(); + Ok(Response::new(results)) + } + Err(e) => { + if e.to_string().contains("TX decode failed") { + Err(Error::BadRequest { + msg: "Transaction decode failed".to_string(), + }) + } else { + Err(Error::RpcError { + error: e, + location: location!(), + }) + } + } + } +} + +impl Serialize for TransactionResponse { + fn serialize(&self, serializer: S) -> StdResult + where + S: Serializer, + { + match *self { + Self::HexString(ref s) => serializer.serialize_str(s), + Self::TransactionDetails(ref details) => details.serialize(serializer), + } + } +} + +#[ocean_endpoint] +async fn get_raw_tx( + Extension(ctx): Extension>, + Path(txid): Path, + Query(QueryParams { verbose }): Query, +) -> Result { + let tx_hash = Txid::from_str(&txid)?; + if !verbose { + let tx_hex = ctx.client.get_raw_transaction_hex(&tx_hash, None).await.map_err(|e| { + if e.to_string().contains("No such mempool or blockchain transaction. 
Use gettransaction for wallet transactions.") { + Error::NotFound { kind: NotFoundKind::RawTx } + } else { + Error::RpcError { error: e, location: location!() } + } + })?; + Ok(TransactionResponse::HexString(tx_hex)) + } else { + let tx_info = ctx.client.get_raw_transaction_info(&tx_hash, None).await?; + let result = RawTransactionResult { + in_active_chain: tx_info.in_active_chain, + hex: tx_info.hex, + txid: tx_info.txid, + hash: tx_info.hash, + size: tx_info.size, + vsize: tx_info.vsize, + version: tx_info.version, + locktime: tx_info.locktime, + vin: tx_info.vin, + vout: tx_info.vout, + blockhash: tx_info.blockhash, + confirmations: tx_info.confirmations, + time: tx_info.time, + blocktime: tx_info.blocktime, + }; + Ok(TransactionResponse::TransactionDetails(Box::new(result))) + } +} + +async fn validate(ctx: Arc, hex: String) -> Result<()> { + if !hex.starts_with("040000000001") { + return Ok(()); + } + let data = hex::decode(hex)?; + let trx = deserialize::(&data)?; + let bytes = trx.output[0].clone().script_pubkey.into_bytes(); + let tx: Option = if bytes.len() > 2 && bytes[0] == 0x6a && bytes[1] <= 0x4e { + let offset = 1 + match bytes[1] { + 0x4c => 2, + 0x4d => 3, + 0x4e => 4, + _ => 1, + }; + + let raw_tx = &bytes[offset..]; + Some(deserialize::(raw_tx)?) 
+ } else { + return Ok(()); + }; + + if let Some(tx) = tx { + if let DfTx::CompositeSwap(composite_swap) = tx { + if composite_swap.pools.as_ref().is_empty() { + return Ok(()); + } + let pool_id = composite_swap.pools.iter().last().unwrap(); + let tokio_id = composite_swap.pool_swap.to_token_id.0.to_string(); + let pool_pair = ctx + .client + .get_pool_pair(pool_id.to_string(), Some(true)) + .await?; + for (_, pool_pair_info) in pool_pair.0 { + if pool_pair_info.id_token_a.eq(&tokio_id) + || pool_pair_info.id_token_b.eq(&tokio_id) + { + trace!("Found a match: {pool_pair_info:?}"); + } + } + Ok(()) + } else { + Err(Error::BadRequest { + msg: "Transaction is not a composite swap".to_string(), + }) + } + } else { + Ok(()) + } +} + +pub fn router(ctx: Arc) -> Router { + Router::new() + .route("/send", post(send_raw_tx)) + .route("/test", post(test_raw_tx)) + .route("/:txid", get(get_raw_tx)) + .layer(Extension(ctx)) +} diff --git a/lib/ain-ocean/src/api/response.rs b/lib/ain-ocean/src/api/response.rs new file mode 100644 index 00000000000..ead721a2781 --- /dev/null +++ b/lib/ain-ocean/src/api/response.rs @@ -0,0 +1,159 @@ +use serde::Serialize; +use serde_with::skip_serializing_none; + +#[derive(Debug, Serialize)] +pub struct Response { + data: T, +} + +impl Response { + pub fn new(data: T) -> Self { + Self { data } + } +} + +/// ApiPagedResponse indicates that this response of data array slice is part of a sorted list of items. +/// Items are part of a larger sorted list and the slice indicates a window within the large sorted list. +/// Each ApiPagedResponse holds the data array and the "token" for next part of the slice. +/// The next token should be passed via query 'next' and only used when getting the next slice. +/// Hence the first request, the next token is always empty and not provided. +/// +/// With ascending sorted list and a limit of 3 items per slice will have the behaviour as such. 
+/// +/// SORTED : | [1] [2] [3] | [4] [5] [6] | [7] [8] [9] | [10] +/// Query 1 : Data: [1] [2] [3], Next: 3, Operator: GT (>) +/// Query 2 : Data: [4] [5] [6], Next: 6, Operator: GT (>) +/// Query 3 : Data: [7] [8] [9], Next: 9, Operator: GT (>) +/// Query 4 : Data: [10], Next: undefined +/// +/// This design is resilient to a mutating sorted list, where pagination is not. +/// +/// SORTED : [2] [4] [6] [8] [10] [12] [14] +/// Query 1 : Data: [2] [4] [6], Next: 6, Operator: GT (>) +/// +/// Being in a slice window, the larger sorted list can be mutated. +/// You only need the next token to get the next slice. +/// MUTATED : [2] [4] [7] [8] [9] [10] [12] [14] +/// Query 2 : Data: [7] [8] [9], Next: 9, Operator: GT (>) +/// +/// Limitations of this require your data structure to always be sorted in one direction and your sort +/// indexes always fixed. Hence when moving down the slice window, your operator will be greater than (GT). +/// While moving up your operator will be less than (LT). +/// +/// ASC : | [1] [2] [3] | [4] [5] [6] | [7] [8] [9] | +/// >3 >6 >9 +/// DESC : | [9] [8] [7] | [6] [5] [4] | [3] [2] [1] | +/// <7 <4 <1 +/// For developer quality of life it's unwise to allow an inclusive operator, it just creates more overhead +/// to understanding our services. No GTE or LTE, always GT and LT. Services must be clean and clear, +/// when the usage narrative is clear and so will be the ease of use. LIST query must be dead simple. +/// Imagine travelling down the path, and getting a "next token" to get the next set of items to +/// continue walking. +/// +/// Because the limit is not part of the slice window your query mechanism should support varying size windows. +/// +/// DATA: | [1] [2] [3] | [4] [5] [6] [7] | [8] [9] | ... +/// | limit 3, >3 | limit 4, >7 | limit 2, >9 +/// For simplicity your API should not attempt to allow access to different sort indexes, be cognizant of +/// how our APIs are consumed.
If we create a GET /blocks operation to list blocks what would the correct indexes +/// be 99% of the time? +/// +/// Answer: Blocks sorted by height in descending order, that's your sorted list and your slice window. +/// : <- Latest | [100] [99] [98] [97] [...] | Oldest -> +/// +#[skip_serializing_none] +#[derive(Debug, Serialize, PartialEq)] +pub struct ApiPagedResponse { + data: Vec, + page: Option, +} + +#[derive(Debug, Serialize, PartialEq)] +struct ApiPage { + next: String, +} + +impl ApiPagedResponse { + pub fn new(data: Vec, next: Option) -> Self { + Self { + data, + page: next.map(|next| ApiPage { next }), + } // Option<&str> -> Option + } + + pub fn next(data: Vec, next: Option) -> Self { + Self::new(data, next) + } + + pub fn of(data: Vec, limit: usize, next_provider: impl Fn(&T) -> U) -> Self { + if data.len() == limit && !data.is_empty() && limit > 0 { + let next = next_provider(&data[limit - 1]).to_string(); + Self::next(data, Some(next)) + } else { + Self::next(data, None) + } + } +} + +#[cfg(test)] +mod tests { + use super::ApiPagedResponse; + + #[allow(dead_code)] + #[derive(Clone, Debug)] + struct Item { + id: String, + sort: String, + } + + impl Item { + fn new(id: &str, sort: &str) -> Self { + Self { + id: id.into(), + sort: sort.into(), + } + } + } + + #[test] + fn should_next_with_none() { + let items: Vec = vec![Item::new("0", "a"), Item::new("1", "b")]; + + let page = ApiPagedResponse::next(items, None).page; + assert_eq!(page, None); + } + + #[test] + fn should_next_with_value() { + let items: Vec = vec![Item::new("0", "a"), Item::new("1", "b")]; + + let next = ApiPagedResponse::next(items, Some("b".to_string())) + .page + .unwrap() + .next; + assert_eq!(next, "b".to_string()); + } + + #[test] + fn should_of_with_limit_3() { + let items: Vec = vec![ + Item::new("0", "a"), + Item::new("1", "b"), + Item::new("2", "c"), + ]; + + let next = ApiPagedResponse::of(items, 3, |item| item.clone().sort) + .page + .unwrap() + .next; + assert_eq!(next, 
"c".to_string()); + } + + #[test] + fn should_not_create_with_limit_3_while_size_2() { + let items: Vec = vec![Item::new("0", "a"), Item::new("1", "b")]; + + let page = ApiPagedResponse::of(items, 3, |item| item.clone().sort).page; + assert_eq!(page, None); + } +} diff --git a/lib/ain-ocean/src/api/stats/cache.rs b/lib/ain-ocean/src/api/stats/cache.rs new file mode 100644 index 00000000000..dbee6ce0953 --- /dev/null +++ b/lib/ain-ocean/src/api/stats/cache.rs @@ -0,0 +1,351 @@ +use std::{collections::HashMap, str::FromStr, sync::Arc}; + +use cached::proc_macro::cached; +use defichain_rpc::{ + defichain_rpc_json::token::TokenPagination, json::account::AccountAmount, AccountRPC, Client, + LoanRPC, TokenRPC, +}; +use rust_decimal::{ + prelude::{FromPrimitive, Zero}, + Decimal, +}; +use rust_decimal_macros::dec; +use serde::{Deserialize, Serialize}; +use snafu::OptionExt; + +use super::{subsidy::BLOCK_SUBSIDY, COIN}; +use crate::{ + api::{ + cache::list_pool_pairs_cached, + common::{find_token_balance, parse_amount}, + pool_pair::service::{get_total_liquidity_usd, get_usd_per_dfi}, + stats::get_block_reward_distribution, + AppContext, + }, + error::{DecimalConversionSnafu, OtherSnafu}, + model::MasternodeStatsData, + storage::{RepositoryOps, SortOrder}, + Result, +}; + +#[derive(Debug, Serialize, Deserialize, Clone, Default)] +pub struct Burned { + pub address: Decimal, + pub fee: Decimal, + pub auction: Decimal, + pub payback: Decimal, + pub emission: Decimal, + pub total: Decimal, +} + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("burned") }"# +)] +pub async fn get_burned(client: &Client) -> Result { + let burn_info = client.get_burn_info().await?; + + let utxo = Decimal::from_f64(burn_info.amount).context(DecimalConversionSnafu)?; + let emission = Decimal::from_f64(burn_info.emissionburn).context(DecimalConversionSnafu)?; + let fee = Decimal::from_f64(burn_info.feeburn).context(DecimalConversionSnafu)?; + let auction = 
Decimal::from_f64(burn_info.auctionburn).context(DecimalConversionSnafu)?; + + let account = find_token_balance(burn_info.tokens, "DFI"); + let address = utxo + account; + let payback = find_token_balance(burn_info.paybackburn, "DFI"); + + let burned = Burned { + address, + emission, + fee, + payback, + auction, + total: address + fee + auction + payback + emission, + }; + Ok(burned) +} + +#[derive(Debug, Serialize, Deserialize, Clone, Default)] +pub struct Count { + pub blocks: u32, + pub tokens: usize, + pub prices: usize, + pub masternodes: u32, +} + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("count") }"# +)] +pub async fn get_count(ctx: &Arc) -> Result { + let tokens = ctx + .client + .list_tokens( + Some(TokenPagination { + limit: 1000, + ..Default::default() + }), + Some(true), + ) + .await?; + + let masternodes = ctx + .services + .masternode + .stats + .get_latest()? + .map_or(0, |mn| mn.stats.count); + + let prices = ctx + .services + .price_ticker + .by_id + .list(None, SortOrder::Descending)? + .collect::>(); + + Ok(Count { + blocks: 0, + tokens: tokens.0.len(), + masternodes, + prices: prices.len(), + }) +} + +// TODO Shove it into network struct when available +lazy_static::lazy_static! 
{ + pub static ref BURN_ADDRESS: HashMap<&'static str, &'static str> = HashMap::from([ + ("mainnet", "8defichainBurnAddressXXXXXXXdRQkSm"), + ("testnet", "7DefichainBurnAddressXXXXXXXdMUE5n"), + ("devnet", "7DefichainBurnAddressXXXXXXXdMUE5n"), + ("changi", "7DefichainBurnAddressXXXXXXXdMUE5n"), + ("regtest", "mfburnZSAM7Gs1hpDeNaMotJXSGA7edosG"), + ]); +} + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("burned_total") }"# +)] +pub async fn get_burned_total(ctx: &AppContext) -> Result { + let burn_address = BURN_ADDRESS.get(ctx.network.as_str()).context(OtherSnafu { + msg: "Missing burn address", + })?; + let accounts = ctx + .client + .get_account(burn_address, None, Some(true)) + .await?; + let burn_info = ctx.client.get_burn_info().await?; + + let utxo = Decimal::from_f64(burn_info.amount).context(DecimalConversionSnafu)?; + let emission = Decimal::from_f64(burn_info.emissionburn).context(DecimalConversionSnafu)?; + let fee = Decimal::from_f64(burn_info.feeburn).context(DecimalConversionSnafu)?; + let account_balance = if let AccountAmount::List(accounts) = accounts { + for account in accounts { + let (amount, token_id) = parse_amount(&account)?; + + if token_id == "DFI" { + return Ok(Decimal::from_str(&amount).unwrap_or_default()); + } + } + dec!(0) + } else { + dec!(0) + }; + + Ok(utxo + account_balance + emission + fee) +} + +#[derive(Debug, Serialize, Deserialize, Default, Clone)] +pub struct Emission { + pub masternode: Decimal, + pub dex: Decimal, + pub community: Decimal, + pub anchor: Decimal, + pub burned: Decimal, + pub total: Decimal, +} + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("emission") }"# +)] +pub fn get_emission(height: u32) -> Result { + let subsidy = Decimal::from_u64(BLOCK_SUBSIDY.get_block_subsidy(height)) + .context(DecimalConversionSnafu)?; + let distribution = get_block_reward_distribution(subsidy); + + let masternode = distribution.masternode; + let dex 
= distribution.liquidity; + let community = distribution.community; + let anchor = distribution.anchor; + let total = subsidy / Decimal::from(COIN); + let burned = total - (masternode + dex + community + anchor); + + Ok(Emission { + masternode: masternode.trunc_with_scale(8), + dex: dex.trunc_with_scale(8), + community: community.trunc_with_scale(8), + anchor: anchor.trunc_with_scale(8), + burned, + total, + }) +} + +#[derive(Debug, Serialize, Deserialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct LoanCount { + pub schemes: u64, + pub loan_tokens: u64, + pub collateral_tokens: u64, + pub open_vaults: u64, + pub open_auctions: u64, +} + +#[derive(Debug, Serialize, Deserialize, Default, Clone)] +pub struct LoanValue { + pub collateral: f64, + pub loan: f64, +} + +#[derive(Debug, Serialize, Deserialize, Default, Clone)] +pub struct Loan { + pub count: LoanCount, + pub value: LoanValue, +} + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("loan") }"# +)] +pub async fn get_loan(client: &Client) -> Result { + let info = client.get_loan_info().await?; + + Ok(Loan { + count: LoanCount { + collateral_tokens: info.totals.collateral_tokens, + loan_tokens: info.totals.loan_tokens, + open_auctions: info.totals.open_auctions, + open_vaults: info.totals.open_vaults, + schemes: info.totals.schemes, + }, + value: LoanValue { + collateral: info.totals.collateral_value, + loan: info.totals.loan_value, + }, + }) +} + +#[derive(Debug, Serialize, Deserialize, Default, Clone)] +pub struct Locked { + pub weeks: u16, + pub tvl: Decimal, + pub count: u32, +} + +#[derive(Debug, Serialize, Deserialize, Default, Clone)] +pub struct Masternodes { + pub locked: Vec, +} + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("masternodes") }"# +)] +pub async fn get_masternodes(ctx: &Arc) -> Result { + let stats = ctx + .services + .masternode + .stats + .get_latest()? 
+ .map_or(MasternodeStatsData::default(), |mn| mn.stats); + + let usd = get_usd_per_dfi(ctx).await?; + + Ok(Masternodes { + locked: stats + .locked + .into_iter() + .map(|(k, v)| Locked { + weeks: k, + tvl: v.tvl * usd, + count: v.count, + }) + .collect(), + }) +} + +#[derive(Debug, Serialize, Deserialize, Default, Clone)] +pub struct Tvl { + pub total: Decimal, + pub dex: Decimal, + pub loan: Decimal, + pub masternodes: Decimal, +} + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("tvl") }"# +)] +pub async fn get_tvl(ctx: &Arc) -> Result { + // dex + let mut dex = dec!(0); + let pools = list_pool_pairs_cached(ctx, None, None).await?.0; + for (_, info) in pools { + let total_liquidity_usd = get_total_liquidity_usd(ctx, &info).await?; + dex += total_liquidity_usd; + } + + // masternodes + let usd = get_usd_per_dfi(ctx).await?; + let mut masternodes = ctx + .services + .masternode + .stats + .get_latest()? + .map_or_else(Decimal::zero, |mn| mn.stats.tvl); + masternodes *= usd; + + // loan + let loan = get_loan(&ctx.client).await?; + let loan = Decimal::from_f64(loan.value.collateral).unwrap_or_default(); + + Ok(Tvl { + loan, + masternodes, + dex, + total: dex + masternodes + loan, + }) +} + +#[derive(Debug, Serialize, Deserialize, Default, Clone)] +pub struct Price { + pub usd: Decimal, + #[deprecated(note = "use USD instead of aggregation over multiple pairs")] + pub usdt: Decimal, +} + +#[cached( + result = true, + time = 600, + key = "String", + convert = r#"{ format!("price") }"# +)] +pub async fn get_price(ctx: &Arc) -> Result { + let usd = get_usd_per_dfi(ctx).await?; + #[allow(deprecated)] + Ok(Price { usd, usdt: usd }) +} diff --git a/lib/ain-ocean/src/api/stats/distribution.rs b/lib/ain-ocean/src/api/stats/distribution.rs new file mode 100644 index 00000000000..676e403a93e --- /dev/null +++ b/lib/ain-ocean/src/api/stats/distribution.rs @@ -0,0 +1,44 @@ +use rust_decimal::Decimal; +use rust_decimal_macros::dec; + +use 
crate::api::stats::COIN; + +#[derive(Debug)] +pub struct BlockRewardDistribution { + pub masternode: Decimal, + pub community: Decimal, + pub anchor: Decimal, + pub liquidity: Decimal, + pub loan: Decimal, + pub options: Decimal, + pub unallocated: Decimal, +} + +pub const BLOCK_REWARD_DISTRIBUTION_PERCENTAGE: BlockRewardDistribution = BlockRewardDistribution { + masternode: dec!(3333), + community: dec!(491), + anchor: dec!(2), + liquidity: dec!(2545), + loan: dec!(2468), + options: dec!(988), + unallocated: dec!(173), +}; + +/** + * Get block reward distribution from block base subsidy + */ +pub fn get_block_reward_distribution(subsidy: Decimal) -> BlockRewardDistribution { + BlockRewardDistribution { + masternode: calculate_reward(subsidy, BLOCK_REWARD_DISTRIBUTION_PERCENTAGE.masternode), + community: calculate_reward(subsidy, BLOCK_REWARD_DISTRIBUTION_PERCENTAGE.community), + anchor: calculate_reward(subsidy, BLOCK_REWARD_DISTRIBUTION_PERCENTAGE.anchor), + liquidity: calculate_reward(subsidy, BLOCK_REWARD_DISTRIBUTION_PERCENTAGE.liquidity), + loan: calculate_reward(subsidy, BLOCK_REWARD_DISTRIBUTION_PERCENTAGE.loan), + options: calculate_reward(subsidy, BLOCK_REWARD_DISTRIBUTION_PERCENTAGE.options), + unallocated: calculate_reward(subsidy, BLOCK_REWARD_DISTRIBUTION_PERCENTAGE.unallocated), + } +} + +fn calculate_reward(amount: Decimal, percent: Decimal) -> Decimal { + (amount * percent) / dec!(10000) / Decimal::from(COIN) +} diff --git a/lib/ain-ocean/src/api/stats/mod.rs b/lib/ain-ocean/src/api/stats/mod.rs new file mode 100644 index 00000000000..c6e3a8628f4 --- /dev/null +++ b/lib/ain-ocean/src/api/stats/mod.rs @@ -0,0 +1,184 @@ +mod cache; +mod distribution; +mod subsidy; + +use std::sync::Arc; + +use ain_dftx::COIN; +use ain_macros::ocean_endpoint; +use axum::{routing::get, Extension, Router}; +use defichain_rpc::{ + defichain_rpc_json::{account::BurnInfo, GetNetworkInfoResult}, + AccountRPC, +}; +use rust_decimal::{prelude::FromPrimitive, Decimal}; +use 
serde::{Deserialize, Serialize}; +use snafu::OptionExt; + +use self::{ + cache::{ + get_burned, get_burned_total, get_count, get_emission, get_loan, get_masternodes, + get_price, get_tvl, Burned, Count, Emission, Loan, Masternodes, Price, Tvl, + }, + distribution::get_block_reward_distribution, + subsidy::BLOCK_SUBSIDY, +}; +use super::{cache::get_network_info_cached, response::Response, AppContext}; +use crate::{ + error::{ApiError, DecimalConversionSnafu}, + Result, +}; + +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct StatsData { + pub count: Count, + pub tvl: Tvl, + pub burned: Burned, + pub price: Price, + pub masternodes: Masternodes, + pub emission: Emission, + pub loan: Loan, + pub blockchain: Blockchain, + pub net: Net, +} + +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct Blockchain { + pub difficulty: f64, +} + +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct Net { + pub version: u64, + pub subversion: String, + pub protocolversion: u64, +} + +#[ocean_endpoint] +async fn get_stats(Extension(ctx): Extension>) -> Result> { + let (height, difficulty) = ctx + .services + .block + .by_height + .get_highest()? + .map(|b| (b.height, b.difficulty)) + .unwrap_or_default(); // Default to genesis block + + let GetNetworkInfoResult { + version, + subversion, + protocol_version, + .. + } = get_network_info_cached(&ctx).await?; + + let stats = StatsData { + burned: get_burned(&ctx.client).await?, + net: Net { + version: version as u64, + protocolversion: protocol_version as u64, + subversion, + }, + count: Count { + blocks: height, + ..get_count(&ctx).await? 
+ }, + emission: get_emission(height)?, + blockchain: Blockchain { difficulty }, + loan: get_loan(&ctx.client).await?, + price: get_price(&ctx).await?, + masternodes: get_masternodes(&ctx).await?, + tvl: get_tvl(&ctx).await?, + }; + Ok(Response::new(stats)) +} + +#[derive(Debug, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct RewardDistributionData { + anchor: Decimal, + community: Decimal, + liquidity: Decimal, + loan: Decimal, + masternode: Decimal, + options: Decimal, + unallocated: Decimal, +} + +#[ocean_endpoint] +async fn get_reward_distribution( + Extension(ctx): Extension>, +) -> Result> { + let height = ctx + .services + .block + .by_height + .get_highest()? + .map(|b| b.height) + .unwrap_or_default(); // Default to genesis block + + let subsidy = Decimal::from_u64(BLOCK_SUBSIDY.get_block_subsidy(height)) + .context(DecimalConversionSnafu)?; + let distribution = get_block_reward_distribution(subsidy); + + let distribution = RewardDistributionData { + masternode: distribution.masternode.trunc_with_scale(8), + anchor: distribution.anchor.trunc_with_scale(8), + community: distribution.community.trunc_with_scale(8), + liquidity: distribution.liquidity.trunc_with_scale(8), + loan: distribution.loan.trunc_with_scale(8), + options: distribution.options.trunc_with_scale(8), + unallocated: distribution.unallocated.trunc_with_scale(8), + }; + Ok(Response::new(distribution)) +} + +#[derive(Debug, Serialize, Deserialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct SupplyData { + max: u32, + total: Decimal, + burned: Decimal, + circulating: Decimal, +} + +#[ocean_endpoint] +async fn get_supply(Extension(ctx): Extension>) -> Result> { + static MAX: u32 = 1_200_000_000; + let height = ctx + .services + .block + .by_height + .get_highest()? + .map(|b| b.height) + .unwrap_or_default(); // Default to genesis block + + let total = Decimal::from_u64(BLOCK_SUBSIDY.get_supply(height)) + .context(DecimalConversionSnafu)? 
+ / Decimal::from(COIN); + + let burned = get_burned_total(&ctx).await?; + let circulating = total - burned; + + let supply = SupplyData { + max: MAX, + total, + burned, + circulating, + }; + Ok(Response::new(supply)) +} + +#[ocean_endpoint] +async fn get_burn(Extension(ctx): Extension>) -> Result> { + let burn_info = ctx.client.get_burn_info().await?; + Ok(Response::new(burn_info)) +} + +pub fn router(ctx: Arc) -> Router { + Router::new() + .route("/", get(get_stats)) + .route("/reward/distribution", get(get_reward_distribution)) + .route("/supply", get(get_supply)) + .route("/burn", get(get_burn)) + .layer(Extension(ctx)) +} diff --git a/lib/ain-ocean/src/api/stats/subsidy.rs b/lib/ain-ocean/src/api/stats/subsidy.rs new file mode 100644 index 00000000000..9f67ac62193 --- /dev/null +++ b/lib/ain-ocean/src/api/stats/subsidy.rs @@ -0,0 +1,134 @@ +use serde::{Deserialize, Serialize}; + +lazy_static::lazy_static! { + // TODO handle networks + // Global service caching all block subsidy reductions + pub static ref BLOCK_SUBSIDY: BlockSubsidy = BlockSubsidy::new(TEST_NET_COINBASE_SUBSIDY_OPTIONS); +} + +#[derive(Serialize, Deserialize, Debug, Clone, Copy)] +pub struct CoinbaseSubsidyOptions { + eunos_height: u64, + genesis_block_subsidy: u64, + pre_eunos_block_subsidy: u64, + eunos_base_block_subsidy: u64, + eunos_foundation_burn: u64, + emission_reduction: u64, + emission_reduction_interval: u64, +} + +#[allow(dead_code)] +pub static MAIN_NET_COINBASE_SUBSIDY_OPTIONS: CoinbaseSubsidyOptions = CoinbaseSubsidyOptions { + eunos_height: 894_000, + genesis_block_subsidy: 59_100_003_000_000_000, + pre_eunos_block_subsidy: 20_000_000_000, + eunos_base_block_subsidy: 40_504_000_000, + eunos_foundation_burn: 26_859_289_307_829_046, + emission_reduction: 1658, + emission_reduction_interval: 32690, +}; + +pub static TEST_NET_COINBASE_SUBSIDY_OPTIONS: CoinbaseSubsidyOptions = CoinbaseSubsidyOptions { + eunos_height: 354_950, + genesis_block_subsidy: 30_400_004_000_000_000, + 
pre_eunos_block_subsidy: 20_000_000_000, + eunos_base_block_subsidy: 40_504_000_000, + eunos_foundation_burn: 0, + emission_reduction: 1658, + emission_reduction_interval: 32690, +}; + +pub struct BlockSubsidy { + reduction_block_subsidies: Vec, + reduction_supply_milestones: Vec, + options: CoinbaseSubsidyOptions, +} + +impl BlockSubsidy { + pub fn new(options: CoinbaseSubsidyOptions) -> Self { + let reduction_block_subsidies = Self::compute_block_reduction_subsidies(&options); + let reduction_supply_milestones = + Self::compute_reduction_supply_milestones(&reduction_block_subsidies, &options); + Self { + reduction_block_subsidies, + reduction_supply_milestones, + options, + } + } + + pub fn get_supply(&self, height: u32) -> u64 { + let height = u64::from(height); + if height < self.options.eunos_height { + self.get_pre_eunos_supply(height) + } else { + self.get_post_eunos_supply(height) + } + } + + pub fn get_block_subsidy(&self, height: u32) -> u64 { + let height = u64::from(height); + if height == 0 { + return self.options.genesis_block_subsidy; + } + + if height < self.options.eunos_height { + return self.options.pre_eunos_block_subsidy; + } + + let reduction_count = + (height - self.options.eunos_height) / self.options.emission_reduction_interval; + if reduction_count < self.reduction_block_subsidies.len() as u64 { + return self.reduction_block_subsidies[reduction_count as usize]; + } + + 0 + } + + fn get_pre_eunos_supply(&self, height: u64) -> u64 { + self.options.genesis_block_subsidy + self.options.pre_eunos_block_subsidy * height + } + + fn get_post_eunos_supply(&self, height: u64) -> u64 { + let post_eunos_diff = height - (self.options.eunos_height - 1); + let reduction_count = post_eunos_diff / self.options.emission_reduction_interval; + let reduction_remainder = post_eunos_diff % self.options.emission_reduction_interval; + + if reduction_count >= self.reduction_supply_milestones.len() as u64 { + *self.reduction_supply_milestones.last().unwrap() + } 
else { + self.reduction_supply_milestones[reduction_count as usize] + + self.reduction_block_subsidies[reduction_count as usize] * reduction_remainder + } + } + + fn compute_reduction_supply_milestones( + reduction_block_subsidies: &[u64], + options: &CoinbaseSubsidyOptions, + ) -> Vec { + let mut supply_milestones = vec![ + options.genesis_block_subsidy + + options.pre_eunos_block_subsidy * (options.eunos_height - 1) + - options.eunos_foundation_burn, + ]; + for i in 1..reduction_block_subsidies.len() { + let previous_milestone = supply_milestones[i - 1]; + supply_milestones.push( + previous_milestone + + reduction_block_subsidies[i - 1] * options.emission_reduction_interval, + ); + } + supply_milestones + } + + fn compute_block_reduction_subsidies(options: &CoinbaseSubsidyOptions) -> Vec { + let mut subsidy_reductions: Vec = vec![options.eunos_base_block_subsidy]; + while let Some(&last_subsidy) = subsidy_reductions.last() { + let amount = last_subsidy * options.emission_reduction / 100_000; + if amount == 0 { + break; + } + subsidy_reductions.push(last_subsidy - amount); + } + subsidy_reductions + } +} diff --git a/lib/ain-ocean/src/api/tokens.rs b/lib/ain-ocean/src/api/tokens.rs new file mode 100644 index 00000000000..2237888b190 --- /dev/null +++ b/lib/ain-ocean/src/api/tokens.rs @@ -0,0 +1,146 @@ +use std::sync::Arc; + +use ain_macros::ocean_endpoint; +use axum::{routing::get, Extension, Router}; +use defichain_rpc::{ + json::token::{TokenInfo, TokenResult}, + RpcApi, +}; +use serde::Serialize; +use serde_json::json; +use serde_with::{serde_as, DisplayFromStr}; + +use super::{ + common::parse_display_symbol, + path::Path, + query::{PaginationQuery, Query}, + response::{ApiPagedResponse, Response}, + AppContext, +}; +use crate::{ + error::{ApiError, Error}, + Result, +}; + +#[derive(Serialize, Debug, Clone, Default)] +pub struct TxHeight { + tx: String, + height: i64, +} + +#[serde_as] +#[derive(Serialize, Debug, Clone, Default)] +#[serde(rename_all = 
"camelCase")] +pub struct TokenData { + id: String, + symbol: String, + symbol_key: String, + name: String, + decimal: u8, + #[serde_as(as = "DisplayFromStr")] + limit: i64, + mintable: bool, + tradeable: bool, + #[serde(rename = "isDAT")] + is_dat: bool, + #[serde(rename = "isLPS")] + is_lps: bool, + is_loan_token: bool, + finalized: bool, + minted: String, + creation: TxHeight, + destruction: TxHeight, + display_symbol: String, + collateral_address: Option, +} + +impl TokenData { + pub fn from_with_id(id: String, token: TokenInfo) -> Self { + let display_symbol = parse_display_symbol(&token); + Self { + id, + symbol: token.symbol, + display_symbol, + symbol_key: token.symbol_key, + name: token.name, + decimal: token.decimal, + limit: token.limit, + mintable: token.mintable, + tradeable: token.tradeable, + is_dat: token.is_dat, + is_lps: token.is_lps, + is_loan_token: token.is_loan_token, + finalized: token.finalized, + minted: token.minted.to_string(), + creation: TxHeight { + height: token.creation_height, + tx: token.creation_tx, + }, + destruction: TxHeight { + height: token.destruction_height, + tx: token.destruction_tx, + }, + collateral_address: token.collateral_address.and_then(|addr| { + if addr.is_empty() { + None + } else { + Some(addr) + } + }), + } + } +} + +#[ocean_endpoint] +async fn list_tokens( + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let tokens: TokenResult = ctx.client.call( + "listtokens", + &[ + json!({ + "limit": query.size, + "start": query.next.as_ref().and_then(|n| n.parse::().ok()).unwrap_or_default(), + "including_start": query.next.is_none() + }), + true.into(), + ], + ).await?; + + let res = tokens + .0 + .into_iter() + .map(|(k, v)| TokenData::from_with_id(k, v)) + .collect::>(); + Ok(ApiPagedResponse::of(res, query.size, |token| { + token.id.clone() + })) +} + +#[ocean_endpoint] +async fn get_token( + Path(id): Path, + Extension(ctx): Extension>, +) -> Result>> { + let mut v: TokenResult = ctx + .client 
+ .call("gettoken", &[id.as_str().into()]) + .await + .map_err(|_| Error::NotFoundMessage { + msg: "Unable to find token".to_string(), + })?; + + let res = + v.0.remove(&id) + .map(|token| TokenData::from_with_id(id, token)); + + Ok(Response::new(res)) +} + +pub fn router(ctx: Arc) -> Router { + Router::new() + .route("/", get(list_tokens)) + .route("/:id", get(get_token)) + .layer(Extension(ctx)) +} diff --git a/lib/ain-ocean/src/api/transactions.rs b/lib/ain-ocean/src/api/transactions.rs new file mode 100644 index 00000000000..5538327a274 --- /dev/null +++ b/lib/ain-ocean/src/api/transactions.rs @@ -0,0 +1,159 @@ +use std::sync::Arc; + +use ain_macros::ocean_endpoint; +use axum::{extract::Query, routing::get, Extension, Router}; +use bitcoin::Txid; +use serde::{Deserialize, Serialize}; + +use super::{path::Path, query::PaginationQuery, response::ApiPagedResponse, AppContext}; +use crate::{ + api::{common::Paginate, response::Response}, + error::ApiError, + model::{ + Transaction, TransactionVin, TransactionVinType, TransactionVinVout, TransactionVout, + TransactionVoutScript, + }, + storage::{ + InitialKeyProvider, RepositoryOps, SortOrder, TransactionVin as TransactionVinStorage, + }, + Result, +}; + +#[derive(Deserialize)] +pub struct TransactionId { + id: Txid, +} + +#[ocean_endpoint] +async fn get_transaction( + Path(TransactionId { id }): Path, + Extension(ctx): Extension>, +) -> Result>> { + let transactions = ctx.services.transaction.by_id.get(&id)?; + Ok(Response::new(transactions)) +} + +#[derive(Debug, Serialize)] +struct TransactionVinResponse { + pub id: String, + pub txid: Txid, + pub coinbase: Option, + pub vout: Option, + pub script: Option, + pub tx_in_witness: Option>, + pub sequence: i64, +} + +impl From for TransactionVinResponse { + fn from(v: TransactionVin) -> Self { + let (id, coinbase) = match v.r#type { + TransactionVinType::Coinbase(coinbase) => (format!("{}00", v.txid), Some(coinbase)), + TransactionVinType::Standard((txid, vout)) => { 
+ (format!("{}{}{:x}", v.txid, txid, vout), None) + } + }; + Self { + id, + txid: v.txid, + coinbase, + vout: v.vout, + script: v.script, + tx_in_witness: v.tx_in_witness, + sequence: v.sequence, + } + } +} + +#[ocean_endpoint] +async fn get_vins( + Path(TransactionId { id }): Path, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let next = query + .next + .clone() + .unwrap_or_else(|| TransactionVinStorage::initial_key(id)); + + let list = ctx + .services + .transaction + .vin_by_id + .list(Some(next), SortOrder::Descending)? + .paginate(&query) + .take_while(|item| match item { + Ok((_, vin)) => vin.txid == id, + _ => true, + }) + .map(|item| { + let (_, v) = item?; + Ok(TransactionVinResponse::from(v)) + }) + .collect::>>()?; + + Ok(ApiPagedResponse::of(list, query.size, |each| { + each.id.clone() + })) +} + +#[derive(Debug, Serialize)] +struct TransactionVoutResponse { + pub id: String, + // pub vout: usize, + pub txid: Txid, + pub n: usize, + pub value: String, + pub token_id: Option, + pub script: TransactionVoutScript, +} + +impl From for TransactionVoutResponse { + fn from(v: TransactionVout) -> Self { + Self { + id: format!("{}{:x}", v.txid, v.vout), + txid: v.txid, + n: v.n, + value: format!("{:.8}", v.value), + token_id: v.token_id, + script: v.script, + } + } +} + +//get list of vout transaction, by passing id which contains txhash + vout_idx +#[ocean_endpoint] +async fn get_vouts( + Path(TransactionId { id }): Path, + Query(query): Query, + Extension(ctx): Extension>, +) -> Result> { + let next = query.next.as_deref().unwrap_or("0").parse::()?; + + let list = ctx + .services + .transaction + .vout_by_id + .list(Some((id, next)), SortOrder::Ascending)? 
+ .paginate(&query) + .take_while(|item| match item { + Ok((_, vout)) => vout.txid == id, + _ => true, + }) + .map(|item| { + let (_, v) = item?; + Ok(TransactionVoutResponse::from(v)) + }) + .collect::>>()?; + + Ok(ApiPagedResponse::of(list, query.size, |each| { + each.n.to_string() + })) +} + +pub fn router(ctx: Arc) -> Router { + Router::new() + .route("/:id", get(get_transaction)) + .route("/:id/vins", get(get_vins)) + .route("/:id/vouts", get(get_vouts)) + .layer(Extension(ctx)) +} diff --git a/lib/ain-ocean/src/error.rs b/lib/ain-ocean/src/error.rs new file mode 100644 index 00000000000..20f5c0febaa --- /dev/null +++ b/lib/ain-ocean/src/error.rs @@ -0,0 +1,321 @@ +use axum::{ + http::StatusCode, + response::{IntoResponse, Response}, + Json, +}; +use serde::Serialize; +use serde_json::json; +use snafu::{Location, Snafu}; + +#[derive(Debug)] +pub enum IndexAction { + Index, + Invalidate, +} + +impl std::fmt::Display for IndexAction { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::Index => write!(f, "index"), + Self::Invalidate => write!(f, "invalidate"), + } + } +} + +#[derive(Snafu, Debug)] +pub enum NotFoundKind { + #[snafu(display("auction"))] + Auction, + #[snafu(display("collateral token"))] + CollateralToken, + #[snafu(display("loan token"))] + LoanToken, + #[snafu(display("masternode"))] + Masternode, + #[snafu(display("oracle"))] + Oracle, + #[snafu(display("poolpair"))] + PoolPair, + #[snafu(display("proposal"))] + Proposal, + #[snafu(display("rawtx"))] + RawTx, + #[snafu(display("scheme"))] + Scheme, + #[snafu(display("token {}", id))] + Token { id: String }, + #[snafu(display("vault"))] + Vault, +} + +#[derive(Debug, Snafu)] +#[snafu(visibility(pub))] +pub enum Error { + #[snafu(context(false))] // allows using the ? 
operator directly on the underlying error + BincodeError { + #[snafu(source)] + error: bincode::Error, + #[snafu(implicit)] + location: Location, + }, + #[snafu(context(false))] + BitcoinAddressError { + #[snafu(source)] + error: bitcoin::address::Error, + #[snafu(implicit)] + location: Location, + }, + #[snafu(context(false))] + #[snafu(display("InvalidDefiAddress"))] + BitcoinAddressParseError { + #[snafu(source)] + error: bitcoin::address::error::ParseError, + #[snafu(implicit)] + location: Location, + }, + #[snafu(context(false))] + BitcoinConsensusEncodeError { + #[snafu(source)] + error: bitcoin::consensus::encode::Error, + #[snafu(implicit)] + location: Location, + }, + #[snafu(context(false))] + BitcoinHexToArrayError { + #[snafu(source)] + error: bitcoin::hex::HexToArrayError, + #[snafu(implicit)] + location: Location, + }, + #[snafu(context(false))] + DecimalError { + #[snafu(source)] + error: rust_decimal::Error, + #[snafu(implicit)] + location: Location, + }, + #[snafu(context(false))] + DBError { + #[snafu(source)] + error: ain_db::DBError, + #[snafu(implicit)] + location: Location, + }, + #[snafu(context(false))] + FromHexError { + #[snafu(source)] + error: hex::FromHexError, + #[snafu(implicit)] + location: Location, + }, + #[snafu(context(false))] + IOError { + #[snafu(source)] + error: std::io::Error, + #[snafu(implicit)] + location: Location, + }, + #[snafu(context(false))] + JsonrpseeError { + #[snafu(source)] + error: jsonrpsee::core::Error, + #[snafu(implicit)] + location: Location, + }, + #[snafu(context(false))] + ParseIntError { + #[snafu(source)] + error: std::num::ParseIntError, + #[snafu(implicit)] + location: Location, + }, + #[snafu(context(false))] + ParseFloatError { + #[snafu(source)] + error: std::num::ParseFloatError, + #[snafu(implicit)] + location: Location, + }, + #[snafu(context(false))] + RpcError { + #[snafu(source)] + error: defichain_rpc::Error, + #[snafu(implicit)] + location: Location, + }, + #[snafu(context(false))] + 
SerdeJsonError { + #[snafu(source)] + error: serde_json::Error, + #[snafu(implicit)] + location: Location, + }, + #[snafu(context(false))] + TryFromIntError { + #[snafu(source)] + error: std::num::TryFromIntError, + #[snafu(implicit)] + location: Location, + }, + #[snafu(display("Unable to find {}", kind))] + NotFound { + kind: NotFoundKind, + }, + NotFoundMessage { + msg: String, + }, + #[snafu(display( + "attempting to sync: {:?} but type: {} with id: {} cannot be found in the index", + action, + r#type, + id + ))] + NotFoundIndex { + action: IndexAction, + r#type: String, + id: String, + }, + DecimalConversionError, + #[snafu(display("Arithmetic overflow"))] + ArithmeticOverflow { + // msg: String, // TODO(canonbrother): less complicated atm + #[snafu(implicit)] + location: Location, + }, + #[snafu(display("Arithmetic underflow"))] + ArithmeticUnderflow { + // msg: String, // TODO(canonbrother): less complicated atm + #[snafu(implicit)] + location: Location, + }, + SecondaryIndex, + BadRequest { + msg: String, + }, + #[snafu(display("Invalid token currency format: {}", item))] + InvalidTokenCurrency { + item: String, + #[snafu(implicit)] + location: Location, + }, + #[snafu(display("Invalid fixed interval price format: {}", item))] + InvalidFixedIntervalPrice { + item: String, + #[snafu(implicit)] + location: Location, + }, + #[snafu(display("Invalid amount format: {}", item))] + InvalidAmount { + item: String, + #[snafu(implicit)] + location: Location, + }, + #[snafu(display("Invalid pool pair symbol format: {}", item))] + InvalidPoolPairSymbol { + item: String, + #[snafu(implicit)] + location: Location, + }, + #[snafu(display("To primitive error: {}", msg))] + ToPrimitiveError { + msg: String, + #[snafu(implicit)] + location: Location, + }, + #[snafu(display("{}", msg))] + Other { + msg: String, + }, +} + +impl From> for Error { + fn from(err: Box) -> Self { + Self::Other { + msg: err.to_string(), + } + } +} + +impl From<&str> for Error { + fn from(s: &str) -> 
Self { + Self::Other { msg: s.to_string() } + } +} + +#[derive(Serialize, Debug)] +pub enum ErrorKind { + NotFound, + BadRequest, + Unknown, +} + +#[derive(Serialize, Debug)] +struct ApiErrorData { + code: u16, + r#type: ErrorKind, + at: u128, + message: String, + url: String, +} +#[derive(Serialize, Debug)] +pub struct ApiError { + error: ApiErrorData, + #[serde(skip)] + status: StatusCode, +} + +impl ApiError { + #[must_use] + pub fn new(status: StatusCode, message: String, url: String) -> Self { + let current_time = std::time::SystemTime::now(); + let at = current_time + .duration_since(std::time::UNIX_EPOCH) + .expect("Time went backwards") + .as_millis(); + + let r#type = match status { + StatusCode::NOT_FOUND => ErrorKind::NotFound, + StatusCode::BAD_REQUEST => ErrorKind::BadRequest, + _ => ErrorKind::Unknown, + }; + + Self { + error: ApiErrorData { + r#type, + code: status.as_u16(), + message, + url, + at, + }, + status, + } + } +} + +impl IntoResponse for ApiError { + fn into_response(self) -> Response { + let status = self.status; + let body = Json(json!({ + "error": self.error + })); + (status, body).into_response() + } +} + +impl Error { + #[must_use] + pub fn into_code_and_message(self) -> (StatusCode, String) { + let (code, reason) = match &self { + Self::RpcError { + error: defichain_rpc::Error::JsonRpc(jsonrpc_async::error::Error::Rpc(e)), + .. 
+ } => (StatusCode::NOT_FOUND, e.message.to_string()), + Self::NotFound { kind: _ } => (StatusCode::NOT_FOUND, format!("{self}")), + Self::NotFoundMessage { msg } => (StatusCode::NOT_FOUND, msg.clone()), + Self::BadRequest { msg } => (StatusCode::BAD_REQUEST, msg.clone()), + Self::Other { msg } => (StatusCode::INTERNAL_SERVER_ERROR, msg.clone()), + _ => (StatusCode::INTERNAL_SERVER_ERROR, self.to_string()), + }; + (code, reason) + } +} diff --git a/lib/ain-ocean/src/hex_encoder.rs b/lib/ain-ocean/src/hex_encoder.rs new file mode 100644 index 00000000000..aa002821f00 --- /dev/null +++ b/lib/ain-ocean/src/hex_encoder.rs @@ -0,0 +1,8 @@ +use sha2::{Digest, Sha256}; + +#[must_use] +pub fn as_sha256(bytes: &[u8]) -> [u8; 32] { + let mut hasher = Sha256::new(); + hasher.update(bytes); + hasher.finalize().into() +} diff --git a/lib/ain-ocean/src/indexer/auction.rs b/lib/ain-ocean/src/indexer/auction.rs new file mode 100644 index 00000000000..0cc7cfb88fd --- /dev/null +++ b/lib/ain-ocean/src/indexer/auction.rs @@ -0,0 +1,52 @@ +use std::sync::Arc; + +use ain_dftx::vault::PlaceAuctionBid; +use log::trace; + +use super::Context; +use crate::{ + indexer::{Index, Result}, + model::VaultAuctionBatchHistory, + storage::RepositoryOps, + Services, +}; + +impl Index for PlaceAuctionBid { + fn index(self, services: &Arc, ctx: &Context) -> Result<()> { + trace!("[PlaceAuctionBid] Indexing..."); + + let auction = VaultAuctionBatchHistory { + id: format!("{}-{}-{}", self.vault_id, self.index, ctx.tx.txid), + key: format!("{}-{}", self.vault_id, self.index), + sort: format!("{}-{}", ctx.block.height, ctx.tx_idx), + vault_id: self.vault_id, + index: ctx.tx_idx, + from: self.from, + amount: self.token_amount.amount, + token_id: self.token_amount.token.0, + block: ctx.block.clone(), + }; + trace!("auction : {:?}", auction); + + let key = (self.vault_id, self.index, ctx.tx.txid); + services.auction.by_id.put(&key, &auction)?; + services.auction.by_height.put( + &(self.vault_id, self.index, 
ctx.block.height, ctx.tx_idx), + &key, + ) + } + + fn invalidate(&self, services: &Arc, ctx: &Context) -> Result<()> { + trace!("[PlaceAuctionBid] Invalidating..."); + services + .auction + .by_id + .delete(&(self.vault_id, self.index, ctx.tx.txid))?; + services.auction.by_height.delete(&( + self.vault_id, + self.index, + ctx.block.height, + ctx.tx_idx, + )) + } +} diff --git a/lib/ain-ocean/src/indexer/helper.rs b/lib/ain-ocean/src/indexer/helper.rs new file mode 100644 index 00000000000..40378314ae5 --- /dev/null +++ b/lib/ain-ocean/src/indexer/helper.rs @@ -0,0 +1,16 @@ +use bitcoin::{hashes::Hash, Txid}; +use defichain_rpc::json::blockchain::{Transaction, Vin}; + +pub fn check_if_evm_tx(txn: &Transaction) -> bool { + txn.vin.len() == 2 + && txn.vin.iter().all(|vin| match vin { + Vin::Coinbase(_) => true, + Vin::Standard(tx) => tx.txid == Txid::all_zeros(), + }) + && txn.vout.len() == 1 + && txn.vout[0] + .script_pub_key + .asm + .starts_with("OP_RETURN 4466547839") + && txn.vout[0].value == 0f64 +} diff --git a/lib/ain-ocean/src/indexer/loan_token.rs b/lib/ain-ocean/src/indexer/loan_token.rs new file mode 100644 index 00000000000..c4eee9d3384 --- /dev/null +++ b/lib/ain-ocean/src/indexer/loan_token.rs @@ -0,0 +1,215 @@ +use std::{str::FromStr, sync::Arc}; + +use ain_dftx::loans::SetLoanToken; +use log::trace; +use rust_decimal::{prelude::Zero, Decimal}; +use rust_decimal_macros::dec; + +use crate::{ + indexer::{Context, Index, Result}, + model::{BlockContext, OraclePriceActive, OraclePriceActiveNext, OraclePriceAggregated}, + network::Network, + storage::{RepositoryOps, SortOrder}, + Services, +}; + +impl Index for SetLoanToken { + fn index(self, services: &Arc, ctx: &Context) -> Result<()> { + let ticker = (self.currency_pair.token, self.currency_pair.currency); + perform_active_price_tick(services, ticker, &ctx.block)?; + Ok(()) + } + + fn invalidate(&self, services: &Arc, context: &Context) -> Result<()> { + trace!("[SetLoanToken] Invalidating..."); + let 
ticker_id = ( + self.currency_pair.token.clone(), + self.currency_pair.currency.clone(), + context.block.height, + ); + services.oracle_price_active.by_id.delete(&ticker_id)?; + Ok(()) + } +} + +fn is_aggregate_valid(aggregate: &OraclePriceAggregated, block: &BlockContext) -> bool { + if (aggregate.block.time - block.time).abs() >= 3600 { + return false; + } + + if aggregate.aggregated.oracles.active < dec!(2) { + // minimum live oracles + return false; + } + + if aggregate.aggregated.weightage == dec!(0) { + return false; + } + + true +} + +fn is_live(active: Option, next: Option) -> bool { + let Some(active) = active else { + return false; + }; + + let Some(next) = next else { + return false; + }; + + let active_price = active.amount; + + let next_price = next.amount; + + if active_price <= Decimal::zero() { + return false; + } + + if next_price <= Decimal::zero() { + return false; + } + + let diff = (next_price - active_price).abs(); + let threshold = active_price * dec!(0.3); // deviation_threshold 0.3 + if diff >= threshold { + return false; + } + + true +} + +pub fn index_active_price(services: &Arc, block: &BlockContext) -> Result<()> { + let network = ain_cpp_imports::get_network(); + let block_interval = match Network::from_str(&network)? { + Network::Regtest => 6, + _ => 120, + }; + if block.height % block_interval == 0 { + let price_tickers = services + .price_ticker + .by_id + .list(None, SortOrder::Descending)? 
+ .flatten() + .collect::>(); + + for pt in price_tickers { + perform_active_price_tick(services, pt.0, block)?; + } + } + Ok(()) +} + +fn map_active_price( + block: &BlockContext, + ticker_id: (String, String), + aggregated_price: OraclePriceAggregated, + prev_price: OraclePriceActive, +) -> OraclePriceActive { + let next_price = if is_aggregate_valid(&aggregated_price, block) { + Some(aggregated_price.aggregated) + } else { + None + }; + + let active_price = if let Some(next) = prev_price.next { + Some(next) + } else { + prev_price.active + }; + + OraclePriceActive { + id: (ticker_id.0.clone(), ticker_id.1.clone(), block.height), + key: ticker_id, + sort: hex::encode(block.height.to_be_bytes()), + active: active_price.clone(), + next: next_price.clone(), + is_live: is_live(active_price, next_price), + block: block.clone(), + } +} + +pub fn invalidate_active_price(services: &Arc, block: &BlockContext) -> Result<()> { + let network = ain_cpp_imports::get_network(); + let block_interval = match Network::from_str(&network)? { + Network::Regtest => 6, + _ => 120, + }; + if block.height % block_interval == 0 { + let price_tickers = services + .price_ticker + .by_id + .list(None, SortOrder::Descending)? + .flatten() + .collect::>(); + + for pt in price_tickers { + let token = pt.0 .0; + let currency = pt.0 .1; + services + .oracle_price_active + .by_id + .delete(&(token, currency, block.height))?; + } + } + + Ok(()) +} + +pub fn perform_active_price_tick( + services: &Arc, + ticker_id: (String, String), + block: &BlockContext, +) -> Result<()> { + let repo = &services.oracle_price_aggregated; + let prev_keys = repo + .by_key + .list(Some(ticker_id.clone()), SortOrder::Descending)? 
+ .take(1) + .flatten() // return empty vec if none + .collect::>(); + + if prev_keys.is_empty() { + return Ok(()); + } + + let Some((_, prev_id)) = prev_keys.first() else { + return Ok(()); + }; + + let aggregated_price = repo.by_id.get(prev_id)?; + + let Some(aggregated_price) = aggregated_price else { + return Ok(()); + }; + + let repo = &services.oracle_price_active; + let prev_keys = repo + .by_key + .list(Some(ticker_id.clone()), SortOrder::Descending)? + .take(1) + .flatten() + .collect::>(); + + if prev_keys.is_empty() { + return Ok(()); + } + + let Some((_, prev_id)) = prev_keys.first() else { + return Ok(()); + }; + + let prev_price = repo.by_id.get(prev_id)?; + + let Some(prev_price) = prev_price else { + return Ok(()); + }; + + let active_price = map_active_price(block, ticker_id, aggregated_price, prev_price); + + repo.by_id.put(&active_price.id, &active_price)?; + + repo.by_key.put(&active_price.key, &active_price.id)?; + + Ok(()) +} diff --git a/lib/ain-ocean/src/indexer/masternode.rs b/lib/ain-ocean/src/indexer/masternode.rs new file mode 100644 index 00000000000..a4a942a55fc --- /dev/null +++ b/lib/ain-ocean/src/indexer/masternode.rs @@ -0,0 +1,181 @@ +use std::sync::Arc; + +use ain_dftx::masternode::*; +use bitcoin::{hashes::Hash, PubkeyHash, ScriptBuf, WPubkeyHash}; +use log::trace; +use rust_decimal::{prelude::FromPrimitive, Decimal}; +use snafu::OptionExt; + +use super::Context; +use crate::{ + error::DecimalConversionSnafu, + indexer::{Index, Result}, + model::{HistoryItem, Masternode, MasternodeStats, MasternodeStatsData, TimelockStats}, + storage::RepositoryOps, + Services, +}; + +fn get_operator_script(hash: &PubkeyHash, r#type: u8) -> Result { + match r#type { + 0x1 => Ok(ScriptBuf::new_p2pkh(hash)), + 0x4 => Ok(ScriptBuf::new_p2wpkh(&WPubkeyHash::hash( + hash.as_byte_array(), + ))), + _ => Err("Unsupported type".into()), + } +} + +impl Index for CreateMasternode { + fn index(self, services: &Arc, ctx: &Context) -> Result<()> { + 
trace!("[CreateMasternode] Indexing..."); + let txid = ctx.tx.txid; + let Some(ref addresses) = ctx.tx.vout[1].script_pub_key.addresses else { + return Err("Missing owner address".into()); + }; + let collateral = Decimal::from_f64(ctx.tx.vout[1].value).context(DecimalConversionSnafu)?; + + let masternode = Masternode { + id: txid, + owner_address: addresses[0].clone(), + operator_address: get_operator_script(&self.operator_pub_key_hash, self.operator_type)? + .to_hex_string(), + creation_height: ctx.block.height, + resign_height: None, + resign_tx: None, + minted_blocks: 0, + timelock: self.timelock.0.unwrap_or_default(), + block: ctx.block.clone(), + collateral, + history: Vec::new(), + }; + + services.masternode.by_id.put(&txid, &masternode)?; + services + .masternode + .by_height + .put(&(ctx.block.height, txid), &0)?; + + index_stats(&self, services, ctx, collateral) + } + + fn invalidate(&self, services: &Arc, ctx: &Context) -> Result<()> { + trace!("[CreateMasternode] Invalidating..."); + services.masternode.by_id.delete(&ctx.tx.txid)?; + services + .masternode + .by_height + .delete(&(ctx.block.height, ctx.tx.txid)) + } +} + +fn index_stats( + data: &CreateMasternode, + services: &Arc, + ctx: &Context, + collateral: Decimal, +) -> Result<()> { + let mut stats = services + .masternode + .stats + .get_latest()? 
+ .map_or(MasternodeStatsData::default(), |mn| mn.stats); + + let count = stats.count + 1; + let tvl = stats.tvl + collateral; + let locked = stats + .locked + .entry(data.timelock.0.unwrap_or_default()) + .or_insert_with(TimelockStats::default); + + locked.count += 1; + locked.tvl += collateral; + + services.masternode.stats.put( + &ctx.block.height, + &MasternodeStats { + stats: MasternodeStatsData { + count, + tvl, + locked: stats.clone().locked, + }, + block: ctx.block.clone(), + }, + ) +} + +impl Index for UpdateMasternode { + fn index(self, services: &Arc, ctx: &Context) -> Result<()> { + trace!("[UpdateMasternode] Indexing..."); + if let Some(mut mn) = services.masternode.by_id.get(&self.node_id)? { + mn.history.push(HistoryItem { + owner_address: mn.owner_address.clone(), + operator_address: mn.operator_address.clone(), + }); + + for update in self.updates.as_ref() { + trace!("update : {:?}", update); + match update.r#type { + 0x1 => { + if let Some(ref addresses) = ctx.tx.vout[1].script_pub_key.addresses { + mn.owner_address.clone_from(&addresses[0]); + } + } + 0x2 => { + if let Some(hash) = update.address.address_pub_key_hash { + mn.operator_address = + get_operator_script(&hash, update.address.r#type)?.to_hex_string(); + } + } + _ => (), + } + } + + services.masternode.by_id.put(&self.node_id, &mn)?; + } + Ok(()) + } + + fn invalidate(&self, services: &Arc, _ctx: &Context) -> Result<()> { + trace!("[UpdateMasternode] Invalidating..."); + if let Some(mut mn) = services.masternode.by_id.get(&self.node_id)? { + if let Some(history_item) = mn.history.pop() { + mn.owner_address = history_item.owner_address; + mn.operator_address = history_item.operator_address; + } + + services.masternode.by_id.put(&self.node_id, &mn)?; + } + Ok(()) + } +} + +impl Index for ResignMasternode { + fn index(self, services: &Arc, ctx: &Context) -> Result<()> { + trace!("[ResignMasternode] Indexing..."); + if let Some(mn) = services.masternode.by_id.get(&self.node_id)? 
{ + services.masternode.by_id.put( + &self.node_id, + &Masternode { + resign_height: Some(ctx.block.height), + resign_tx: Some(ctx.tx.txid), + ..mn + }, + )?; + } + Ok(()) + } + + fn invalidate(&self, services: &Arc, _ctx: &Context) -> Result<()> { + trace!("[ResignMasternode] Invalidating..."); + if let Some(mn) = services.masternode.by_id.get(&self.node_id)? { + services.masternode.by_id.put( + &self.node_id, + &Masternode { + resign_height: None, + ..mn + }, + )?; + } + Ok(()) + } +} diff --git a/lib/ain-ocean/src/indexer/mod.rs b/lib/ain-ocean/src/indexer/mod.rs new file mode 100644 index 00000000000..b991f22c5e7 --- /dev/null +++ b/lib/ain-ocean/src/indexer/mod.rs @@ -0,0 +1,772 @@ +mod auction; +pub mod loan_token; +mod masternode; +pub mod oracle; +pub mod poolswap; +pub mod transaction; +pub mod tx_result; + +pub mod helper; + +use std::{ + collections::{BTreeMap, HashSet}, + sync::Arc, + time::Instant, +}; + +use ain_dftx::{deserialize, is_skipped_tx, DfTx, Stack}; +use defichain_rpc::json::blockchain::{Block, Transaction, Vin, VinStandard, Vout}; +use helper::check_if_evm_tx; +use log::trace; +pub use poolswap::{PoolSwapAggregatedInterval, AGGREGATED_INTERVALS}; + +use crate::{ + error::{Error, IndexAction}, + hex_encoder::as_sha256, + index_transaction, invalidate_transaction, + model::{ + Block as BlockMapper, BlockContext, PoolSwapAggregated, PoolSwapAggregatedAggregated, + ScriptActivity, ScriptActivityScript, ScriptActivityType, ScriptActivityTypeHex, + ScriptActivityVin, ScriptActivityVout, ScriptAggregation, ScriptAggregationAmount, + ScriptAggregationScript, ScriptAggregationStatistic, ScriptUnspent, ScriptUnspentScript, + ScriptUnspentVout, TransactionVout, TransactionVoutScript, + }, + storage::{RepositoryOps, SecondaryIndex, SortOrder}, + Result, Services, +}; + +pub trait Index { + fn index(self, services: &Arc, ctx: &Context) -> Result<()>; + + // TODO: allow dead_code at the moment + #[allow(dead_code)] + fn invalidate(&self, services: &Arc, 
ctx: &Context) -> Result<()>; +} + +#[derive(Debug)] +pub struct Context { + block: BlockContext, + tx: Transaction, + tx_idx: usize, +} + +fn log_elapsed + std::fmt::Display>(previous: Instant, msg: S) { + let now = Instant::now(); + trace!("{} in {} ms", msg, now.duration_since(previous).as_millis()); +} + +fn get_bucket(block: &Block, interval: i64) -> i64 { + block.mediantime - (block.mediantime % interval) +} + +fn index_block_start(services: &Arc, block: &Block) -> Result<()> { + let mut pool_pairs = ain_cpp_imports::get_pool_pairs(); + pool_pairs.sort_by(|a, b| b.creation_height.cmp(&a.creation_height)); + + for interval in AGGREGATED_INTERVALS { + for pool_pair in &pool_pairs { + let repository = &services.pool_swap_aggregated; + + let prevs = repository + .by_key + .list( + Some((pool_pair.id, interval, i64::MAX)), + SortOrder::Descending, + )? + .take(1) + .take_while(|item| match item { + Ok((k, _)) => k.0 == pool_pair.id && k.1 == interval, + _ => true, + }) + .map(|e| repository.by_key.retrieve_primary_value(e)) + .collect::>>()?; + + let bucket = get_bucket(block, i64::from(interval)); + + if prevs.len() == 1 && prevs[0].bucket >= bucket { + break; + } + + let aggregated = PoolSwapAggregated { + bucket, + aggregated: PoolSwapAggregatedAggregated { + amounts: Default::default(), + }, + block: BlockContext { + hash: block.hash, + height: block.height, + time: block.time, + median_time: block.mediantime, + }, + }; + + let pool_swap_aggregated_key = (pool_pair.id, interval, bucket); + let pool_swap_aggregated_id = (pool_pair.id, interval, block.hash); + + repository + .by_key + .put(&pool_swap_aggregated_key, &pool_swap_aggregated_id)?; + repository + .by_id + .put(&pool_swap_aggregated_id, &aggregated)?; + } + } + + Ok(()) +} + +fn invalidate_block_start(services: &Arc, block: &Block) -> Result<()> { + let mut pool_pairs = ain_cpp_imports::get_pool_pairs(); + pool_pairs.sort_by(|a, b| b.creation_height.cmp(&a.creation_height)); + + for interval in 
AGGREGATED_INTERVALS { + for pool_pair in &pool_pairs { + let pool_swap_aggregated_id = (pool_pair.id, interval, block.hash); + services + .pool_swap_aggregated + .by_id + .delete(&pool_swap_aggregated_id)?; + } + } + + Ok(()) +} + +fn get_vin_standard(vin: &Vin) -> Option { + match vin { + Vin::Coinbase(_vin) => None, + Vin::Standard(vin) => Some(vin.clone()), + } +} + +fn find_tx_vout( + services: &Arc, + vin: &VinStandard, + txs: &[Transaction], +) -> Result> { + let tx = txs.iter().find(|tx| tx.txid == vin.txid); + + if let Some(tx) = tx { + let vout = tx.vout.iter().find(|vout| vout.n == vin.vout); + + if let Some(vout) = vout { + let tx_vout = TransactionVout { + vout: vin.vout, + txid: tx.txid, + n: vout.n, + value: vout.value, + token_id: vout.token_id, + script: TransactionVoutScript { + r#type: vout.script_pub_key.r#type.clone(), + hex: vout.script_pub_key.hex.clone(), + }, + }; + return Ok(Some(tx_vout)); + } + } + services.transaction.vout_by_id.get(&(vin.txid, vin.vout)) +} + +fn index_script_activity_vin( + services: &Arc, + vin: &VinStandard, + vout: &TransactionVout, + ctx: &Context, +) -> Result<()> { + let tx = &ctx.tx; + let block = &ctx.block; + + let hid = as_sha256(&vout.script.hex); // as key + let script_activity = ScriptActivity { + hid, + r#type: ScriptActivityType::Vin, + type_hex: ScriptActivityTypeHex::Vin, + txid: tx.txid, + block: BlockContext { + hash: block.hash, + height: block.height, + time: block.time, + median_time: block.median_time, + }, + script: ScriptActivityScript { + r#type: vout.script.r#type.clone(), + hex: vout.script.hex.clone(), + }, + vin: Some(ScriptActivityVin { + txid: vin.txid, + n: vin.vout, + }), + vout: None, + value: vout.value, + token_id: vout.token_id, + }; + let id = ( + hid, + block.height.to_be_bytes(), + ScriptActivityTypeHex::Vin, + vin.txid, + vin.vout, + ); + services.script_activity.by_id.put(&id, &script_activity)?; + + Ok(()) +} + +fn index_script_aggregation_vin( + vout: &TransactionVout, + 
block: &BlockContext, + record: &mut BTreeMap<[u8; 32], ScriptAggregation>, +) { + let hid = as_sha256(&vout.script.hex); + let entry = record.entry(hid).or_insert_with(|| ScriptAggregation { + hid, + block: block.clone(), + script: ScriptAggregationScript { + r#type: vout.script.r#type.clone(), + hex: vout.script.hex.clone(), + }, + statistic: ScriptAggregationStatistic::default(), + amount: ScriptAggregationAmount::default(), + }); + entry.statistic.tx_out_count += 1; + entry.amount.tx_out += vout.value; +} + +fn index_script_unspent_vin( + services: &Arc, + vin: &VinStandard, + ctx: &Context, +) -> Result<()> { + let key = (ctx.block.height, vin.txid, vin.vout); + let id = services.script_unspent.by_key.get(&key)?; + if let Some(id) = id { + services.script_unspent.by_id.delete(&id)?; + services.script_unspent.by_key.delete(&key)?; + } + Ok(()) +} + +fn index_script_activity_vout(services: &Arc, vout: &Vout, ctx: &Context) -> Result<()> { + let tx = &ctx.tx; + let block = &ctx.block; + + let hid = as_sha256(&vout.script_pub_key.hex); + let script_activity = ScriptActivity { + hid, + r#type: ScriptActivityType::Vout, + type_hex: ScriptActivityTypeHex::Vout, + txid: tx.txid, + block: BlockContext { + hash: block.hash, + height: block.height, + time: block.time, + median_time: block.median_time, + }, + script: ScriptActivityScript { + r#type: vout.script_pub_key.r#type.clone(), + hex: vout.script_pub_key.hex.clone(), + }, + vin: None, + vout: Some(ScriptActivityVout { + txid: tx.txid, + n: vout.n, + }), + value: vout.value, + token_id: vout.token_id, + }; + let id = ( + hid, + block.height.to_be_bytes(), + ScriptActivityTypeHex::Vout, + tx.txid, + vout.n, + ); + services.script_activity.by_id.put(&id, &script_activity)?; + Ok(()) +} + +fn index_script_aggregation_vout( + vout: &Vout, + block: &BlockContext, + record: &mut BTreeMap<[u8; 32], ScriptAggregation>, +) { + let hid = as_sha256(&vout.script_pub_key.hex); + + let entry = record.entry(hid).or_insert_with(|| 
ScriptAggregation { + hid, + block: block.clone(), + script: ScriptAggregationScript { + r#type: vout.script_pub_key.r#type.clone(), + hex: vout.script_pub_key.hex.clone(), + }, + statistic: ScriptAggregationStatistic::default(), + amount: ScriptAggregationAmount::default(), + }); + entry.statistic.tx_in_count += 1; + entry.amount.tx_in += vout.value; +} + +fn index_script_unspent_vout(services: &Arc, vout: &Vout, ctx: &Context) -> Result<()> { + let tx = &ctx.tx; + let block = &ctx.block; + + let hid = as_sha256(&vout.script_pub_key.hex); + let script_unspent = ScriptUnspent { + id: (tx.txid, vout.n.to_be_bytes()), + hid, + block: BlockContext { + hash: block.hash, + height: block.height, + median_time: block.median_time, + time: block.time, + }, + script: ScriptUnspentScript { + r#type: vout.script_pub_key.r#type.clone(), + hex: vout.script_pub_key.hex.clone(), + }, + vout: ScriptUnspentVout { + txid: tx.txid, + n: vout.n, + value: vout.value, + token_id: vout.token_id, + }, + }; + + let id = (hid, block.height.to_be_bytes(), tx.txid, vout.n); + let key = (block.height, tx.txid, vout.n); + services.script_unspent.by_key.put(&key, &id)?; + services.script_unspent.by_id.put(&id, &script_unspent)?; + Ok(()) +} + +fn index_script(services: &Arc, ctx: &Context, txs: &[Transaction]) -> Result<()> { + trace!("[index_transaction] Indexing..."); + let start = Instant::now(); + + let is_evm_tx = check_if_evm_tx(&ctx.tx); + + let mut record = BTreeMap::new(); + + for vin in &ctx.tx.vin { + if is_evm_tx { + continue; + } + + let Some(vin) = get_vin_standard(vin) else { + continue; + }; + + index_script_unspent_vin(services, &vin, ctx)?; + + let Some(vout) = find_tx_vout(services, &vin, txs)? 
else { + if is_skipped_tx(&vin.txid) { + return Ok(()); + }; + + return Err(Error::NotFoundIndex { + action: IndexAction::Index, + r#type: "Index script TransactionVout".to_string(), + id: format!("{}-{}", vin.txid, vin.vout), + }); + }; + + index_script_activity_vin(services, &vin, &vout, ctx)?; + + // part of index_script_aggregation + index_script_aggregation_vin(&vout, &ctx.block, &mut record); + } + + for vout in &ctx.tx.vout { + index_script_unspent_vout(services, vout, ctx)?; + + if vout.script_pub_key.hex.starts_with(&[0x6a]) { + return Ok(()); + } + + index_script_activity_vout(services, vout, ctx)?; + + // part of index_script_aggregation + index_script_aggregation_vout(vout, &ctx.block, &mut record); + } + + // index_script_aggregation + for (_, mut aggregation) in record.clone() { + let repo = &services.script_aggregation; + let latest = repo + .by_id + .list(Some((aggregation.hid, u32::MAX)), SortOrder::Descending)? + .take(1) + .take_while(|item| match item { + Ok(((hid, _), _)) => &aggregation.hid == hid, + _ => true, + }) + .map(|item| { + let (_, v) = item?; + Ok(v) + }) + .collect::>>()?; + + if let Some(latest) = latest.first().cloned() { + aggregation.statistic.tx_in_count += latest.statistic.tx_in_count; + aggregation.statistic.tx_out_count += latest.statistic.tx_out_count; + + aggregation.amount.tx_in += latest.amount.tx_in; + aggregation.amount.tx_out += latest.amount.tx_out; + } + + aggregation.statistic.tx_count = + aggregation.statistic.tx_in_count + aggregation.statistic.tx_out_count; + aggregation.amount.unspent = aggregation.amount.tx_in - aggregation.amount.tx_out; + + repo.by_id + .put(&(aggregation.hid, ctx.block.height), &aggregation)?; + + record.insert(aggregation.hid, aggregation); + } + + log_elapsed(start, format!("Indexed script {:x}", ctx.tx.txid)); + Ok(()) +} + +fn invalidate_script(services: &Arc, ctx: &Context, txs: &[Transaction]) -> Result<()> { + let tx = &ctx.tx; + let block = &ctx.block; + + let is_evm_tx = 
check_if_evm_tx(tx); + + let mut hid_set = HashSet::new(); + + for vin in tx.vin.iter() { + if is_evm_tx { + continue; + } + + let Some(vin) = get_vin_standard(vin) else { + continue; + }; + + invalidate_script_unspent_vin(services, &ctx.tx, &vin)?; + + let Some(vout) = find_tx_vout(services, &vin, txs)? else { + if is_skipped_tx(&vin.txid) { + return Ok(()); + }; + + return Err(Error::NotFoundIndex { + action: IndexAction::Index, + r#type: "Index script TransactionVout".to_string(), + id: format!("{}-{}", vin.txid, vin.vout), + }); + }; + + invalidate_script_activity_vin(services, ctx.block.height, &vin, &vout)?; + + hid_set.insert(as_sha256(&vout.script.hex)); // part of invalidate_script_aggregation + } + + for vout in tx.vout.iter() { + invalidate_script_unspent_vout(services, ctx, vout)?; + + if vout.script_pub_key.hex.starts_with(&[0x6a]) { + continue; + } + + invalidate_script_activity_vout(services, ctx, vout)?; + + hid_set.insert(as_sha256(&vout.script_pub_key.hex)); // part of invalidate_script_aggregation + } + + // invalidate_script_aggregation + for hid in hid_set.into_iter() { + services + .script_aggregation + .by_id + .delete(&(hid, block.height))? + } + + Ok(()) +} + +fn invalidate_script_unspent_vin( + services: &Arc, + tx: &Transaction, + vin: &VinStandard, +) -> Result<()> { + let Some(transaction) = services.transaction.by_id.get(&vin.txid)? else { + return Err(Error::NotFoundIndex { + action: IndexAction::Invalidate, + r#type: "Transaction".to_string(), + id: vin.txid.to_string(), + }); + }; + + let Some(vout) = services.transaction.vout_by_id.get(&(vin.txid, vin.vout))? 
else { + return Err(Error::NotFoundIndex { + action: IndexAction::Invalidate, + r#type: "TransactionVout".to_string(), + id: format!("{}{}", vin.txid, vin.vout), + }); + }; + + let hid = as_sha256(&vout.script.hex); + + let script_unspent = ScriptUnspent { + id: (vout.txid, vout.n.to_be_bytes()), + hid, + block: BlockContext { + hash: transaction.block.hash, + height: transaction.block.height, + median_time: transaction.block.median_time, + time: transaction.block.time, + }, + script: ScriptUnspentScript { + r#type: vout.script.r#type, + hex: vout.script.hex, + }, + vout: ScriptUnspentVout { + txid: tx.txid, + n: vout.n, + value: vout.value, + token_id: vout.token_id, + }, + }; + + let id = ( + hid, + transaction.block.height.to_be_bytes(), + transaction.txid, + vout.n, + ); + let key = (transaction.block.height, transaction.txid, vout.n); + + services.script_unspent.by_key.put(&key, &id)?; + services.script_unspent.by_id.put(&id, &script_unspent)?; + + Ok(()) +} + +fn invalidate_script_activity_vin( + services: &Arc, + height: u32, + vin: &VinStandard, + vout: &TransactionVout, +) -> Result<()> { + let id = ( + as_sha256(&vout.script.hex), + height.to_be_bytes(), + ScriptActivityTypeHex::Vin, + vin.txid, + vin.vout, + ); + services.script_activity.by_id.delete(&id)?; + + Ok(()) +} + +fn invalidate_script_unspent_vout( + services: &Arc, + ctx: &Context, + vout: &Vout, +) -> Result<()> { + let hid = as_sha256(&vout.script_pub_key.hex); + let id = (hid, ctx.block.height.to_be_bytes(), ctx.tx.txid, vout.n); + services.script_unspent.by_id.delete(&id)?; + + Ok(()) +} + +fn invalidate_script_activity_vout( + services: &Arc, + ctx: &Context, + vout: &Vout, +) -> Result<()> { + let id = ( + as_sha256(&vout.script_pub_key.hex), + ctx.block.height.to_be_bytes(), + ScriptActivityTypeHex::Vout, + ctx.tx.txid, + vout.n, + ); + services.script_activity.by_id.delete(&id)?; + Ok(()) +} + +fn index_block_end(services: &Arc, block: &BlockContext) -> Result<()> { + 
loan_token::index_active_price(services, block)?; + Ok(()) +} + +fn invalidate_block_end(services: &Arc, block: &BlockContext) -> Result<()> { + loan_token::invalidate_active_price(services, block)?; + Ok(()) +} + +pub fn index_block(services: &Arc, block: Block) -> Result<()> { + trace!("[index_block] Indexing block..."); + let start = Instant::now(); + let block_hash = block.hash; + let transaction_count = block.tx.len(); + let block_ctx = BlockContext { + height: block.height, + hash: block_hash, + time: block.time, + median_time: block.mediantime, + }; + + index_block_start(services, &block)?; + + for (tx_idx, tx) in block.tx.clone().into_iter().enumerate() { + if is_skipped_tx(&tx.txid) { + continue; + } + let start = Instant::now(); + let ctx = Context { + block: block_ctx.clone(), + tx, + tx_idx, + }; + + index_script(services, &ctx, &block.tx)?; + + index_transaction(services, &ctx)?; + + let bytes = &ctx.tx.vout[0].script_pub_key.hex; + if bytes.len() <= 6 || bytes[0] != 0x6a || bytes[1] > 0x4e { + continue; + } + + let offset = 1 + match bytes[1] { + 0x4c => 2, + 0x4d => 3, + 0x4e => 4, + _ => 1, + }; + let raw_tx = &bytes[offset..]; + match deserialize::(raw_tx) { + Err(bitcoin::consensus::encode::Error::ParseFailed("Invalid marker")) => (), + Err(e) => return Err(e.into()), + Ok(Stack { dftx, .. 
}) => { + match dftx { + DfTx::CreateMasternode(data) => data.index(services, &ctx)?, + DfTx::UpdateMasternode(data) => data.index(services, &ctx)?, + DfTx::ResignMasternode(data) => data.index(services, &ctx)?, + DfTx::AppointOracle(data) => data.index(services, &ctx)?, + DfTx::RemoveOracle(data) => data.index(services, &ctx)?, + DfTx::UpdateOracle(data) => data.index(services, &ctx)?, + DfTx::SetOracleData(data) => data.index(services, &ctx)?, + DfTx::PoolSwap(data) => data.index(services, &ctx)?, + DfTx::SetLoanToken(data) => data.index(services, &ctx)?, + DfTx::CompositeSwap(data) => data.index(services, &ctx)?, + DfTx::PlaceAuctionBid(data) => data.index(services, &ctx)?, + _ => (), + } + log_elapsed(start, "Indexed dftx"); + } + } + } + + let block_mapper = BlockMapper { + hash: block_hash, + id: block_hash, + previous_hash: block.previousblockhash, + height: block.height, + version: block.version, + time: block.time, + median_time: block.mediantime, + transaction_count, + difficulty: block.difficulty, + masternode: block.masternode, + minter: block.minter, + minter_block_count: block.minted_blocks, + stake_modifier: block.stake_modifier.clone(), + merkleroot: block.merkleroot, + size: block.size, + size_stripped: block.strippedsize, + weight: block.weight, + }; + + index_block_end(services, &block_ctx)?; + + // services.block.raw.put(&ctx.hash, &encoded_block)?; TODO + services.block.by_id.put(&block_ctx.hash, &block_mapper)?; + services + .block + .by_height + .put(&block_ctx.height, &block_hash)?; + + log_elapsed(start, "Indexed block"); + + Ok(()) +} + +pub fn invalidate_block(services: &Arc, block: Block) -> Result<()> { + let block_ctx = BlockContext { + height: block.height, + hash: block.hash, + time: block.time, + median_time: block.mediantime, + }; + + invalidate_block_end(services, &block_ctx)?; + + // invalidate_dftx + for (tx_idx, tx) in block.tx.clone().into_iter().enumerate() { + if is_skipped_tx(&tx.txid) { + continue; + } + let start = 
Instant::now(); + let ctx = Context { + block: block_ctx.clone(), + tx, + tx_idx, + }; + + invalidate_script(services, &ctx, &block.tx)?; + + invalidate_transaction(services, &ctx)?; + + let bytes = &ctx.tx.vout[0].script_pub_key.hex; + if bytes.len() <= 6 || bytes[0] != 0x6a || bytes[1] > 0x4e { + continue; + } + + let offset = 1 + match bytes[1] { + 0x4c => 2, + 0x4d => 3, + 0x4e => 4, + _ => 1, + }; + let raw_tx = &bytes[offset..]; + match deserialize::(raw_tx) { + Err(bitcoin::consensus::encode::Error::ParseFailed("Invalid marker")) => { + println!("Discarding invalid marker"); + } + Err(e) => return Err(e.into()), + Ok(Stack { dftx, .. }) => { + match dftx { + DfTx::CreateMasternode(data) => data.invalidate(services, &ctx)?, + DfTx::UpdateMasternode(data) => data.invalidate(services, &ctx)?, + DfTx::ResignMasternode(data) => data.invalidate(services, &ctx)?, + DfTx::AppointOracle(data) => data.invalidate(services, &ctx)?, + DfTx::RemoveOracle(data) => data.invalidate(services, &ctx)?, // check + DfTx::UpdateOracle(data) => data.invalidate(services, &ctx)?, // check + DfTx::SetOracleData(data) => data.invalidate(services, &ctx)?, + DfTx::PoolSwap(data) => data.invalidate(services, &ctx)?, // check + DfTx::SetLoanToken(data) => data.invalidate(services, &ctx)?, + DfTx::CompositeSwap(data) => data.invalidate(services, &ctx)?, + DfTx::PlaceAuctionBid(data) => data.invalidate(services, &ctx)?, + _ => (), + } + log_elapsed(start, "Invalidate dftx"); + } + } + } + + invalidate_block_start(services, &block)?; + + // invalidate_block + services.block.by_height.delete(&block.height)?; + services.block.by_id.delete(&block.hash)?; + + Ok(()) +} diff --git a/lib/ain-ocean/src/indexer/oracle.rs b/lib/ain-ocean/src/indexer/oracle.rs new file mode 100644 index 00000000000..931bf38a289 --- /dev/null +++ b/lib/ain-ocean/src/indexer/oracle.rs @@ -0,0 +1,743 @@ +use std::{collections::HashSet, sync::Arc}; + +use ain_dftx::{oracles::*, Currency, Token}; +use 
bitcoin::{hashes::Hash, Txid}; +use log::trace; +use rust_decimal::{ + prelude::{ToPrimitive, Zero}, + Decimal, +}; +use rust_decimal_macros::dec; +use snafu::OptionExt; + +use crate::{ + error::{ + ArithmeticOverflowSnafu, ArithmeticUnderflowSnafu, Error, IndexAction, OtherSnafu, + ToPrimitiveSnafu, + }, + indexer::{Context, Index, Result}, + model::{ + BlockContext, Oracle, OracleHistoryId, OracleIntervalSeconds, OraclePriceActiveNext, + OraclePriceActiveNextOracles, OraclePriceAggregated, OraclePriceAggregatedInterval, + OraclePriceAggregatedIntervalAggregated, OraclePriceAggregatedIntervalAggregatedOracles, + OraclePriceAggregatedIntervalId, OraclePriceFeed, OraclePriceFeedId, OracleTokenCurrency, + PriceFeed, PriceTicker, + }, + storage::{RepositoryOps, SortOrder}, + Services, +}; + +pub const AGGREGATED_INTERVALS: [OracleIntervalSeconds; 3] = [ + OracleIntervalSeconds::FifteenMinutes, + OracleIntervalSeconds::OneDay, + OracleIntervalSeconds::OneHour, +]; + +impl Index for AppointOracle { + fn index(self, services: &Arc, ctx: &Context) -> Result<()> { + let oracle_id = ctx.tx.txid; + let price_feeds = self + .price_feeds + .iter() + .map(|pair| PriceFeed { + token: pair.token.clone(), + currency: pair.currency.clone(), + }) + .collect::>(); + + let oracle = Oracle { + owner_address: self.script.to_hex_string(), + weightage: self.weightage, + price_feeds: price_feeds.clone(), + block: ctx.block.clone(), + }; + services.oracle.by_id.put(&oracle_id, &oracle)?; + + let oracle_history_id = (oracle_id, ctx.block.height); + services + .oracle_history + .by_id + .put(&oracle_history_id, &oracle)?; + + for token_currency in price_feeds { + let id = ( + token_currency.token.clone(), + token_currency.currency.clone(), + oracle_id, + ); + + let oracle_token_currency = OracleTokenCurrency { + weightage: self.weightage, + block: ctx.block.clone(), + }; + + services + .oracle_token_currency + .by_id + .put(&id, &oracle_token_currency)?; + } + + Ok(()) + } + + fn 
invalidate(&self, services: &Arc, context: &Context) -> Result<()> { + trace!("[AppointOracle] Invalidating..."); + let oracle_id = context.tx.txid; + services.oracle.by_id.delete(&oracle_id)?; + + services + .oracle_history + .by_id + .delete(&(oracle_id, context.block.height))?; + + for currency_pair in self.price_feeds.as_ref() { + let token_currency_id = ( + currency_pair.token.clone(), + currency_pair.currency.clone(), + oracle_id, + ); + services + .oracle_token_currency + .by_id + .delete(&token_currency_id)?; + } + Ok(()) + } +} + +impl Index for RemoveOracle { + fn index(self, services: &Arc, ctx: &Context) -> Result<()> { + let oracle_id = ctx.tx.txid; + services.oracle.by_id.delete(&oracle_id)?; + + let (_, previous) = get_previous_oracle(services, oracle_id)?; + + for price_feed in &previous.price_feeds { + services.oracle_token_currency.by_id.delete(&( + price_feed.token.to_owned(), + price_feed.currency.to_owned(), + oracle_id, + ))?; + } + + Ok(()) + } + + fn invalidate(&self, services: &Arc, context: &Context) -> Result<()> { + trace!("[RemoveOracle] Invalidating..."); + let oracle_id = context.tx.txid; + let (_, previous) = get_previous_oracle(services, oracle_id)?; + + let oracle = Oracle { + owner_address: previous.owner_address, + weightage: previous.weightage, + price_feeds: previous.price_feeds.clone(), + block: previous.block, + }; + + services.oracle.by_id.put(&oracle_id, &oracle)?; + + for price_feed in previous.price_feeds { + let oracle_token_currency = OracleTokenCurrency { + weightage: oracle.weightage, + block: oracle.block.clone(), + }; + + services.oracle_token_currency.by_id.put( + &(price_feed.token, price_feed.currency, oracle_id), + &oracle_token_currency, + )?; + } + + Ok(()) + } +} + +impl Index for UpdateOracle { + fn index(self, services: &Arc, ctx: &Context) -> Result<()> { + let oracle_id = ctx.tx.txid; + let price_feeds = self + .price_feeds + .iter() + .map(|pair| PriceFeed { + token: pair.token.clone(), + currency: 
pair.currency.clone(), + }) + .collect::>(); + + let oracle = Oracle { + owner_address: self.script.to_hex_string(), + weightage: self.weightage, + price_feeds: price_feeds.clone(), + block: ctx.block.clone(), + }; + services.oracle.by_id.put(&oracle_id, &oracle)?; + services + .oracle_history + .by_id + .put(&(oracle_id, ctx.block.height), &oracle)?; + + let (_, previous) = get_previous_oracle(services, oracle_id)?; + for price_feed in &previous.price_feeds { + services.oracle_token_currency.by_id.delete(&( + price_feed.token.to_owned(), + price_feed.currency.to_owned(), + oracle_id, + ))?; + } + + for price_feed in price_feeds { + let oracle_token_currency = OracleTokenCurrency { + weightage: self.weightage, + block: ctx.block.clone(), + }; + services.oracle_token_currency.by_id.put( + &(price_feed.token, price_feed.currency, oracle_id), + &oracle_token_currency, + )?; + } + + Ok(()) + } + + fn invalidate(&self, services: &Arc, context: &Context) -> Result<()> { + trace!("[UpdateOracle] Invalidating..."); + let oracle_id = context.tx.txid; + services + .oracle_history + .by_id + .delete(&(oracle_id, context.block.height))?; + + let price_feeds = self.price_feeds.as_ref(); + for pair in price_feeds { + services.oracle_token_currency.by_id.delete(&( + pair.token.clone(), + pair.currency.clone(), + self.oracle_id, + ))?; + } + let ((prev_oracle_id, _), previous) = get_previous_oracle(services, oracle_id)?; + + let prev_oracle = Oracle { + owner_address: previous.owner_address, + weightage: previous.weightage, + price_feeds: previous.price_feeds.clone(), + block: previous.block.clone(), + }; + services.oracle.by_id.put(&(prev_oracle_id), &prev_oracle)?; + + for price_feed in &previous.price_feeds { + let oracle_token_currency = OracleTokenCurrency { + weightage: previous.weightage, + block: previous.block.clone(), + }; + services.oracle_token_currency.by_id.put( + &( + price_feed.token.to_owned(), + price_feed.currency.to_owned(), + prev_oracle_id, + ), + 
&oracle_token_currency, + )?; + } + + Ok(()) + } +} + +fn map_price_aggregated( + services: &Arc, + context: &Context, + pair: &(String, String), +) -> Result> { + let (token, currency) = pair; + let oracle_repo = &services.oracle_token_currency; + let feed_repo = &services.oracle_price_feed; + + let oracles = oracle_repo + .by_id + .list( + Some(( + token.clone(), + currency.clone(), + Txid::from_byte_array([0xffu8; 32]), + )), + SortOrder::Descending, + )? + .take_while(|item| match item { + Ok((k, _)) => k.0 == token.clone() && k.1 == currency.clone(), + _ => true, + }) + .flatten() + .collect::>(); + + let mut aggregated_total = Decimal::zero(); + let mut aggregated_count = Decimal::zero(); + let mut aggregated_weightage = Decimal::zero(); + + let oracles_len = oracles.len(); + for (id, oracle) in oracles { + if oracle.weightage == 0 { + trace!("Skipping oracle with zero weightage: {:?}", oracle); + continue; + } + + let feed_id = feed_repo.by_key.get(&(id))?; + + let Some(feed_id) = feed_id else { continue }; + + let feed = feed_repo.by_id.get(&feed_id)?; + + let Some(feed) = feed else { continue }; + + let time_diff = Decimal::from(feed.time) - Decimal::from(context.block.time); + if Decimal::abs(&time_diff) < dec!(3600) { + aggregated_count = aggregated_count + .checked_add(dec!(1)) + .context(ArithmeticOverflowSnafu)?; + aggregated_weightage = aggregated_weightage + .checked_add(Decimal::from(oracle.weightage)) + .context(ArithmeticOverflowSnafu)?; + log::trace!( + "SetOracleData weightage: {:?} * oracle_price.amount: {:?}", + aggregated_weightage, + feed.amount + ); + let weighted_amount = Decimal::from(feed.amount) + .checked_mul(Decimal::from(oracle.weightage)) + .context(ArithmeticOverflowSnafu)?; + aggregated_total += weighted_amount; + } + } + + if aggregated_count == dec!(0) { + return Ok(None); + } + + // NOTE(canonbrother): default by zero since it has not executed within the bucket yet + let aggregated_amount = aggregated_total + 
.checked_div(aggregated_weightage) + .unwrap_or_default(); + + Ok(Some(OraclePriceAggregated { + aggregated: OraclePriceActiveNext { + amount: aggregated_amount, + weightage: aggregated_weightage, + oracles: OraclePriceActiveNextOracles { + active: aggregated_count, + total: oracles_len as i32, + }, + }, + block: context.block.clone(), + })) +} + +fn index_set_oracle_data( + services: &Arc, + context: &Context, + pairs: &HashSet<(Token, Currency)>, +) -> Result<()> { + let oracle_repo = &services.oracle_price_aggregated; + let ticker_repo = &services.price_ticker; + + for pair in pairs { + let price_aggregated = map_price_aggregated(services, context, pair)?; + + let Some(price_aggregated) = price_aggregated else { + continue; + }; + + let token = pair.0.clone(); + let currency = pair.1.clone(); + + let id = ( + token.clone(), + currency.clone(), + price_aggregated.block.height, + ); + oracle_repo.by_key.put(pair, &id)?; + oracle_repo.by_id.put(&id, &price_aggregated)?; + + let key = ( + price_aggregated.aggregated.oracles.total, + price_aggregated.block.height, + token.clone(), + currency.clone(), + ); + ticker_repo.by_key.put(&key, pair)?; + ticker_repo.by_id.put( + &pair.clone(), + &PriceTicker { + price: price_aggregated, + }, + )?; + } + Ok(()) +} + +fn index_set_oracle_data_interval( + services: &Arc, + context: &Context, + pairs: &HashSet<(String, String)>, +) -> Result<()> { + for (token, currency) in pairs { + let aggregated = services.oracle_price_aggregated.by_id.get(&( + token.clone(), + currency.clone(), + context.block.height, + ))?; + + let Some(aggregated) = aggregated else { + continue; + }; + + for interval in AGGREGATED_INTERVALS { + index_interval_mapper( + services, + &context.block, + token.clone(), + currency.clone(), + &aggregated, + interval, + )?; + } + } + + Ok(()) +} + +impl Index for SetOracleData { + fn index(self, services: &Arc, context: &Context) -> Result<()> { + let feed_repo = &services.oracle_price_feed; + + let mut pairs = 
HashSet::new(); + let feeds = map_price_feeds(&self, context); + for (id, feed) in &feeds { + let token = id.0.clone(); + let currency = id.1.clone(); + let oracle_id = id.2; + let key = (token.clone(), currency.clone(), oracle_id); + pairs.insert((token, currency)); + feed_repo.by_key.put(&key, id)?; + feed_repo.by_id.put(id, feed)?; + } + + index_set_oracle_data(services, context, &pairs)?; + + index_set_oracle_data_interval(services, context, &pairs)?; + + Ok(()) + } + + fn invalidate(&self, services: &Arc, context: &Context) -> Result<()> { + let oracle_repo = &services.oracle_price_aggregated; + + let feeds = map_price_feeds(self, context); + + for ((token, currency, _, _), _) in &feeds { + let id = (token.clone(), currency.clone(), context.block.height); + + let aggregated = oracle_repo.by_id.get(&id)?; + + let Some(aggregated) = aggregated else { + continue; + }; + + for interval in AGGREGATED_INTERVALS { + invalidate_oracle_interval( + services, + &context.block, + token, + currency, + &aggregated, + &interval, + )?; + } + + oracle_repo.by_id.delete(&id)?; + } + Ok(()) + } +} + +fn map_price_feeds( + data: &SetOracleData, + ctx: &Context, +) -> Vec<(OraclePriceFeedId, OraclePriceFeed)> { + let mut feeds = Vec::new(); + let token_prices = data.token_prices.as_ref(); + for token_price in token_prices { + for token_amount in token_price.prices.as_ref() { + let id = ( + token_price.token.clone(), + token_amount.currency.clone(), + data.oracle_id, + ctx.tx.txid, + ); + + let oracle_price_feed = OraclePriceFeed { + amount: token_amount.amount, + block: ctx.block.clone(), + time: data.timestamp as i32, + txid: ctx.tx.txid, + }; + feeds.push((id, oracle_price_feed)); + } + } + feeds +} + +fn start_new_bucket( + services: &Arc, + block: &BlockContext, + token: Token, + currency: Currency, + aggregated: &OraclePriceAggregated, + interval: OracleIntervalSeconds, +) -> Result<()> { + let key = (token.clone(), currency.clone(), interval.clone()); + let id = (token, 
currency, interval, block.height); + let repo = &services.oracle_price_aggregated_interval; + repo.by_id.put( + &id, + &OraclePriceAggregatedInterval { + aggregated: OraclePriceAggregatedIntervalAggregated { + amount: aggregated.aggregated.amount, + weightage: aggregated.aggregated.weightage, + count: 1, + oracles: OraclePriceAggregatedIntervalAggregatedOracles { + active: aggregated.aggregated.oracles.active, + total: aggregated.aggregated.oracles.total, + }, + }, + block: block.clone(), + }, + )?; + repo.by_key.put(&key, &id)?; + + Ok(()) +} + +pub fn index_interval_mapper( + services: &Arc, + block: &BlockContext, + token: Token, + currency: Currency, + aggregated: &OraclePriceAggregated, + interval: OracleIntervalSeconds, +) -> Result<()> { + let repo = &services.oracle_price_aggregated_interval; + let previous = repo + .by_key + .list( + Some((token.clone(), currency.clone(), interval.clone())), + SortOrder::Descending, + )? + .take(1) + .flatten() + .collect::>(); + + if previous.is_empty() { + return start_new_bucket(services, block, token, currency, aggregated, interval); + } + + for (_, id) in previous { + let aggregated_interval = repo.by_id.get(&id)?; + if let Some(aggregated_interval) = aggregated_interval { + if block.median_time - aggregated.block.median_time > interval.clone() as i64 { + return start_new_bucket(services, block, token, currency, aggregated, interval); + } + + forward_aggregate(services, (id, &aggregated_interval), aggregated)?; + } + } + + Ok(()) +} + +pub fn invalidate_oracle_interval( + services: &Arc, + _block: &BlockContext, + token: &str, + currency: &str, + aggregated: &OraclePriceAggregated, + interval: &OracleIntervalSeconds, +) -> Result<()> { + let repo = &services.oracle_price_aggregated_interval; + let previous = repo + .by_key + .list( + Some((token.to_string(), currency.to_string(), interval.clone())), + SortOrder::Descending, + )? 
+ .take(1) + .map(|item| { + let (_, id) = item?; + let price = services + .oracle_price_aggregated_interval + .by_id + .get(&id)? + .context(OtherSnafu { + msg: "Missing oracle price aggregated interval index", + })?; + Ok((id, price)) + }) + .collect::>>()?; + + let (prev_id, previous) = &previous[0]; + + if previous.aggregated.count == 1 { + return repo.by_id.delete(prev_id); + } + + let last_price = previous.aggregated.clone(); + let count = last_price.count - 1; + + let aggregated_amount = backward_aggregate_value( + last_price.amount, + aggregated.aggregated.amount, + Decimal::from(count), + )?; + + let aggregated_weightage = backward_aggregate_value( + last_price.weightage, + aggregated.aggregated.weightage, + Decimal::from(count), + )?; + + let aggregated_active = backward_aggregate_value( + last_price.oracles.active, + aggregated.aggregated.oracles.active, + Decimal::from(last_price.count), + )?; + + let aggregated_total = backward_aggregate_value( + Decimal::from(last_price.oracles.total), + Decimal::from(aggregated.aggregated.oracles.total), + Decimal::from(last_price.count), + )?; + + let aggregated_interval = OraclePriceAggregatedInterval { + aggregated: OraclePriceAggregatedIntervalAggregated { + amount: aggregated_amount, + weightage: aggregated_weightage, + count, + oracles: OraclePriceAggregatedIntervalAggregatedOracles { + active: aggregated_active, + total: aggregated_total + .to_i32() + .context(ToPrimitiveSnafu { msg: "to_i32" })?, + }, + }, + block: previous.block.clone(), + }; + repo.by_id.put(prev_id, &aggregated_interval)?; + repo.by_key.put( + &(prev_id.0.clone(), prev_id.1.clone(), prev_id.2.clone()), + prev_id, + )?; + Ok(()) +} + +fn forward_aggregate( + services: &Arc, + previous: ( + OraclePriceAggregatedIntervalId, + &OraclePriceAggregatedInterval, + ), + aggregated: &OraclePriceAggregated, +) -> Result<()> { + let (prev_id, previous) = previous; + let last_price = previous.aggregated.clone(); + let count = last_price.count + 1; + + 
let aggregated_amount = forward_aggregate_value( + last_price.amount, + aggregated.aggregated.amount, + Decimal::from(count), + )?; + + let aggregated_weightage = forward_aggregate_value( + last_price.weightage, + aggregated.aggregated.weightage, + Decimal::from(count), + )?; + + let aggregated_active = forward_aggregate_value( + last_price.oracles.active, + aggregated.aggregated.oracles.active, + Decimal::from(last_price.count), + )?; + + let aggregated_total = forward_aggregate_value( + Decimal::from(last_price.oracles.total), + Decimal::from(aggregated.aggregated.oracles.total), + Decimal::from(last_price.count), + )?; + + let aggregated_interval = OraclePriceAggregatedInterval { + aggregated: OraclePriceAggregatedIntervalAggregated { + amount: aggregated_amount, + weightage: aggregated_weightage, + count, + oracles: OraclePriceAggregatedIntervalAggregatedOracles { + active: aggregated_active, + total: aggregated_total + .to_i32() + .context(ToPrimitiveSnafu { msg: "to_i32" })?, + }, + }, + block: previous.block.clone(), + }; + services + .oracle_price_aggregated_interval + .by_id + .put(&prev_id, &aggregated_interval)?; + services.oracle_price_aggregated_interval.by_key.put( + &(prev_id.0.clone(), prev_id.1.clone(), prev_id.2.clone()), + &prev_id, + )?; + Ok(()) +} + +fn forward_aggregate_value( + last_value: Decimal, + new_value: Decimal, + count: Decimal, +) -> Result { + (last_value * count + new_value) + .checked_div(count + dec!(1)) + .context(ArithmeticUnderflowSnafu) +} + +fn backward_aggregate_value( + last_value: Decimal, + new_value: Decimal, + count: Decimal, +) -> Result { + (last_value * count - new_value) + .checked_div(count - dec!(1)) + .context(ArithmeticUnderflowSnafu) +} + +fn get_previous_oracle( + services: &Arc, + oracle_id: Txid, +) -> Result<(OracleHistoryId, Oracle)> { + let previous = services + .oracle_history + .by_id + .list(Some((oracle_id, u32::MAX)), SortOrder::Descending)? 
+ .next() + .transpose()?; + + let Some(previous) = previous else { + return Err(Error::NotFoundIndex { + action: IndexAction::Index, + r#type: "OracleHistory".to_string(), + id: oracle_id.to_string(), + }); + }; + + Ok(previous) +} diff --git a/lib/ain-ocean/src/indexer/poolswap.rs b/lib/ain-ocean/src/indexer/poolswap.rs new file mode 100644 index 00000000000..844e28b3581 --- /dev/null +++ b/lib/ain-ocean/src/indexer/poolswap.rs @@ -0,0 +1,288 @@ +use std::{str::FromStr, sync::Arc}; + +use ain_dftx::{pool::*, COIN}; +use bitcoin::Txid; +use log::trace; +use rust_decimal::Decimal; +use rust_decimal_macros::dec; +use snafu::OptionExt; + +use super::Context; +use crate::{ + error::{ArithmeticOverflowSnafu, ArithmeticUnderflowSnafu}, + indexer::{tx_result, Index, Result}, + model::{self, PoolSwapResult, TxResult}, + storage::{RepositoryOps, SortOrder}, + Services, +}; + +pub const AGGREGATED_INTERVALS: [u32; 2] = [ + PoolSwapAggregatedInterval::OneDay as u32, + PoolSwapAggregatedInterval::OneHour as u32, +]; + +#[derive(Debug, Clone, Copy)] +pub enum PoolSwapAggregatedInterval { + OneDay = 60 * 60 * 24, + OneHour = 60 * 60, +} + +fn index_swap_aggregated( + services: &Arc, + pool_id: u32, + from_token_id: u64, + from_amount: i64, + txid: Txid, +) -> Result<()> { + for interval in AGGREGATED_INTERVALS { + let repo: &crate::PoolSwapAggregatedService = &services.pool_swap_aggregated; + let prevs = repo + .by_key + .list(Some((pool_id, interval, i64::MAX)), SortOrder::Descending)? 
+ .take(1) + .take_while(|item| match item { + Ok((k, _)) => k.0 == pool_id && k.1 == interval, + _ => true, + }) + .flatten() + .collect::>(); + + if prevs.is_empty() { + log::error!( + "index swap {txid}: Unable to find {pool_id}-{interval} for Aggregate Indexing" + ); + continue; + } + + let Some((_, id)) = prevs.first() else { + continue; + }; + + let aggregated = repo.by_id.get(id)?; + + let Some(mut aggregated) = aggregated else { + continue; + }; + + let amount = aggregated + .aggregated + .amounts + .get(&from_token_id) + .map(|amt| Decimal::from_str(amt)) + .transpose()? + .unwrap_or(dec!(0)); + + let aggregated_amount = amount + .checked_add(Decimal::from(from_amount) / Decimal::from(COIN)) + .context(ArithmeticOverflowSnafu)?; + + aggregated + .aggregated + .amounts + .insert(from_token_id, format!("{aggregated_amount:.8}")); + + repo.by_id.put(id, &aggregated)?; + } + + Ok(()) +} + +fn invalidate_swap_aggregated( + services: &Arc, + pool_id: u32, + from_token_id: u64, + from_amount: i64, + txid: Txid, +) -> Result<()> { + for interval in AGGREGATED_INTERVALS { + let repo = &services.pool_swap_aggregated; + let prevs = repo + .by_key + .list(Some((pool_id, interval, i64::MAX)), SortOrder::Descending)? + .take(1) + .take_while(|item| match item { + Ok((k, _)) => k.0 == pool_id && k.1 == interval, + _ => true, + }) + .flatten() + .collect::>(); + + if prevs.is_empty() { + log::error!( + "invalidate swap {txid}: Unable to find {pool_id}-{interval} for Aggregate Indexing" + ); + continue; + } + + let Some((_, id)) = prevs.first() else { + continue; + }; + + let aggregated = repo.by_id.get(id)?; + + let Some(mut aggregated) = aggregated else { + continue; + }; + + let amount = aggregated + .aggregated + .amounts + .get(&from_token_id) + .map(|amt| Decimal::from_str(amt)) + .transpose()? 
+ .unwrap_or(dec!(0)); + + let aggregated_amount = amount + .checked_sub(Decimal::from(from_amount) / Decimal::from(COIN)) + .context(ArithmeticUnderflowSnafu)?; + + aggregated + .aggregated + .amounts + .insert(from_token_id, format!("{aggregated_amount:.8}")); + + repo.by_id.put(id, &aggregated)?; + } + + Ok(()) +} + +impl Index for PoolSwap { + fn index(self, services: &Arc, ctx: &Context) -> Result<()> { + trace!("[Poolswap] Indexing..."); + let txid = ctx.tx.txid; + let idx = ctx.tx_idx; + let from = self.from_script; + let to = self.to_script; + let from_token_id = self.from_token_id.0; + let from_amount = self.from_amount; + let to_token_id = self.to_token_id.0; + + let Some(TxResult::PoolSwap(PoolSwapResult { to_amount, pool_id })) = + services.result.get(&txid)? + else { + // TODO: Commenting out for now, fallback should only be introduced for supporting back CLI indexing + return Err("Missing swap result".into()); + // let pair = find_pair(from_token_id, to_token_id); + // if pair.is_none() { + // return Err(format_err!("Pool not found by {from_token_id}-{to_token_id} or {to_token_id}-{from_token_id}").into()); + // } + // let pair = pair.unwrap(); + // (None, pair.id) + }; + + let swap: model::PoolSwap = model::PoolSwap { + txid, + txno: idx, + from_amount, + from_token_id, + to_token_id, + to_amount, + pool_id, + from, + to, + block: ctx.block.clone(), + }; + trace!("swap : {:?}", swap); + + services + .pool + .by_id + .put(&(pool_id, ctx.block.height, idx), &swap)?; + + index_swap_aggregated(services, pool_id, from_token_id, from_amount, txid)?; + + Ok(()) + } + + fn invalidate(&self, services: &Arc, ctx: &Context) -> Result<()> { + trace!("[PoolSwap] Invalidating..."); + let txid = ctx.tx.txid; + let from_token_id = self.from_token_id.0; + let from_amount = self.from_amount; + + let Some(TxResult::PoolSwap(PoolSwapResult { pool_id, .. })) = + services.result.get(&txid)? 
+ else { + return Err("Missing swap result".into()); + }; + + services + .pool + .by_id + .delete(&(pool_id, ctx.block.height, ctx.tx_idx))?; + tx_result::invalidate(services, &txid)?; + + invalidate_swap_aggregated(services, pool_id, from_token_id, from_amount, txid)?; + + Ok(()) + } +} + +impl Index for CompositeSwap { + fn index(self, services: &Arc, ctx: &Context) -> Result<()> { + trace!("[CompositeSwap] Indexing..."); + let txid = ctx.tx.txid; + let from_token_id = self.pool_swap.from_token_id.0; + let from_amount = self.pool_swap.from_amount; + let to_token_id = self.pool_swap.to_token_id.0; + + let Some(TxResult::PoolSwap(PoolSwapResult { to_amount, pool_id })) = + services.result.get(&txid)? + else { + trace!("Missing swap result for {}", txid.to_string()); + return Err("Missing swap result".into()); + }; + + let from = self.pool_swap.from_script; + let to = self.pool_swap.to_script; + let pools = self.pools.as_ref(); + + let pool_ids = if pools.is_empty() { + // the pool_id from finals wap is the only swap while pools is empty + Vec::from([pool_id]) + } else { + pools.iter().map(|pool| pool.id.0 as u32).collect() + }; + + for pool_id in pool_ids { + let swap = model::PoolSwap { + txid, + txno: ctx.tx_idx, + from_amount: self.pool_swap.from_amount, + from_token_id, + to_token_id, + to_amount, + pool_id, + from: from.clone(), + to: to.clone(), + block: ctx.block.clone(), + }; + services + .pool + .by_id + .put(&(pool_id, ctx.block.height, ctx.tx_idx), &swap)?; + + index_swap_aggregated(services, pool_id, from_token_id, from_amount, txid)?; + } + + Ok(()) + } + + fn invalidate(&self, services: &Arc, ctx: &Context) -> Result<()> { + trace!("[ComposoteSwap] Invalidating..."); + let from_token_id = self.pool_swap.from_token_id.0; + let from_amount = self.pool_swap.from_amount; + let txid = ctx.tx.txid; + for pool in self.pools.as_ref() { + let pool_id = pool.id.0 as u32; + services + .pool + .by_id + .delete(&(pool_id, ctx.block.height, ctx.tx_idx))?; + + 
invalidate_swap_aggregated(services, pool_id, from_token_id, from_amount, txid)?; + } + tx_result::invalidate(services, &ctx.tx.txid) + } +} diff --git a/lib/ain-ocean/src/indexer/transaction.rs b/lib/ain-ocean/src/indexer/transaction.rs new file mode 100644 index 00000000000..ab39c1070ad --- /dev/null +++ b/lib/ain-ocean/src/indexer/transaction.rs @@ -0,0 +1,144 @@ +use std::{sync::Arc, time::Instant}; + +use defichain_rpc::json::blockchain::Vin; +use log::trace; +use rust_decimal::{ + prelude::{FromPrimitive, Zero}, + Decimal, +}; +use snafu::OptionExt; + +use super::{helper::check_if_evm_tx, Context}; +use crate::{ + error::DecimalConversionSnafu, + indexer::{log_elapsed, Result}, + model::{ + Transaction as TransactionMapper, TransactionVin, TransactionVinType, TransactionVout, + TransactionVoutScript, + }, + storage::RepositoryOps, + Services, +}; + +pub fn index_transaction(services: &Arc, ctx: &Context) -> Result<()> { + trace!("[index_transaction] Indexing..."); + let start = Instant::now(); + + let idx = ctx.tx_idx; + let is_evm = check_if_evm_tx(&ctx.tx); + + let txid = ctx.tx.txid; + let vin_count = ctx.tx.vin.len(); + let vout_count = ctx.tx.vout.len(); + + let mut total_vout_value = Decimal::zero(); + let mut vouts = Vec::with_capacity(vout_count); + + // Index transaction vout + let start_vout = Instant::now(); + for vout in ctx.tx.vout.clone() { + let tx_vout = TransactionVout { + vout: vout.n, + txid, + n: vout.n, + value: vout.value, + token_id: Some(0), + script: TransactionVoutScript { + hex: vout.script_pub_key.hex, + r#type: vout.script_pub_key.r#type, + }, + }; + services + .transaction + .vout_by_id + .put(&(txid, vout.n), &tx_vout)?; + + total_vout_value += Decimal::from_f64(vout.value).context(DecimalConversionSnafu)?; + vouts.push(tx_vout); + } + log_elapsed(start_vout, "Indexed vouts"); + + // Indexing transaction vin + let start_vin = Instant::now(); + for vin in ctx.tx.vin.clone() { + if is_evm { + continue; + } + + let vin = 
TransactionVin::from_vin_and_txid(vin, txid, &vouts); + + match &vin.r#type { + TransactionVinType::Coinbase(_) => { + let vin_id = format!("{}00", ctx.tx.txid); + services.transaction.vin_by_id.put(&vin_id, &vin)?; + } + TransactionVinType::Standard((txid, vout)) => { + let vin_id = format!("{}{}{:x}", ctx.tx.txid, txid, vout); + services.transaction.vin_by_id.put(&vin_id, &vin)?; + } + } + } + + let order = idx; + + let tx = TransactionMapper { + id: txid, + txid, + order, + hash: ctx.tx.hash.clone(), + block: ctx.block.clone(), + version: ctx.tx.version, + size: ctx.tx.size, + v_size: ctx.tx.vsize, + weight: ctx.tx.weight, + total_vout_value, + lock_time: ctx.tx.locktime, + vin_count, + vout_count, + }; + log_elapsed(start_vin, "Indexed vins"); + + // Index transaction + services.transaction.by_id.put(&txid, &tx)?; + services + .transaction + .by_block_hash + .put(&(ctx.block.hash, order), &txid)?; + + log_elapsed(start, format!("Indexed transaction {:x}", txid)); + Ok(()) +} + +pub fn invalidate_transaction(services: &Arc, ctx: &Context) -> Result<()> { + services + .transaction + .by_block_hash + .delete(&(ctx.block.hash, ctx.tx_idx))?; + services.transaction.by_id.delete(&ctx.tx.txid)?; + + let is_evm = check_if_evm_tx(&ctx.tx); + for vin in ctx.tx.vin.clone().into_iter() { + if is_evm { + continue; + } + match vin { + Vin::Coinbase(_) => { + let vin_id = format!("{}00", ctx.tx.txid); + services.transaction.vin_by_id.delete(&vin_id)? + } + Vin::Standard(vin) => { + let vin_id = format!("{}{}{:x}", ctx.tx.txid, vin.txid, vin.vout); + services.transaction.vin_by_id.delete(&vin_id)? + } + } + } + + for vout in ctx.tx.vout.clone().into_iter() { + services + .transaction + .vout_by_id + .delete(&(ctx.tx.txid, vout.n))? 
+ } + + Ok(()) +} diff --git a/lib/ain-ocean/src/indexer/tx_result.rs b/lib/ain-ocean/src/indexer/tx_result.rs new file mode 100644 index 00000000000..2cdfc4d157c --- /dev/null +++ b/lib/ain-ocean/src/indexer/tx_result.rs @@ -0,0 +1,20 @@ +use std::sync::Arc; + +use bitcoin::{hashes::Hash, Txid}; + +use crate::{model::TxResult, storage::RepositoryOps, Result, Services}; + +pub fn index( + services: &Arc, + tx_type: u8, + tx_hash: [u8; 32], + result_ptr: usize, +) -> Result<()> { + let txid = Txid::from_byte_array(tx_hash); + let result = TxResult::from((tx_type, result_ptr)); + services.result.put(&txid, &result) +} + +pub fn invalidate(services: &Arc, txid: &Txid) -> Result<()> { + services.result.delete(txid) +} diff --git a/lib/ain-ocean/src/lib.rs b/lib/ain-ocean/src/lib.rs new file mode 100644 index 00000000000..4ab49ddcccf --- /dev/null +++ b/lib/ain-ocean/src/lib.rs @@ -0,0 +1,223 @@ +pub mod error; +pub mod hex_encoder; +mod indexer; +pub mod network; +mod storage; + +use std::{path::PathBuf, sync::Arc}; + +pub use api::ocean_router; +use error::Error; +pub use indexer::{ + index_block, invalidate_block, + oracle::invalidate_oracle_interval, + transaction::{index_transaction, invalidate_transaction}, + tx_result, +}; + +use parking_lot::Mutex; +use petgraph::graphmap::UnGraphMap; +use serde::Serialize; +pub mod api; +mod model; + +use storage::*; + +pub type Result = std::result::Result; + +lazy_static::lazy_static! 
{ + // Global services exposed by the library + pub static ref SERVICES: Arc = { + let datadir = ain_cpp_imports::get_datadir(); + let store = Arc::new(OceanStore::new(&PathBuf::from(datadir)).expect("Error initializing ocean services")); + Arc::new(Services::new(store)) + }; +} + +pub struct MasternodeService { + pub by_id: Masternode, + pub by_height: MasternodeByHeight, + pub stats: MasternodeStats, +} + +pub struct BlockService { + pub raw: RawBlock, + pub by_id: Block, + pub by_height: BlockByHeight, +} + +pub struct AuctionService { + by_id: VaultAuctionHistory, + by_height: VaultAuctionHistoryByHeight, +} + +pub struct PoolService { + by_id: PoolSwap, +} + +pub struct PoolSwapAggregatedService { + by_id: PoolSwapAggregated, + by_key: PoolSwapAggregatedKey, +} + +pub struct TransactionService { + by_id: Transaction, + by_block_hash: TransactionByBlockHash, + vin_by_id: TransactionVin, + vout_by_id: TransactionVout, +} + +pub struct OracleService { + by_id: Oracle, +} +pub struct OraclePriceFeedService { + by_key: OraclePriceFeedKey, + by_id: OraclePriceFeed, +} +pub struct OraclePriceActiveService { + by_key: OraclePriceActiveKey, + by_id: OraclePriceActive, +} +pub struct OraclePriceAggregatedIntervalService { + by_key: OraclePriceAggregatedIntervalKey, + by_id: OraclePriceAggregatedInterval, +} +pub struct OraclePriceAggregatedService { + by_key: OraclePriceAggregatedKey, + by_id: OraclePriceAggregated, +} + +pub struct OracleTokenCurrencyService { + by_id: OracleTokenCurrency, +} + +pub struct OracleHistoryService { + by_id: OracleHistory, +} + +pub struct PriceTickerService { + by_id: PriceTicker, + by_key: PriceTickerKey, +} + +pub struct ScriptActivityService { + by_id: ScriptActivity, +} + +pub struct ScriptAggregationService { + by_id: ScriptAggregation, +} + +pub struct ScriptUnspentService { + by_id: ScriptUnspent, + by_key: ScriptUnspentKey, +} + +#[derive(Clone, Debug, Serialize, Eq, PartialEq, Hash)] +#[serde(rename_all = "camelCase")] +pub 
struct TokenIdentifier { + pub id: String, + pub name: String, + pub symbol: String, + pub display_symbol: String, +} + +pub struct Services { + pub masternode: MasternodeService, + pub block: BlockService, + pub auction: AuctionService, + pub result: TxResult, + pub pool: PoolService, + pub pool_swap_aggregated: PoolSwapAggregatedService, + pub transaction: TransactionService, + pub oracle: OracleService, + pub oracle_price_feed: OraclePriceFeedService, + pub oracle_price_active: OraclePriceActiveService, + pub oracle_price_aggregated_interval: OraclePriceAggregatedIntervalService, + pub oracle_price_aggregated: OraclePriceAggregatedService, + pub oracle_token_currency: OracleTokenCurrencyService, + pub oracle_history: OracleHistoryService, + pub price_ticker: PriceTickerService, + pub script_activity: ScriptActivityService, + pub script_aggregation: ScriptAggregationService, + pub script_unspent: ScriptUnspentService, + pub token_graph: Arc>>, + pub store: Arc, +} + +impl Services { + #[must_use] + pub fn new(store: Arc) -> Self { + Self { + masternode: MasternodeService { + by_id: Masternode::new(Arc::clone(&store)), + by_height: MasternodeByHeight::new(Arc::clone(&store)), + stats: MasternodeStats::new(Arc::clone(&store)), + }, + block: BlockService { + raw: RawBlock::new(Arc::clone(&store)), + by_id: Block::new(Arc::clone(&store)), + by_height: BlockByHeight::new(Arc::clone(&store)), + }, + auction: AuctionService { + by_id: VaultAuctionHistory::new(Arc::clone(&store)), + by_height: VaultAuctionHistoryByHeight::new(Arc::clone(&store)), + }, + result: TxResult::new(Arc::clone(&store)), + pool: PoolService { + by_id: PoolSwap::new(Arc::clone(&store)), + }, + pool_swap_aggregated: PoolSwapAggregatedService { + by_id: PoolSwapAggregated::new(Arc::clone(&store)), + by_key: PoolSwapAggregatedKey::new(Arc::clone(&store)), + }, + transaction: TransactionService { + by_id: Transaction::new(Arc::clone(&store)), + by_block_hash: 
TransactionByBlockHash::new(Arc::clone(&store)), + vin_by_id: TransactionVin::new(Arc::clone(&store)), + vout_by_id: TransactionVout::new(Arc::clone(&store)), + }, + oracle: OracleService { + by_id: Oracle::new(Arc::clone(&store)), + }, + oracle_price_feed: OraclePriceFeedService { + by_key: OraclePriceFeedKey::new(Arc::clone(&store)), + by_id: OraclePriceFeed::new(Arc::clone(&store)), + }, + oracle_price_active: OraclePriceActiveService { + by_key: OraclePriceActiveKey::new(Arc::clone(&store)), + by_id: OraclePriceActive::new(Arc::clone(&store)), + }, + oracle_price_aggregated_interval: OraclePriceAggregatedIntervalService { + by_key: OraclePriceAggregatedIntervalKey::new(Arc::clone(&store)), + by_id: OraclePriceAggregatedInterval::new(Arc::clone(&store)), + }, + oracle_price_aggregated: OraclePriceAggregatedService { + by_key: OraclePriceAggregatedKey::new(Arc::clone(&store)), + by_id: OraclePriceAggregated::new(Arc::clone(&store)), + }, + oracle_token_currency: OracleTokenCurrencyService { + by_id: OracleTokenCurrency::new(Arc::clone(&store)), + }, + oracle_history: OracleHistoryService { + by_id: OracleHistory::new(Arc::clone(&store)), + }, + price_ticker: PriceTickerService { + by_id: PriceTicker::new(Arc::clone(&store)), + by_key: PriceTickerKey::new(Arc::clone(&store)), + }, + script_activity: ScriptActivityService { + by_id: ScriptActivity::new(Arc::clone(&store)), + }, + script_aggregation: ScriptAggregationService { + by_id: ScriptAggregation::new(Arc::clone(&store)), + }, + script_unspent: ScriptUnspentService { + by_id: ScriptUnspent::new(Arc::clone(&store)), + by_key: ScriptUnspentKey::new(Arc::clone(&store)), + }, + token_graph: Arc::new(Mutex::new(UnGraphMap::new())), + store: Arc::clone(&store), + } + } +} diff --git a/lib/ain-ocean/src/model/block.rs b/lib/ain-ocean/src/model/block.rs new file mode 100644 index 00000000000..3aeda8cad78 --- /dev/null +++ b/lib/ain-ocean/src/model/block.rs @@ -0,0 +1,44 @@ +use bitcoin::{hashes::Hash, BlockHash}; 
+use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct Block { + pub hash: BlockHash, + pub id: BlockHash, + pub previous_hash: Option, + pub height: u32, + pub version: i32, + pub time: i64, + pub median_time: i64, + pub transaction_count: usize, + pub difficulty: f64, + pub masternode: Option, + pub minter: Option, + pub minter_block_count: u64, + pub stake_modifier: String, + pub merkleroot: String, + pub size: u64, + pub size_stripped: u64, + pub weight: u64, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct BlockContext { + pub hash: BlockHash, + pub height: u32, + pub time: i64, + pub median_time: i64, +} + +impl Default for BlockContext { + fn default() -> Self { + Self { + hash: BlockHash::all_zeros(), + height: Default::default(), + time: Default::default(), + median_time: Default::default(), + } + } +} diff --git a/lib/ain-ocean/src/model/governance.rs b/lib/ain-ocean/src/model/governance.rs new file mode 100644 index 00000000000..03a2d70b7f4 --- /dev/null +++ b/lib/ain-ocean/src/model/governance.rs @@ -0,0 +1,91 @@ +use defichain_rpc::json::governance::{ProposalInfo, ProposalStatus, ProposalType}; +use serde::Serialize; +use serde_with::skip_serializing_none; + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ApiProposalInfo { + pub proposal_id: String, + pub title: String, + pub context: String, + pub context_hash: String, + pub r#type: ProposalType, + pub status: ProposalStatus, + pub current_cycle: u64, + pub total_cycles: u64, + pub creation_height: u64, + pub cycle_end_height: u64, + pub proposal_end_height: u64, + pub voting_period: u64, + pub approval_threshold: String, + pub quorum: String, + #[serde(flatten)] + pub confidence_vote: ApiProposalConfidenceVote, + #[serde(flatten)] + pub vote_info: ApiProposalVoteInfo, +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = 
"camelCase")] +pub struct ApiProposalConfidenceVote { + pub amount: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub payout_address: Option, +} + +#[skip_serializing_none] +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ApiProposalVoteInfo { + pub votes_possible: Option, + pub votes_present: Option, + pub votes_present_pct: Option, + pub votes_yes: Option, + pub votes_invalid: Option, + pub votes_neutral: Option, + pub votes_no: Option, + pub votes_yes_pct: Option, + pub fee: f64, + pub options: Option>, + pub fee_redistribution_per_vote: Option, + pub fee_redistribution_total: Option, +} + +impl From for ApiProposalInfo { + fn from(proposal: ProposalInfo) -> Self { + Self { + proposal_id: proposal.proposal_id, + title: proposal.title, + context: proposal.context, + context_hash: proposal.context_hash, + r#type: proposal.r#type, + status: proposal.status, + current_cycle: proposal.current_cycle, + total_cycles: proposal.total_cycles, + creation_height: proposal.creation_height, + cycle_end_height: proposal.cycle_end_height, + proposal_end_height: proposal.proposal_end_height, + voting_period: proposal.voting_period, + approval_threshold: proposal.approval_threshold, + quorum: proposal.quorum, + confidence_vote: ApiProposalConfidenceVote { + amount: proposal.amount.map(|a| format!("{a:.8}")), + payout_address: proposal.payout_address, + }, + vote_info: ApiProposalVoteInfo { + votes_possible: proposal.votes_possible, + votes_present: proposal.votes_present, + votes_present_pct: proposal.votes_present_pct, + votes_yes: proposal.votes_yes, + votes_invalid: proposal.votes_invalid, + votes_neutral: proposal.votes_neutral, + votes_no: proposal.votes_no, + votes_yes_pct: proposal.votes_yes_pct, + fee: proposal.fee, + options: proposal.options, + fee_redistribution_per_vote: proposal.fee_redistribution_per_vote, + fee_redistribution_total: proposal.fee_redistribution_total, + }, + } + } +} diff --git 
a/lib/ain-ocean/src/model/masternode.rs b/lib/ain-ocean/src/model/masternode.rs new file mode 100644 index 00000000000..bc4ffad00fe --- /dev/null +++ b/lib/ain-ocean/src/model/masternode.rs @@ -0,0 +1,28 @@ +use bitcoin::Txid; +use rust_decimal::Decimal; +use serde::{Deserialize, Serialize}; + +use super::BlockContext; + +#[derive(Debug, Serialize, Deserialize)] +pub struct Masternode { + pub id: Txid, + pub owner_address: String, + pub operator_address: String, + pub creation_height: u32, + pub resign_height: Option, + pub resign_tx: Option, + pub minted_blocks: i32, + pub timelock: u16, + #[serde(with = "rust_decimal::serde::str")] + pub collateral: Decimal, + pub block: BlockContext, + pub history: Vec, +} + +#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct HistoryItem { + pub owner_address: String, + pub operator_address: String, +} diff --git a/lib/ain-ocean/src/model/masternode_stats.rs b/lib/ain-ocean/src/model/masternode_stats.rs new file mode 100644 index 00000000000..05af765251d --- /dev/null +++ b/lib/ain-ocean/src/model/masternode_stats.rs @@ -0,0 +1,30 @@ +use std::collections::HashMap; + +use rust_decimal::Decimal; +use serde::{Deserialize, Serialize}; + +use super::BlockContext; + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct MasternodeStats { + pub block: BlockContext, + pub stats: MasternodeStatsData, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default)] +#[serde(rename_all = "camelCase")] +pub struct TimelockStats { + #[serde(with = "rust_decimal::serde::str")] + pub tvl: Decimal, + pub count: u32, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default)] +#[serde(rename_all = "camelCase")] +pub struct MasternodeStatsData { + pub count: u32, + #[serde(with = "rust_decimal::serde::str")] + pub tvl: Decimal, + pub locked: HashMap, +} diff --git a/lib/ain-ocean/src/model/mod.rs b/lib/ain-ocean/src/model/mod.rs new file mode 100644 index 
00000000000..b7e869aabdb --- /dev/null +++ b/lib/ain-ocean/src/model/mod.rs @@ -0,0 +1,46 @@ +mod block; +mod governance; +mod masternode; +mod masternode_stats; +pub mod oracle; +mod oracle_price_active; +mod oracle_price_aggregated; +mod oracle_price_aggregated_interval; +mod oracle_price_feed; +mod oracle_token_currency; +mod poolswap; +mod poolswap_aggregated; +mod price_ticker; +mod raw_block; +mod raw_tx; +mod script_activity; +mod script_aggregation; +mod script_unspent; +mod transaction; +mod transaction_vin; +mod transaction_vout; +mod tx_result; +mod vault_auction_batch_history; +pub use block::*; +pub use governance::*; +pub use masternode::*; +pub use masternode_stats::*; +pub use oracle::*; +pub use oracle_price_active::*; +pub use oracle_price_aggregated::*; +pub use oracle_price_aggregated_interval::*; +pub use oracle_price_feed::*; +pub use oracle_token_currency::*; +pub use poolswap::*; +pub use poolswap_aggregated::*; +pub use price_ticker::*; +pub use raw_tx::*; +// pub use raw_block::*; +pub use script_activity::*; +pub use script_aggregation::*; +pub use script_unspent::*; +pub use transaction::*; +pub use transaction_vin::*; +pub use transaction_vout::*; +pub use tx_result::*; +pub use vault_auction_batch_history::*; diff --git a/lib/ain-ocean/src/model/oracle.rs b/lib/ain-ocean/src/model/oracle.rs new file mode 100644 index 00000000000..6c4c0b95480 --- /dev/null +++ b/lib/ain-ocean/src/model/oracle.rs @@ -0,0 +1,23 @@ +use ain_dftx::{Currency, Token, Weightage}; +use bitcoin::Txid; +use serde::{Deserialize, Serialize}; + +use super::BlockContext; + +pub type OracleHistoryId = (Txid, u32); //oracleId-height + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct Oracle { + pub owner_address: String, + pub weightage: Weightage, + pub price_feeds: Vec, + pub block: BlockContext, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PriceFeed { + pub token: 
Token, + pub currency: Currency, +} diff --git a/lib/ain-ocean/src/model/oracle_price_active.rs b/lib/ain-ocean/src/model/oracle_price_active.rs new file mode 100644 index 00000000000..b913ea9d7c4 --- /dev/null +++ b/lib/ain-ocean/src/model/oracle_price_active.rs @@ -0,0 +1,36 @@ +use rust_decimal::Decimal; +use serde::{Deserialize, Serialize}; + +use super::BlockContext; + +pub type OraclePriceActiveId = (String, String, u32); //token-currency-height +pub type OraclePriceActiveKey = (String, String); //token-currency +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct OraclePriceActive { + pub id: OraclePriceActiveId, + pub key: OraclePriceActiveKey, + pub sort: String, //height + pub active: Option, + pub next: Option, + pub is_live: bool, + pub block: BlockContext, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default)] +#[serde(rename_all = "camelCase")] +pub struct OraclePriceActiveNext { + #[serde(with = "rust_decimal::serde::str")] + pub amount: Decimal, // aggregated_amount + #[serde(with = "rust_decimal::serde::str")] + pub weightage: Decimal, // aggregated_weightage + pub oracles: OraclePriceActiveNextOracles, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default)] +#[serde(rename_all = "camelCase")] +pub struct OraclePriceActiveNextOracles { + #[serde(with = "rust_decimal::serde::str")] + pub active: Decimal, // aggregated_count + pub total: i32, +} diff --git a/lib/ain-ocean/src/model/oracle_price_aggregated.rs b/lib/ain-ocean/src/model/oracle_price_aggregated.rs new file mode 100644 index 00000000000..f56b26f986a --- /dev/null +++ b/lib/ain-ocean/src/model/oracle_price_aggregated.rs @@ -0,0 +1,13 @@ +use serde::{Deserialize, Serialize}; + +use super::{BlockContext, OraclePriceActiveNext}; + +pub type OraclePriceAggregatedId = (String, String, u32); //token-currency-height +pub type OraclePriceAggregatedKey = (String, String); //token-currency + +#[derive(Serialize, Deserialize, Debug, Clone)] 
+#[serde(rename_all = "camelCase")] +pub struct OraclePriceAggregated { + pub aggregated: OraclePriceActiveNext, + pub block: BlockContext, +} diff --git a/lib/ain-ocean/src/model/oracle_price_aggregated_interval.rs b/lib/ain-ocean/src/model/oracle_price_aggregated_interval.rs new file mode 100644 index 00000000000..215c3876f73 --- /dev/null +++ b/lib/ain-ocean/src/model/oracle_price_aggregated_interval.rs @@ -0,0 +1,44 @@ +use ain_dftx::{Currency, Token}; +use rust_decimal::Decimal; +use serde::{Deserialize, Serialize}; + +use super::BlockContext; +pub type OraclePriceAggregatedIntervalId = (Token, Currency, OracleIntervalSeconds, u32); //token-currency-interval-height +pub type OraclePriceAggregatedIntervalKey = (Token, Currency, OracleIntervalSeconds); //token-currency-interval + +pub const FIFTEEN_MINUTES: isize = 15 * 60; +pub const ONE_HOUR: isize = 60 * 60; +pub const ONE_DAY: isize = 24 * 60 * 60; + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum OracleIntervalSeconds { + FifteenMinutes = FIFTEEN_MINUTES, + OneHour = ONE_HOUR, + OneDay = ONE_DAY, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct OraclePriceAggregatedInterval { + pub aggregated: OraclePriceAggregatedIntervalAggregated, + pub block: BlockContext, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct OraclePriceAggregatedIntervalAggregated { + #[serde(with = "rust_decimal::serde::str")] + pub amount: Decimal, + #[serde(with = "rust_decimal::serde::str")] + pub weightage: Decimal, + pub count: i32, + pub oracles: OraclePriceAggregatedIntervalAggregatedOracles, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct OraclePriceAggregatedIntervalAggregatedOracles { + #[serde(with = "rust_decimal::serde::str")] + pub active: Decimal, + pub total: i32, +} diff --git a/lib/ain-ocean/src/model/oracle_price_feed.rs 
b/lib/ain-ocean/src/model/oracle_price_feed.rs new file mode 100644 index 00000000000..828e6bfa1b3 --- /dev/null +++ b/lib/ain-ocean/src/model/oracle_price_feed.rs @@ -0,0 +1,15 @@ +use bitcoin::Txid; +use serde::{Deserialize, Serialize}; + +use super::BlockContext; +pub type OraclePriceFeedId = (String, String, Txid, Txid); // token-currency-oracle_id-txid +pub type OraclePriceFeedKey = (String, String, Txid); // token-currency-oracle_id + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct OraclePriceFeed { + pub txid: Txid, + pub time: i32, + pub amount: i64, + pub block: BlockContext, +} diff --git a/lib/ain-ocean/src/model/oracle_token_currency.rs b/lib/ain-ocean/src/model/oracle_token_currency.rs new file mode 100644 index 00000000000..52024cf0304 --- /dev/null +++ b/lib/ain-ocean/src/model/oracle_token_currency.rs @@ -0,0 +1,13 @@ +use ain_dftx::Weightage; +use bitcoin::Txid; +use serde::{Deserialize, Serialize}; + +use super::BlockContext; +pub type OracleTokenCurrencyId = (String, String, Txid); //token-currency-oracleId + +#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct OracleTokenCurrency { + pub weightage: Weightage, + pub block: BlockContext, +} diff --git a/lib/ain-ocean/src/model/poolswap.rs b/lib/ain-ocean/src/model/poolswap.rs new file mode 100644 index 00000000000..051deb7da71 --- /dev/null +++ b/lib/ain-ocean/src/model/poolswap.rs @@ -0,0 +1,21 @@ +use bitcoin::{ScriptBuf, Txid}; +use serde::{Deserialize, Serialize}; + +use super::BlockContext; + +pub type PoolSwapKey = (u32, u32, usize); // (pool_id, height, txno) + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PoolSwap { + pub txid: Txid, + pub txno: usize, + pub pool_id: u32, + pub from_amount: i64, + pub from_token_id: u64, + pub to_amount: i64, + pub to_token_id: u64, + pub from: ScriptBuf, + pub to: ScriptBuf, + pub block: BlockContext, +} diff --git 
a/lib/ain-ocean/src/model/poolswap_aggregated.rs b/lib/ain-ocean/src/model/poolswap_aggregated.rs new file mode 100644 index 00000000000..f1e14995c42 --- /dev/null +++ b/lib/ain-ocean/src/model/poolswap_aggregated.rs @@ -0,0 +1,23 @@ +use std::collections::HashMap; + +use bitcoin::BlockHash; +use serde::{Deserialize, Serialize}; + +use super::BlockContext; + +pub type PoolSwapAggregatedId = (u32, u32, BlockHash); // (pool_id, interval, block_hash) +pub type PoolSwapAggregatedKey = (u32, u32, i64); // (pool_id, interval, bucket) bucket is for next page query + +#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct PoolSwapAggregated { + pub bucket: i64, + pub aggregated: PoolSwapAggregatedAggregated, + pub block: BlockContext, +} + +#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct PoolSwapAggregatedAggregated { + pub amounts: HashMap, // amounts[tokenId] = BigNumber(volume) +} diff --git a/lib/ain-ocean/src/model/price_ticker.rs b/lib/ain-ocean/src/model/price_ticker.rs new file mode 100644 index 00000000000..54457622fdf --- /dev/null +++ b/lib/ain-ocean/src/model/price_ticker.rs @@ -0,0 +1,12 @@ +use serde::{Deserialize, Serialize}; + +use super::oracle_price_aggregated::OraclePriceAggregated; + +pub type PriceTickerId = (String, String); //token-currency +pub type PriceTickerKey = (i32, u32, String, String); // total-height-token-currency + +#[derive(Serialize, Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PriceTicker { + pub price: OraclePriceAggregated, +} diff --git a/lib/ain-ocean/src/model/raw_block.rs b/lib/ain-ocean/src/model/raw_block.rs new file mode 100644 index 00000000000..6219bae7c2b --- /dev/null +++ b/lib/ain-ocean/src/model/raw_block.rs @@ -0,0 +1,8 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, Default)] +#[serde(rename_all = "camelCase")] +pub struct RawBlock { + pub id: String, + pub json: String, +} diff 
--git a/lib/ain-ocean/src/model/raw_tx.rs b/lib/ain-ocean/src/model/raw_tx.rs new file mode 100644 index 00000000000..5d4f2e9e7f4 --- /dev/null +++ b/lib/ain-ocean/src/model/raw_tx.rs @@ -0,0 +1,42 @@ +use bitcoin::{Amount, Txid}; +use defichain_rpc::json::{GetRawTransactionResultVin, GetRawTransactionResultVout}; +use rust_decimal::Decimal; +use serde::{Deserialize, Serialize}; +#[derive(Serialize, Deserialize, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct RawTxDto { + pub hex: String, + pub max_fee_rate: Option, +} + +pub fn default_max_fee_rate() -> Amount { + Amount::from_btc(0.1).unwrap_or_default() +} + +#[derive(Clone, PartialEq, Eq, Debug, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct RawTransactionResult { + pub in_active_chain: Option, + pub hex: Vec, + pub txid: bitcoin::Txid, + pub hash: bitcoin::Wtxid, + pub size: usize, + pub vsize: usize, + pub version: u32, + pub locktime: u32, + pub vin: Vec, + pub vout: Vec, + pub blockhash: Option, + pub confirmations: Option, + pub time: Option, + pub blocktime: Option, +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct MempoolAcceptResult { + pub txid: Txid, + pub allowed: bool, + pub reject_reason: Option, + pub vsize: Option, + pub fees: Option, +} diff --git a/lib/ain-ocean/src/model/script_activity.rs b/lib/ain-ocean/src/model/script_activity.rs new file mode 100644 index 00000000000..daa40cf8f20 --- /dev/null +++ b/lib/ain-ocean/src/model/script_activity.rs @@ -0,0 +1,69 @@ +use std::fmt; + +use bitcoin::Txid; +use serde::{Deserialize, Serialize}; + +use super::BlockContext; +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize, Clone)] +pub enum ScriptActivityType { + Vin, + Vout, +} + +impl fmt::Display for ScriptActivityType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Vin => write!(f, "vin"), + Self::Vout => write!(f, "vout"), + } + } +} + +#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] +pub enum 
ScriptActivityTypeHex { + Vin, + Vout, +} + +impl fmt::Display for ScriptActivityTypeHex { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Vin => write!(f, "00"), + Self::Vout => write!(f, "01"), + } + } +} + +pub type ScriptActivityId = ([u8; 32], [u8; 4], ScriptActivityTypeHex, Txid, usize); // (hid, block.height, type_hex, txid, index) + +#[derive(Debug, Serialize, Deserialize)] +pub struct ScriptActivity { + pub hid: [u8; 32], // hashed id, for length compatibility reasons this is the hashed id of script + pub r#type: ScriptActivityType, + pub type_hex: ScriptActivityTypeHex, + pub txid: Txid, + pub block: BlockContext, + pub script: ScriptActivityScript, + pub vin: Option, + pub vout: Option, + pub value: f64, + pub token_id: Option, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ScriptActivityScript { + pub r#type: String, + pub hex: Vec, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ScriptActivityVin { + pub txid: Txid, + pub n: usize, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ScriptActivityVout { + pub txid: Txid, + pub n: usize, +} diff --git a/lib/ain-ocean/src/model/script_aggregation.rs b/lib/ain-ocean/src/model/script_aggregation.rs new file mode 100644 index 00000000000..7623c1b47b8 --- /dev/null +++ b/lib/ain-ocean/src/model/script_aggregation.rs @@ -0,0 +1,34 @@ +use serde::{Deserialize, Serialize}; + +use super::BlockContext; + +pub type ScriptAggregationId = ([u8; 32], u32); // (hid, block.height) + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ScriptAggregation { + pub hid: [u8; 32], + pub block: BlockContext, + pub script: ScriptAggregationScript, + pub statistic: ScriptAggregationStatistic, + pub amount: ScriptAggregationAmount, +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct ScriptAggregationScript { + pub r#type: String, + pub hex: Vec, +} + +#[derive(Debug, Serialize, Deserialize, Clone, Default)] +pub struct ScriptAggregationStatistic 
{ + pub tx_count: i32, + pub tx_in_count: i32, + pub tx_out_count: i32, +} + +#[derive(Debug, Serialize, Deserialize, Clone, Default)] +pub struct ScriptAggregationAmount { + pub tx_in: f64, + pub tx_out: f64, + pub unspent: f64, +} diff --git a/lib/ain-ocean/src/model/script_unspent.rs b/lib/ain-ocean/src/model/script_unspent.rs new file mode 100644 index 00000000000..3546b438957 --- /dev/null +++ b/lib/ain-ocean/src/model/script_unspent.rs @@ -0,0 +1,30 @@ +use bitcoin::Txid; +use serde::{Deserialize, Serialize}; + +use super::BlockContext; + +pub type ScriptUnspentId = ([u8; 32], [u8; 4], Txid, usize); // hid + block.height + txid + vout_index +pub type ScriptUnspentKey = (u32, Txid, usize); // block.height + txid + vout_index + +#[derive(Debug, Serialize, Deserialize)] +pub struct ScriptUnspent { + pub id: (Txid, [u8; 8]), + pub hid: [u8; 32], + pub block: BlockContext, + pub script: ScriptUnspentScript, + pub vout: ScriptUnspentVout, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ScriptUnspentScript { + pub r#type: String, + pub hex: Vec, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ScriptUnspentVout { + pub txid: Txid, + pub n: usize, + pub value: f64, + pub token_id: Option, +} diff --git a/lib/ain-ocean/src/model/transaction.rs b/lib/ain-ocean/src/model/transaction.rs new file mode 100644 index 00000000000..7939a18a495 --- /dev/null +++ b/lib/ain-ocean/src/model/transaction.rs @@ -0,0 +1,26 @@ +use bitcoin::{BlockHash, Txid}; +use rust_decimal::Decimal; +use serde::{Deserialize, Serialize}; + +use super::BlockContext; + +pub type TransactionByBlockHashKey = (BlockHash, usize); + +#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct Transaction { + pub id: Txid, + pub txid: Txid, + pub order: usize, // tx order + pub block: BlockContext, + pub hash: String, + pub version: u32, + pub size: u64, + pub v_size: u64, + pub weight: u64, + #[serde(with = "rust_decimal::serde::str")] + pub 
total_vout_value: Decimal, + pub lock_time: u64, + pub vin_count: usize, + pub vout_count: usize, +} diff --git a/lib/ain-ocean/src/model/transaction_vin.rs b/lib/ain-ocean/src/model/transaction_vin.rs new file mode 100644 index 00000000000..5d3817d6636 --- /dev/null +++ b/lib/ain-ocean/src/model/transaction_vin.rs @@ -0,0 +1,64 @@ +use bitcoin::Txid; +use defichain_rpc::json::blockchain::Vin; +use serde::{Deserialize, Serialize}; + +use super::TransactionVout; + +#[derive(Debug, Serialize, Deserialize)] +pub enum TransactionVinType { + Coinbase(String), + Standard((Txid, usize)), +} +#[derive(Debug, Serialize, Deserialize)] +pub struct TransactionVin { + // pub id: String, + pub txid: Txid, + pub r#type: TransactionVinType, + // pub coinbase: Option, + pub vout: Option, + pub script: Option, + pub tx_in_witness: Option>, + pub sequence: i64, +} + +impl TransactionVin { + pub fn from_vin_and_txid(vin: Vin, txid: Txid, vouts: &[TransactionVout]) -> Self { + match vin { + Vin::Coinbase(v) => Self { + r#type: TransactionVinType::Coinbase(v.coinbase), + txid, + sequence: v.sequence, + vout: None, + script: None, + tx_in_witness: None, + }, + Vin::Standard(v) => { + let vout = vouts.get(v.vout).map(|vout| TransactionVinVout { + txid: vout.txid, + value: vout.value, + n: vout.n, + token_id: vout.token_id, + script: vout.script.hex.clone(), + }); + Self { + // id: format!("{}{}{:x}", txid, v.txid, v.vout), + r#type: TransactionVinType::Standard((v.txid, v.vout)), + txid, + sequence: v.sequence, + vout, + script: v.script_sig.hex, + tx_in_witness: v.txinwitness, + } + } + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct TransactionVinVout { + pub txid: Txid, + pub n: usize, + pub value: f64, + pub token_id: Option, + pub script: Vec, +} diff --git a/lib/ain-ocean/src/model/transaction_vout.rs b/lib/ain-ocean/src/model/transaction_vout.rs new file mode 100644 index 00000000000..bef0c031bf0 --- /dev/null +++ b/lib/ain-ocean/src/model/transaction_vout.rs @@ 
-0,0 +1,21 @@ +use bitcoin::Txid; +use serde::{Deserialize, Serialize}; + +pub type TransactionVoutKey = (Txid, usize); + +#[derive(Debug, Serialize, Deserialize)] +pub struct TransactionVout { + // pub id: String, + pub vout: usize, + pub txid: Txid, + pub n: usize, + pub value: f64, + pub token_id: Option, + pub script: TransactionVoutScript, +} + +#[derive(Debug, Default, Serialize, Deserialize)] +pub struct TransactionVoutScript { + pub hex: Vec, + pub r#type: String, +} diff --git a/lib/ain-ocean/src/model/tx_result.rs b/lib/ain-ocean/src/model/tx_result.rs new file mode 100644 index 00000000000..c84a5b7dacd --- /dev/null +++ b/lib/ain-ocean/src/model/tx_result.rs @@ -0,0 +1,28 @@ +use ain_dftx::custom_tx::CustomTxType; +use serde::{Deserialize, Serialize}; + +#[repr(C)] +#[derive(Debug, Serialize, Deserialize, Clone, Copy)] +pub struct PoolSwapResult { + pub to_amount: i64, + pub pool_id: u32, +} + +#[derive(Serialize, Deserialize, Debug)] +pub enum TxResult { + PoolSwap(PoolSwapResult), + None, +} + +impl From<(u8, usize)> for TxResult { + fn from((tx_type, result_ptr): (u8, usize)) -> Self { + let dftx = CustomTxType::from(tx_type); + + match dftx { + CustomTxType::PoolSwap | CustomTxType::PoolSwapV2 => { + Self::PoolSwap(unsafe { *(result_ptr as *const PoolSwapResult) }) + } + _ => Self::None, + } + } +} diff --git a/lib/ain-ocean/src/model/vault_auction_batch_history.rs b/lib/ain-ocean/src/model/vault_auction_batch_history.rs new file mode 100644 index 00000000000..0ba11786ce8 --- /dev/null +++ b/lib/ain-ocean/src/model/vault_auction_batch_history.rs @@ -0,0 +1,21 @@ +use bitcoin::{ScriptBuf, Txid}; +use serde::{Deserialize, Serialize}; + +use super::BlockContext; + +pub type AuctionHistoryKey = (Txid, u32, Txid); // (vault_id, auction_batch_index, txid) +pub type AuctionHistoryByHeightKey = (Txid, u32, u32, usize); // (vault_id, auction_batch_index, block_height, txid) + +#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +pub 
struct VaultAuctionBatchHistory { + pub id: String, + pub key: String, + pub sort: String, + pub vault_id: Txid, + pub index: usize, + pub from: ScriptBuf, + pub amount: i64, + pub token_id: u64, + pub block: BlockContext, +} diff --git a/lib/ain-ocean/src/network.rs b/lib/ain-ocean/src/network.rs new file mode 100644 index 00000000000..c0488aeff3b --- /dev/null +++ b/lib/ain-ocean/src/network.rs @@ -0,0 +1,236 @@ +use std::fmt; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum Network { + Mainnet, + Mocknet, + Testnet, + Regtest, + Devnet, + Changi, +} + +impl Network { + #[must_use] + pub fn as_str(&self) -> &'static str { + match self { + Self::Mainnet => "mainnet", + Self::Mocknet => "mocknet", + Self::Testnet => "testnet", + Self::Regtest => "regtest", + Self::Devnet => "devnet", + Self::Changi => "changi", + } + } +} + +#[allow(clippy::from_over_into)] +impl Into for Network { + fn into(self) -> bitcoin::Network { + match self { + Self::Mainnet => bitcoin::Network::Mainnet, + Self::Testnet => bitcoin::Network::Testnet, + Self::Devnet => bitcoin::Network::Devnet, + Self::Regtest => bitcoin::Network::Regtest, + _ => bitcoin::Network::Regtest, + } + } +} + +impl std::str::FromStr for Network { + type Err = &'static str; + fn from_str(s: &str) -> Result { + match s { + "mainnet" | "main" => Ok(Self::Mainnet), + "mocknet" => Ok(Self::Mocknet), + "testnet" | "test" => Ok(Self::Testnet), + "regtest" => Ok(Self::Regtest), + "devnet" => Ok(Self::Devnet), + "changi" => Ok(Self::Changi), + _ => Err("invalid network"), + } + } +} + +impl fmt::Display for Network { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Mainnet => write!(f, "mainnet"), + Self::Mocknet => write!(f, "mocknet"), + Self::Testnet => write!(f, "testnet"), + Self::Regtest => write!(f, "regtest"), + Self::Devnet => write!(f, "devnet"), + Self::Changi => write!(f, "changi"), + } + } +} + +#[derive(Debug, Clone, Copy)] +pub struct NetworkParams { + pub 
activation_delay: u32, + pub new_activation_delay: u32, + pub resign_delay: u32, + pub new_resign_delay: u32, +} + +impl Network { + #[must_use] + pub fn params(&self) -> NetworkParams { + match self { + Self::Mainnet | Self::Testnet | Self::Devnet | Self::Changi | Self::Mocknet => { + NetworkParams { + activation_delay: 10, + new_activation_delay: 1008, + resign_delay: 60, + new_resign_delay: 2016, + } + } + Self::Regtest => NetworkParams { + activation_delay: 10, + new_activation_delay: 20, + resign_delay: 10, + new_resign_delay: 40, + }, + } + } +} + +#[derive(Debug, Clone, Copy)] +pub struct ForkHeight { + pub df1_amk_height: u32, + pub df2_bayfront_height: u32, + pub df3_bayfront_marina_height: u32, + pub df4_bayfront_gardens_height: u32, + pub df5_clarke_quay_height: u32, + pub df6_dakota_height: u32, + pub df7_dakota_crescent_height: u32, + pub df8_eunos_height: u32, + pub df9_eunos_kampung_height: u32, + pub df10_eunos_paya_height: u32, + pub df11_fort_canning_height: u32, + pub df12_fort_canning_museum_height: u32, + pub df13_fort_canning_park_height: u32, + pub df14_fort_canning_hill_height: u32, + pub df15_fort_canning_road_height: u32, + pub df16_fort_canning_crunch_height: u32, + pub df17_fort_canning_spring_height: u32, + pub df18_fort_canning_great_world_height: u32, + pub df19_fort_canning_epilogue_height: u32, + pub df20_grand_central_height: u32, + pub df21_grand_central_epilogue_height: u32, + pub df22_metachain_height: u32, + pub df23_height: u32, + pub df24_height: u32, +} + +impl Network { + #[must_use] + pub fn fork_heights(&self) -> ForkHeight { + match self { + Self::Mainnet | Self::Mocknet => ForkHeight { + df1_amk_height: 356_500, // Oct 12th, 2020., + df2_bayfront_height: 405_000, // Nov 2nd, 2020., + df3_bayfront_marina_height: 465_150, // Nov 28th, 2020., + df4_bayfront_gardens_height: 488_300, // Dec 8th, 2020., + df5_clarke_quay_height: 595_738, // Jan 24th, 2021., + df6_dakota_height: 678_000, // Mar 1st, 2021., + 
df7_dakota_crescent_height: 733_000, // Mar 25th, 2021., + df8_eunos_height: 894_000, // Jun 3rd, 2021., + df9_eunos_kampung_height: 895_743, // Jun 4th, 2021., + df10_eunos_paya_height: 1_072_000, // Aug 5th, 2021., + df11_fort_canning_height: 1_367_000, // Nov 15th, 2021., + df12_fort_canning_museum_height: 1_430_640, // Dec 7th, 2021., + df13_fort_canning_park_height: 1_503_143, // Jan 2nd, 2022., + df14_fort_canning_hill_height: 1_604_999, // Feb 7th, 2022., + df15_fort_canning_road_height: 1_786_000, // April 11th, 2022., + df16_fort_canning_crunch_height: 1_936_000, // June 2nd, 2022., + df17_fort_canning_spring_height: 2_033_000, // July 6th, 2022., + df18_fort_canning_great_world_height: 2_212_000, // Sep 7th, 2022., + df19_fort_canning_epilogue_height: 2_257_500, // Sep 22nd, 2022., + df20_grand_central_height: 2_479_000, // Dec 8th, 2022., + df21_grand_central_epilogue_height: 2_574_000, // Jan 10th, 2023., + df22_metachain_height: 3_462_000, // Nov 15th, 2023., + df23_height: u32::MAX, + df24_height: u32::MAX, + }, + Self::Testnet => ForkHeight { + df1_amk_height: 150, + df2_bayfront_height: 3000, + df3_bayfront_marina_height: 90470, + df4_bayfront_gardens_height: 101_342, + df5_clarke_quay_height: 155_000, + df6_dakota_height: 220_680, + df7_dakota_crescent_height: 287_700, + df8_eunos_height: 354_950, + df9_eunos_kampung_height: 354_950, + df10_eunos_paya_height: 463_300, + df11_fort_canning_height: 686_200, + df12_fort_canning_museum_height: 724_000, + df13_fort_canning_park_height: 828_800, + df14_fort_canning_hill_height: 828_900, + df15_fort_canning_road_height: 893_700, + df16_fort_canning_crunch_height: 1_011_600, + df17_fort_canning_spring_height: 1_086_000, + df18_fort_canning_great_world_height: 1_150_000, + df19_fort_canning_epilogue_height: 1_150_010, + df20_grand_central_height: 1_150_020, + df21_grand_central_epilogue_height: 1_150_030, + df22_metachain_height: 1_150_040, + df23_height: 1_507_200, + df24_height: u32::MAX, + }, + 
Self::Regtest => ForkHeight { + df1_amk_height: 10_000_000, + df2_bayfront_height: 10_000_000, + df3_bayfront_marina_height: 10_000_000, + df4_bayfront_gardens_height: 10_000_000, + df5_clarke_quay_height: 10_000_000, + df6_dakota_height: 10_000_000, + df7_dakota_crescent_height: 10_000_000, + df8_eunos_height: 10_000_000, + df9_eunos_kampung_height: 10_000_000, + df10_eunos_paya_height: 10_000_000, + df11_fort_canning_height: 10_000_000, + df12_fort_canning_museum_height: 10_000_000, + df13_fort_canning_park_height: 10_000_000, + df14_fort_canning_hill_height: 10_000_000, + df15_fort_canning_road_height: 10_000_000, + df16_fort_canning_crunch_height: 10_000_000, + df17_fort_canning_spring_height: 10_000_000, + df18_fort_canning_great_world_height: 10_000_000, + df19_fort_canning_epilogue_height: 10_000_000, + df20_grand_central_height: 10_000_000, + df21_grand_central_epilogue_height: 10_000_000, + df22_metachain_height: 10_000_000, + df23_height: 10_000_000, + df24_height: 10_000_000, + }, + Self::Devnet | Self::Changi => ForkHeight { + df1_amk_height: 150, + df2_bayfront_height: 3000, + df3_bayfront_marina_height: 90470, + df4_bayfront_gardens_height: 101_342, + df5_clarke_quay_height: 155_000, + df6_dakota_height: 220_680, + df7_dakota_crescent_height: 287_700, + df8_eunos_height: 354_950, + df9_eunos_kampung_height: 354_950, + df10_eunos_paya_height: 463_300, + df11_fort_canning_height: 686_200, + df12_fort_canning_museum_height: 724_000, + df13_fort_canning_park_height: 828_800, + df14_fort_canning_hill_height: 828_900, + df15_fort_canning_road_height: 893_700, + df16_fort_canning_crunch_height: 1_011_600, + df17_fort_canning_spring_height: 1_086_000, + df18_fort_canning_great_world_height: 1_223_000, + df19_fort_canning_epilogue_height: 1_244_000, + df20_grand_central_height: 1_366_000, + df21_grand_central_epilogue_height: 1_438_200, + df22_metachain_height: 1_586_750, + df23_height: 1_985_600, + df24_height: u32::MAX, + }, + } + } +} diff --git 
a/lib/ain-ocean/src/storage/macros.rs b/lib/ain-ocean/src/storage/macros.rs new file mode 100644 index 00000000000..ca95ab1d293 --- /dev/null +++ b/lib/ain-ocean/src/storage/macros.rs @@ -0,0 +1,110 @@ +#[macro_export] +macro_rules! define_table { + ( + $(#[$meta:meta])* + $vis:vis struct $name:ident { + key_type = $key_type:ty, + value_type = $value_type:ty, + $(custom_key = { $($custom_key:tt)* },)? + $(custom_value = { $($custom_value:tt)* },)? + } + $(, SecondaryIndex = $primary_column:ty)? + $(, InitialKeyProvider = |$pk:ident: $pk_type:ty| $initial_key_expr:expr)? + ) => { + // Repository definition + $(#[$meta])* + $vis struct $name { + #[allow(unused)] + pub store: Arc, + col: LedgerColumn<$name>, + } + + impl ColumnName for $name { + const NAME: &'static str = stringify!($name); + } + + impl Column for $name { + type Index = $key_type; + + $( + $($custom_key)* + )? + } + + impl TypedColumn for $name { + type Type = $value_type; + + $( + $($custom_value)* + )? + } + + + impl $name { + pub fn new(store: Arc) -> Self { + Self { + col: store.column(), + store, + } + } + + $( + #[allow(unused)] + pub fn get_highest(&self) -> Result::Type>> { + match self.col.iter(None, SortOrder::Descending.into())?.next() { + None => Ok(None), + Some(Ok((_, id))) => { + let col = self.store.column::<$primary_column>(); + Ok(col.get(&id)?) + } + Some(Err(e)) => Err(e.into()), + } + } + )? + } + + impl RepositoryOps<$key_type, $value_type> for $name { + type ListItem = std::result::Result<($key_type, $value_type), ain_db::DBError>; + + fn get(&self, id: &$key_type) -> Result> { + Ok(self.col.get(id)?) + } + + fn put(&self, id: &$key_type, item: &$value_type) -> Result<()> { + Ok(self.col.put(id, item)?) + } + + fn delete(&self, id: &$key_type) -> Result<()> { + Ok(self.col.delete(id)?) 
+ } + + fn list<'a>(&'a self, from: Option<$key_type>, dir: $crate::storage::SortOrder) -> Result + 'a>> { + let it = self.col.iter(from, dir.into())?; + Ok(Box::new(it)) + } + } + + $( + impl SecondaryIndex<$key_type, $value_type> for $name { + type Value = <$primary_column as TypedColumn>::Type; + + fn retrieve_primary_value(&self, el: Self::ListItem) -> Result { + let (_, id) = el?; + let col = self.store.column::<$primary_column>(); + let value = col.get(&id)?.ok_or(Error::SecondaryIndex)?; + Ok(value) + } + } + )? + + $( + impl InitialKeyProvider<$key_type, $value_type> for $name { + type PartialKey = $pk_type; + + fn initial_key($pk: Self::PartialKey) -> $key_type { + $initial_key_expr + } + } + )? + }; +} diff --git a/lib/ain-ocean/src/storage/mod.rs b/lib/ain-ocean/src/storage/mod.rs new file mode 100644 index 00000000000..6ecd284e6e3 --- /dev/null +++ b/lib/ain-ocean/src/storage/mod.rs @@ -0,0 +1,489 @@ +#[macro_use] +mod macros; + +mod ocean_store; + +use std::sync::Arc; + +use ain_db::{Column, ColumnName, DBError, LedgerColumn, Result as DBResult, TypedColumn}; +use bitcoin::{hashes::Hash, BlockHash, Txid}; +pub use ocean_store::OceanStore; +use rocksdb::Direction; + +use crate::{define_table, model, Error, Result}; + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum SortOrder { + Ascending, + Descending, +} + +impl From for Direction { + fn from(sort_order: SortOrder) -> Self { + match sort_order { + SortOrder::Ascending => Self::Forward, + SortOrder::Descending => Self::Reverse, + } + } +} + +pub trait RepositoryOps { + type ListItem; + fn get(&self, key: &K) -> Result>; + fn put(&self, key: &K, value: &V) -> Result<()>; + fn delete(&self, key: &K) -> Result<()>; + fn list<'a>( + &'a self, + from: Option, + direction: SortOrder, + ) -> Result + 'a>>; +} + +pub trait InitialKeyProvider: RepositoryOps { + type PartialKey; + fn initial_key(pk: Self::PartialKey) -> K; +} + +pub trait SecondaryIndex: RepositoryOps { + type Value; + fn 
retrieve_primary_value(&self, el: Self::ListItem) -> Result; +} + +define_table! { + #[derive(Debug)] + pub struct Block { + key_type = BlockHash, + value_type = model::Block, + } +} + +define_table! { + #[derive(Debug)] + pub struct BlockByHeight { + key_type = u32, + value_type = BlockHash, + custom_key = { + fn key(index: &Self::Index) -> DBResult> { + Ok(index.to_be_bytes().to_vec()) + } + + fn get_key(raw_key: Box<[u8]>) -> DBResult { + if raw_key.len() != 4 { + return Err(DBError::WrongKeyLength); + } + let mut array = [0u8; 4]; + array.copy_from_slice(&raw_key); + Ok(u32::from_be_bytes(array)) + } + }, + }, + SecondaryIndex = Block +} + +define_table! { + #[derive(Debug)] + pub struct Masternode { + key_type = Txid, + value_type = model::Masternode, + } +} + +define_table! { + #[derive(Debug)] + pub struct MasternodeByHeight { + key_type = (u32, Txid), + value_type = u8, + } +} + +impl SecondaryIndex<(u32, Txid), u8> for MasternodeByHeight { + type Value = model::Masternode; + + fn retrieve_primary_value(&self, el: Self::ListItem) -> Result { + let ((_, id), _) = el?; + let col = self.store.column::(); + let tx = col.get(&id)?.ok_or(Error::SecondaryIndex)?; + Ok(tx) + } +} + +define_table! { + #[derive(Debug)] + pub struct MasternodeStats { + key_type = u32, + value_type = model::MasternodeStats, + custom_key = { + fn key(index: &Self::Index) -> DBResult> { + Ok(index.to_be_bytes().to_vec()) + } + + fn get_key(raw_key: Box<[u8]>) -> DBResult { + if raw_key.len() != 4 { + return Err(DBError::WrongKeyLength); + } + let mut array = [0u8; 4]; + array.copy_from_slice(&raw_key); + Ok(u32::from_be_bytes(array)) + } + }, + } +} + +impl MasternodeStats { + pub fn get_latest(&self) -> Result> { + match self.col.iter(None, SortOrder::Descending.into())?.next() { + None => Ok(None), + Some(Ok((_, id))) => Ok(Some(id)), + Some(Err(e)) => Err(e.into()), + } + } +} + +define_table! 
{ + #[derive(Debug)] + pub struct Oracle { + key_type = Txid, + value_type = model::Oracle, + } +} + +define_table! { + #[derive(Debug)] + pub struct OracleHistory { + key_type = model::OracleHistoryId, + value_type = model::Oracle, + } +} + +define_table! { + #[derive(Debug)] + pub struct OraclePriceActive { + key_type = model::OraclePriceActiveId, + value_type = model::OraclePriceActive, + } +} + +define_table! { + #[derive(Debug)] + pub struct OraclePriceActiveKey { + key_type = model::OraclePriceActiveKey, + value_type = model::OraclePriceActiveId, + }, + SecondaryIndex = OraclePriceActive +} + +define_table! { + #[derive(Debug)] + pub struct OraclePriceAggregated { + key_type = model::OraclePriceAggregatedId, + value_type = model::OraclePriceAggregated, + } +} + +define_table! { + #[derive(Debug)] + pub struct OraclePriceAggregatedKey { + key_type = model::OraclePriceAggregatedKey, + value_type = model::OraclePriceAggregatedId, + }, + SecondaryIndex = OraclePriceAggregated +} + +define_table! { + #[derive(Debug)] + pub struct OraclePriceAggregatedInterval { + key_type = model::OraclePriceAggregatedIntervalId, + value_type = model::OraclePriceAggregatedInterval, + } +} + +define_table! { + #[derive(Debug)] + pub struct OraclePriceAggregatedIntervalKey { + key_type = model::OraclePriceAggregatedIntervalKey, + value_type = model::OraclePriceAggregatedIntervalId, + }, + SecondaryIndex = OraclePriceAggregatedInterval +} + +define_table! { + #[derive(Debug)] + pub struct OraclePriceFeed { + key_type = model::OraclePriceFeedId, + value_type = model::OraclePriceFeed, + } +} + +define_table! { + #[derive(Debug)] + pub struct OraclePriceFeedKey { + key_type = model::OraclePriceFeedKey, + value_type = model::OraclePriceFeedId, + }, + SecondaryIndex = OraclePriceFeed +} + +define_table! { + #[derive(Debug)] + pub struct OracleTokenCurrency { + key_type = model::OracleTokenCurrencyId, + value_type = model::OracleTokenCurrency, + } +} + +define_table! 
{ + #[derive(Debug)] + pub struct PoolSwap { + key_type = model::PoolSwapKey, + value_type = model::PoolSwap, + }, + InitialKeyProvider = |pk: u32| (pk, u32::MAX, usize::MAX) +} + +define_table! { + #[derive(Debug)] + pub struct PoolSwapAggregated { + key_type = model::PoolSwapAggregatedId, + value_type = model::PoolSwapAggregated, + } +} + +define_table! { + #[derive(Debug)] + pub struct PoolSwapAggregatedKey { + key_type = model::PoolSwapAggregatedKey, + value_type = model::PoolSwapAggregatedId, + custom_key = { + fn key(index: &Self::Index) -> DBResult> { + let (pool_id, interval, bucket) = index; + let mut vec = Vec::with_capacity(16); + vec.extend_from_slice(&pool_id.to_be_bytes()); + vec.extend_from_slice(&interval.to_be_bytes()); + vec.extend_from_slice(&bucket.to_be_bytes()); + Ok(vec) + } + + fn get_key(raw_key: Box<[u8]>) -> DBResult { + if raw_key.len() != 16 { + return Err(DBError::WrongKeyLength); + } + let pool_id = u32::from_be_bytes( + raw_key[0..4] + .try_into() + .map_err(|_| DBError::WrongKeyLength)?, + ); + let interval = u32::from_be_bytes( + raw_key[4..8] + .try_into() + .map_err(|_| DBError::WrongKeyLength)?, + ); + let bucket = i64::from_be_bytes( + raw_key[8..16] + .try_into() + .map_err(|_| DBError::WrongKeyLength)?, + ); + + Ok((pool_id, interval, bucket)) + } + }, + }, + SecondaryIndex = PoolSwapAggregated +} + +define_table! { + #[derive(Debug)] + pub struct PriceTicker { + key_type = model::PriceTickerId, + value_type = model::PriceTicker, + } +} + +define_table! { + #[derive(Debug)] + pub struct PriceTickerKey { + key_type = model::PriceTickerKey, + value_type = model::PriceTickerId, + }, + SecondaryIndex = PriceTicker +} + +define_table! { + #[derive(Debug)] + pub struct RawBlock { + key_type = BlockHash, + value_type = String, + } +} + +define_table! { + #[derive(Debug)] + pub struct ScriptActivity { + key_type = model::ScriptActivityId, + value_type = model::ScriptActivity, + } +} + +define_table! 
{ + #[derive(Debug)] + pub struct ScriptAggregation { + key_type = model::ScriptAggregationId, + value_type = model::ScriptAggregation, + } +} + +define_table! { + #[derive(Debug)] + pub struct ScriptUnspent { + key_type = model::ScriptUnspentId, + value_type = model::ScriptUnspent, + } +} + +define_table! { + #[derive(Debug)] + pub struct ScriptUnspentKey { + key_type = model::ScriptUnspentKey, + value_type = model::ScriptUnspentId, + }, + SecondaryIndex = ScriptUnspent +} + +define_table! { + #[derive(Debug)] + pub struct Transaction { + key_type = Txid, + value_type = model::Transaction, + } +} + +define_table! { + #[derive(Debug)] + pub struct TransactionByBlockHash { + key_type = model::TransactionByBlockHashKey, + value_type = Txid, + custom_key = { + fn key(index: &Self::Index) -> DBResult> { + let (hash, txno) = index; + let mut vec = hash.as_byte_array().to_vec(); + vec.extend_from_slice(&txno.to_be_bytes()); + Ok(vec) + } + + fn get_key(raw_key: Box<[u8]>) -> DBResult { + if raw_key.len() != 40 { + return Err(DBError::WrongKeyLength); + } + let mut hash_array = [0u8; 32]; + hash_array.copy_from_slice(&raw_key[..32]); + let mut txno_array = [0u8; 8]; + txno_array.copy_from_slice(&raw_key[32..]); + + let hash = BlockHash::from_byte_array(hash_array); + let txno = usize::from_be_bytes(txno_array); + Ok((hash, txno)) + } + }, + }, + SecondaryIndex = Transaction, + InitialKeyProvider = |pk: BlockHash| (pk, 0) +} + +define_table! { + #[derive(Debug)] + pub struct TransactionVin { + key_type = String, + value_type = model::TransactionVin, + }, + InitialKeyProvider = |pk: Txid| format!("{}00", pk) +} + +define_table! 
{ + #[derive(Debug)] + pub struct TransactionVout { + key_type = model::TransactionVoutKey, + value_type = model::TransactionVout, + custom_key = { + fn key(index: &Self::Index) -> DBResult> { + let (txid, txno) = index; + let mut vec = txid.as_byte_array().to_vec(); + vec.extend_from_slice(&txno.to_be_bytes()); + Ok(vec) + } + + fn get_key(raw_key: Box<[u8]>) -> DBResult { + if raw_key.len() != 40 { + return Err(DBError::WrongKeyLength); + } + let mut hash_array = [0u8; 32]; + hash_array.copy_from_slice(&raw_key[..32]); + let mut txno_array = [0u8; 8]; + txno_array.copy_from_slice(&raw_key[32..]); + + let txid = Txid::from_byte_array(hash_array); + let txno = usize::from_be_bytes(txno_array); + Ok((txid, txno)) + } + }, + } +} + +define_table! { + #[derive(Debug)] + pub struct TxResult { + key_type = Txid, + value_type = model::TxResult, + } +} + +define_table! { + #[derive(Debug)] + pub struct VaultAuctionHistory { + key_type = model::AuctionHistoryKey, + value_type = model::VaultAuctionBatchHistory, + } +} + +define_table! 
{ + #[derive(Debug)] + pub struct VaultAuctionHistoryByHeight { + key_type = model::AuctionHistoryByHeightKey, + value_type = model::AuctionHistoryKey, + }, + SecondaryIndex = VaultAuctionHistory +} + +pub const COLUMN_NAMES: [&str; 33] = [ + Block::NAME, + BlockByHeight::NAME, + MasternodeStats::NAME, + Masternode::NAME, + MasternodeByHeight::NAME, + Oracle::NAME, + OracleHistory::NAME, + OraclePriceActive::NAME, + OraclePriceActiveKey::NAME, + OraclePriceAggregated::NAME, + OraclePriceAggregatedKey::NAME, + OraclePriceAggregatedInterval::NAME, + OraclePriceAggregatedIntervalKey::NAME, + OraclePriceFeed::NAME, + OraclePriceFeedKey::NAME, + OracleTokenCurrency::NAME, + PoolSwapAggregated::NAME, + PoolSwapAggregatedKey::NAME, + PoolSwap::NAME, + PriceTicker::NAME, + PriceTickerKey::NAME, + RawBlock::NAME, + ScriptActivity::NAME, + ScriptAggregation::NAME, + ScriptUnspent::NAME, + ScriptUnspentKey::NAME, + Transaction::NAME, + TransactionByBlockHash::NAME, + TransactionVin::NAME, + TransactionVout::NAME, + TxResult::NAME, + VaultAuctionHistory::NAME, + VaultAuctionHistoryByHeight::NAME, +]; diff --git a/lib/ain-ocean/src/storage/ocean_store.rs b/lib/ain-ocean/src/storage/ocean_store.rs new file mode 100644 index 00000000000..ff2e9ad63c7 --- /dev/null +++ b/lib/ain-ocean/src/storage/ocean_store.rs @@ -0,0 +1,96 @@ +use ain_db::{Column, ColumnName, LedgerColumn, Rocks}; +use rocksdb::{BlockBasedOptions, Cache, Options, SliceTransform}; +use std::{fs, marker::PhantomData, path::Path, sync::Arc}; + +use super::{ScriptActivity, COLUMN_NAMES}; +use crate::Result; + +// ScriptActivity is the heaviest CF we use, sitting at 20% of disk and supporting listtransactions query. +// Optimized here for the typical usage of sequential read of 10 elements, averaging 300bytes per entry. 
+fn get_scriptactivity_cf_options() -> Options { + let mut options = Options::default(); + + // Optimize for small, targeted queries + options.set_optimize_filters_for_hits(true); + options.set_level_compaction_dynamic_level_bytes(true); + + let mut block_opts = BlockBasedOptions::default(); + block_opts.set_block_size(4 * 1024); // 4 KB ~(300 * 10) + block_opts.set_format_version(5); + block_opts.set_cache_index_and_filter_blocks(true); + block_opts.set_pin_l0_filter_and_index_blocks_in_cache(true); + + // Bloom filters + block_opts.set_bloom_filter(10.0, false); + + // Block cache + let cache = Cache::new_lru_cache(256 * 1024 * 1024); // 256 MB + block_opts.set_block_cache(&cache); + + options.set_block_based_table_factory(&block_opts); + + // Compression + options.set_compression_type(rocksdb::DBCompressionType::Zstd); + options.set_bottommost_compression_type(rocksdb::DBCompressionType::Zstd); + + // Write buffer + options.set_write_buffer_size(32 * 1024 * 1024); // 32 MB + options.set_max_write_buffer_number(3); + options.set_min_write_buffer_number_to_merge(1); + + // Level options + options.set_max_bytes_for_level_base(256 * 1024 * 1024); // 256 MB + options.set_max_bytes_for_level_multiplier(10.0); + + // Target file size + options.set_target_file_size_base(32 * 1024 * 1024); // 32 MB + + options.set_prefix_extractor(SliceTransform::create_fixed_prefix(32)); // hid length + options.set_memtable_whole_key_filtering(true); + options.set_memtable_prefix_bloom_ratio(0.1); + + // Optimize for point lookups + options.optimize_for_point_lookup(4096); + + options +} + +#[derive(Debug, Clone)] +pub struct OceanStore(Arc); + +impl OceanStore { + pub fn new(path: &Path) -> Result { + let path = path.join("ocean"); + fs::create_dir_all(&path)?; + + let cf_with_opts = COLUMN_NAMES + .into_iter() + .map(|name| match name { + ScriptActivity::NAME => (name, Some(get_scriptactivity_cf_options())), + _ => (name, None), + }) + .collect::>(); + + let backend = 
Arc::new(Rocks::open(&path, cf_with_opts, None)?); + + Ok(Self(backend)) + } + + pub fn column(&self) -> LedgerColumn + where + C: Column + ColumnName, + { + LedgerColumn { + backend: Arc::clone(&self.0), + column: PhantomData, + } + } + + pub fn dump_table_sizes(&self) -> Result<()> { + Ok(self.0.dump_table_sizes(&COLUMN_NAMES)?) + } + + pub fn compact(&self) { + self.0.compact(); + } +} diff --git a/lib/ain-rs-exports/Cargo.toml b/lib/ain-rs-exports/Cargo.toml index 80184d53c25..4a6616e1df2 100644 --- a/lib/ain-rs-exports/Cargo.toml +++ b/lib/ain-rs-exports/Cargo.toml @@ -12,8 +12,9 @@ crate-type = ["staticlib"] [dependencies] ain-evm = { path = "../ain-evm" } ain-grpc = { path = "../ain-grpc" } +ain-ocean = { path = "../ain-ocean" } ain-contracts = { path = "../ain-contracts" } -ain-macros = { path = "../ain-macros" } +ain-macros.workspace = true ain-cpp-imports = { path = "../ain-cpp-imports" } ethereum.workspace = true @@ -24,6 +25,7 @@ hex.workspace = true serde_json.workspace = true anyhow.workspace = true ethabi.workspace = true +defichain-rpc.workspace = true # Build cxx.workspace = true diff --git a/lib/ain-rs-exports/src/core.rs b/lib/ain-rs-exports/src/core.rs index 02d1d34c22c..e5b78e61bea 100644 --- a/lib/ain-rs-exports/src/core.rs +++ b/lib/ain-rs-exports/src/core.rs @@ -1,7 +1,8 @@ -use crate::{ffi, prelude::*}; use ain_macros::ffi_fallible; use anyhow::Result; +use crate::{ffi, prelude::*}; + #[ffi_fallible] pub fn ain_rs_preinit() -> Result<()> { ain_grpc::preinit(); @@ -44,6 +45,12 @@ pub fn ain_rs_init_network_subscriptions_service(addr: String) -> Result<()> { Ok(()) } +#[ffi_fallible] +pub fn ain_rs_init_network_rest_ocean(addr: String) -> Result<()> { + ain_grpc::init_network_rest_ocean(addr)?; + Ok(()) +} + #[ffi_fallible] pub fn ain_rs_stop_network_services() -> Result<()> { ain_grpc::stop_network_services()?; diff --git a/lib/ain-rs-exports/src/lib.rs b/lib/ain-rs-exports/src/lib.rs index 6e9830d6303..abbcde05ad8 100644 --- 
a/lib/ain-rs-exports/src/lib.rs +++ b/lib/ain-rs-exports/src/lib.rs @@ -1,12 +1,13 @@ mod core; mod debug; mod evm; +mod ocean; mod prelude; mod util; use ain_evm::blocktemplate::BlockTemplate; -use crate::{core::*, debug::*, evm::*, util::*}; +use crate::{core::*, debug::*, evm::*, ocean::*, util::*}; pub struct BlockTemplateWrapper(Option); @@ -46,6 +47,7 @@ pub mod ffi { // Networking fn ain_rs_init_network_json_rpc_service(result: &mut CrossBoundaryResult, addr: String); fn ain_rs_init_network_grpc_service(result: &mut CrossBoundaryResult, addr: String); + fn ain_rs_init_network_rest_ocean(result: &mut CrossBoundaryResult, addr: String); fn ain_rs_init_network_subscriptions_service( result: &mut CrossBoundaryResult, addr: String, @@ -344,6 +346,15 @@ pub mod ffi { fn evm_try_flush_db(result: &mut CrossBoundaryResult); + fn ocean_index_block(result: &mut CrossBoundaryResult, block_str: String); + fn ocean_invalidate_block(result: &mut CrossBoundaryResult, block: String); + + fn ocean_try_set_tx_result( + result: &mut CrossBoundaryResult, + tx_type: u8, + tx_hash: [u8; 32], + result_ptr: usize, + ); fn evm_try_unsafe_rename_dst20( result: &mut CrossBoundaryResult, block_template: &mut BlockTemplateWrapper, diff --git a/lib/ain-rs-exports/src/ocean.rs b/lib/ain-rs-exports/src/ocean.rs new file mode 100644 index 00000000000..dd1e7de7909 --- /dev/null +++ b/lib/ain-rs-exports/src/ocean.rs @@ -0,0 +1,25 @@ +use ain_macros::ffi_fallible; +use ain_ocean::Result; +use defichain_rpc::json::blockchain::{Block, Transaction}; + +use crate::{ + ffi, + prelude::{cross_boundary_error_return, cross_boundary_success_return}, +}; + +#[ffi_fallible] +pub fn ocean_index_block(block_str: String) -> Result<()> { + let block: Block = serde_json::from_str(&block_str)?; + ain_ocean::index_block(&ain_ocean::SERVICES, block) +} + +#[ffi_fallible] +pub fn ocean_invalidate_block(block_str: String) -> Result<()> { + let block: Block = serde_json::from_str(&block_str)?; + 
ain_ocean::invalidate_block(&ain_ocean::SERVICES, block) +} + +#[ffi_fallible] +fn ocean_try_set_tx_result(tx_type: u8, tx_hash: [u8; 32], result_ptr: usize) -> Result<()> { + ain_ocean::tx_result::index(&ain_ocean::SERVICES, tx_type, tx_hash, result_ptr) +} diff --git a/lib/cli/src/command.rs b/lib/cli/src/command.rs index c3eff6ef4c3..bbca1e23270 100644 --- a/lib/cli/src/command.rs +++ b/lib/cli/src/command.rs @@ -1,5 +1,5 @@ use ain_grpc::rpc::MetachainRPCClient; -use jsonrpsee::http_client::HttpClient; +use defichain_rpc::{Client, RpcApi}; use crate::{result::RpcResult, MetachainCLI}; diff --git a/make.sh b/make.sh index 7bacc594e07..21c23f9de00 100755 --- a/make.sh +++ b/make.sh @@ -44,7 +44,7 @@ setup_vars() { PYTHON_VENV_DIR=${PYTHON_VENV_DIR:-"${BUILD_DIR}/pyenv"} CLANG_DEFAULT_VERSION=${CLANG_DEFAULT_VERSION:-"15"} - RUST_DEFAULT_VERSION=${RUST_DEFAULT_VERSION:-"1.72"} + RUST_DEFAULT_VERSION=${RUST_DEFAULT_VERSION:-"1.76"} MAKE_DEBUG=${MAKE_DEBUG:-"1"} MAKE_USE_CLANG=${MAKE_USE_CLANG:-"$(get_default_use_clang)"} @@ -415,7 +415,7 @@ check_py() { } check_rs() { - lib clippy 1 + # lib clippy 1 lib fmt-check 1 } diff --git a/src/Makefile.am b/src/Makefile.am index 9674cf50111..385585c02dc 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -159,6 +159,7 @@ DEFI_CORE_H = \ logging.h \ ffi/ffiexports.h \ ffi/ffihelpers.h \ + ffi/ffiocean.h \ dfi/accounts.h \ dfi/accountshistory.h \ dfi/anchors.h \ @@ -412,6 +413,7 @@ libdefi_server_a_SOURCES = \ init.cpp \ dbwrapper.cpp \ ffi/ffiexports.cpp \ + ffi/ffiocean.cpp \ dfi/accounts.cpp \ dfi/accountshistory.cpp \ dfi/anchors.cpp \ diff --git a/src/bench/rpc_blockchain.cpp b/src/bench/rpc_blockchain.cpp index 852906f0104..ada178ba557 100644 --- a/src/bench/rpc_blockchain.cpp +++ b/src/bench/rpc_blockchain.cpp @@ -13,21 +13,21 @@ #include static void BlockToJsonVerbose(benchmark::State& state) { - CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION); - char a = '\0'; - stream.write(&a, 1); 
// Prevent compaction + // CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION); + // char a = '\0'; + // stream.write(&a, 1); // Prevent compaction - CBlock block; - stream >> block; + // CBlock block; + // stream >> block; - CBlockIndex blockindex; - const uint256 blockHash = block.GetHash(); - blockindex.phashBlock = &blockHash; - blockindex.nBits = 403014710; + // CBlockIndex blockindex; + // const uint256 blockHash = block.GetHash(); + // blockindex.phashBlock = &blockHash; + // blockindex.nBits = 403014710; - while (state.KeepRunning()) { - (void)blockToJSON(block, &blockindex, &blockindex, /*verbose*/ true); - } + // while (state.KeepRunning()) { + // (void)blockToJSON(block, &blockindex, &blockindex, /*verbose*/ true); + // } } BENCHMARK(BlockToJsonVerbose, 10); diff --git a/src/dfi/consensus/poolpairs.cpp b/src/dfi/consensus/poolpairs.cpp index e3fbe9de953..e91882bd198 100644 --- a/src/dfi/consensus/poolpairs.cpp +++ b/src/dfi/consensus/poolpairs.cpp @@ -7,6 +7,7 @@ #include #include #include +#include Res CPoolPairsConsensus::EraseEmptyBalances(TAmounts &balances) const { auto &mnview = blockCtx.GetView(); @@ -163,9 +164,11 @@ Res CPoolPairsConsensus::operator()(const CPoolSwapMessage &obj) const { const auto &consensus = txCtx.GetConsensus(); const auto height = txCtx.GetHeight(); + const auto &tx = txCtx.GetTransaction(); auto &mnview = blockCtx.GetView(); - return CPoolSwap(obj, height).ExecuteSwap(mnview, {}, consensus); + return CPoolSwap(obj, height, std::make_pair(CustomTxType::PoolSwap, tx.GetHash())) + .ExecuteSwap(mnview, {}, consensus); } Res CPoolPairsConsensus::operator()(const CPoolSwapMessageV2 &obj) const { @@ -176,9 +179,11 @@ Res CPoolPairsConsensus::operator()(const CPoolSwapMessageV2 &obj) const { const auto &consensus = txCtx.GetConsensus(); const auto height = txCtx.GetHeight(); + const auto &tx = txCtx.GetTransaction(); auto &mnview = blockCtx.GetView(); - return CPoolSwap(obj.swapInfo, 
height).ExecuteSwap(mnview, obj.poolIDs, consensus); + return CPoolSwap(obj.swapInfo, height, std::make_pair(CustomTxType::PoolSwapV2, tx.GetHash())) + .ExecuteSwap(mnview, obj.poolIDs, consensus); } Res CPoolPairsConsensus::operator()(const CLiquidityMessage &obj) const { diff --git a/src/dfi/mn_checks.cpp b/src/dfi/mn_checks.cpp index 5beb2821806..ac7ec0431ab 100644 --- a/src/dfi/mn_checks.cpp +++ b/src/dfi/mn_checks.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -996,6 +997,13 @@ Res CPoolSwap::ExecuteSwap(CCustomCSView &view, // Set amount to be swapped in pool CTokenAmount swapAmountResult{obj.idTokenFrom, obj.amountFrom}; + struct PoolSwapResult { + int64_t toAmount; + uint32_t poolId; + }; + + PoolSwapResult finalSwapAmount; + for (size_t i{0}; i < poolIDs.size(); ++i) { // Also used to generate pool specific error messages for RPC users currentID = poolIDs[i]; @@ -1111,8 +1119,6 @@ Res CPoolSwap::ExecuteSwap(CCustomCSView &view, intermediateView.Flush(); - const auto token = view.GetToken("DUSD"); - // burn the dex in amount if (dexfeeInAmount.nValue > 0) { res = view.AddBalance(consensus.burnAddress, dexfeeInAmount); @@ -1134,8 +1140,12 @@ Res CPoolSwap::ExecuteSwap(CCustomCSView &view, totalTokenA.swaps += (reserveAmount - initReserveAmount); totalTokenA.commissions += (blockCommission - initBlockCommission); - if (lastSwap && obj.to == consensus.burnAddress) { - totalTokenB.feeburn += swapAmountResult.nValue; + if (lastSwap) { + if (obj.to == consensus.burnAddress) { + totalTokenB.feeburn += swapAmountResult.nValue; + } + + finalSwapAmount = {swapAmountResult.nValue, currentID.v}; } return res; @@ -1170,7 +1180,8 @@ Res CPoolSwap::ExecuteSwap(CCustomCSView &view, // Assign to result for loop testing best pool swap result result = swapAmountResult.nValue; - return Res::Ok(); + // Send final swap amount Rust side for indexer + return OceanSetTxResult(txInfo, static_cast(reinterpret_cast(&finalSwapAmount))); } Res 
SwapToDFIorDUSD(CCustomCSView &mnview, diff --git a/src/dfi/mn_checks.h b/src/dfi/mn_checks.h index 69d5c59b045..5ab02e3179b 100644 --- a/src/dfi/mn_checks.h +++ b/src/dfi/mn_checks.h @@ -270,16 +270,20 @@ std::set GetGovernanceMembers(const CCustomCSView &mnview); class CPoolSwap { const CPoolSwapMessage &obj; - uint32_t height; - CAmount result{0}; - DCT_ID currentID; + const uint32_t height; + const std::optional> txInfo; + CAmount result{}; + DCT_ID currentID{}; public: std::vector> errors; - CPoolSwap(const CPoolSwapMessage &obj, uint32_t height) + CPoolSwap(const CPoolSwapMessage &obj, + const uint32_t height, + const std::optional> txInfo = std::nullopt) : obj(obj), - height(height) {} + height(height), + txInfo(txInfo) {} std::vector CalculateSwaps(CCustomCSView &view, const Consensus::Params &consensus, bool testOnly = false); Res ExecuteSwap(CCustomCSView &view, diff --git a/src/dfi/validation.cpp b/src/dfi/validation.cpp index 99a308c76b1..6e10a0d935d 100644 --- a/src/dfi/validation.cpp +++ b/src/dfi/validation.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include #include @@ -20,6 +21,7 @@ #include #include #include +#include #include #include @@ -4636,6 +4638,23 @@ static void FlushCacheCreateUndo(const CBlockIndex *pindex, } } +static CrossBoundaryResult OceanIndex(const UniValue b, const uint32_t height) { + auto time = GetTimeMillis(); + CrossBoundaryResult result; + ocean_index_block(result, b.write()); + if (!result.ok) { + LogPrintf("Error indexing block %d : %s\n", height, result.reason); + ocean_invalidate_block(result, b.write()); + if (!result.ok) { + LogPrintf("Error invalidating block %d: %s\n", height, result.reason); + return result; + } + OceanIndex(b, height); + } + LogPrint(BCLog::OCEAN, "Indexing ocean block %d took: %dms\n", height, GetTimeMillis() - time); + return result; +}; + Res ProcessDeFiEventFallible(const CBlock &block, const CBlockIndex *pindex, const CChainParams &chainparams, @@ -4666,6 +4685,15 @@ Res 
ProcessDeFiEventFallible(const CBlock &block, // Construct undo FlushCacheCreateUndo(pindex, mnview, cache, uint256S(std::string(64, '1'))); + // Ocean archive + if (gArgs.GetBoolArg("-oceanarchive", DEFAULT_OCEAN_INDEXER_ENABLED)) { + const UniValue b = blockToJSON(cache, block, ::ChainActive().Tip(), pindex, true, 2); + + if (CrossBoundaryResult result = OceanIndex(b, static_cast(pindex->nHeight)); !result.ok) { + return Res::Err(result.reason.c_str()); + } + } + return Res::Ok(); } diff --git a/src/ffi/ffiexports.cpp b/src/ffi/ffiexports.cpp index 76c7cfdb697..6e581192854 100644 --- a/src/ffi/ffiexports.cpp +++ b/src/ffi/ffiexports.cpp @@ -29,6 +29,26 @@ uint64_t getChainId() { return Params().GetConsensus().evmChainId; } +int getRPCPort() { + auto dvmport = gArgs.GetArg("-rpcport", BaseParams().RPCPort()); + if (const auto port = GetPortFromLockFile(AutoPort::RPC); port) { + dvmport = port; + } + return dvmport; +} + +rust::string getRPCAuth() { + // Get credentials + std::string strRPCUserColonPass; + if (gArgs.GetArg("-rpcpassword", "") == "") { + // Try fall back to cookie-based authentication if no password is provided + GetAuthCookie(&strRPCUserColonPass); + } else { + strRPCUserColonPass = gArgs.GetArg("-rpcuser", "") + ":" + gArgs.GetArg("-rpcpassword", ""); + } + return strRPCUserColonPass; +} + bool isMining() { return gArgs.GetBoolArg("-gen", false); } @@ -328,6 +348,60 @@ uint64_t getEstimateGasErrorRatio() { return gArgs.GetArg("-evmestimategaserrorratio", DEFAULT_ESTIMATE_GAS_ERROR_RATIO); } +rust::vec getPoolPairs() { + LOCK(cs_main); + + rust::vec pools; + auto view = pcustomcsview.get(); + view->ForEachPoolPair( + [&](DCT_ID const &id, const CPoolPair &pool) { + pools.emplace_back(PoolPairCreationHeight{id.v, pool.idTokenA.v, pool.idTokenB.v, pool.creationHeight}); + return true; + }, + {0}); + + return pools; +} + +std::unique_ptr getDSTToken(rust::string tokenId) { + auto [view, accountView, vaultView] = GetSnapshots(); + + DCT_ID dctId; + 
auto token = view->GetTokenGuessId(tokenId.c_str(), dctId); + if (!token) { + return {}; + } + + DSTToken dstToken; + dstToken.id = dctId.v; + dstToken.name = token->name; + dstToken.symbol = token->symbol; + dstToken.symbol = token->CreateSymbolKey(dctId); + dstToken.decimal = token->decimal; + dstToken.isDAT = token->IsDAT(); + dstToken.isLPS = token->IsPoolShare(); + dstToken.tradable = token->IsTradeable(); + dstToken.mintable = token->IsMintable(); + dstToken.finalize = token->IsFinalized(); + dstToken.isLoanToken = token->IsLoanToken(); + dstToken.minted = token->minted; + dstToken.creationTx = token->creationTx.ToString(); + dstToken.creationHeight = token->creationHeight; + dstToken.destructionTx = token->destructionTx.ToString(); + dstToken.destructionHeight = token->destructionHeight; + + if (!token->IsPoolShare()) { + auto collateralAuth = token->creationTx; + if (const auto txid = view->GetNewTokenCollateralTXID(dctId.v); txid != uint256{}) { + collateralAuth = txid; + } + const Coin &authCoin = ::ChainstateActive().CoinsTip().AccessCoin(COutPoint(collateralAuth, 1)); + dstToken.collateralAddress = ScriptToString(authCoin.out.scriptPubKey); + } + + return std::make_unique(dstToken); +} + bool getDST20Tokens(std::size_t mnview_ptr, rust::vec &tokens) { LOCK(cs_main); @@ -529,3 +603,9 @@ uint64_t getDF24Height() { bool migrateTokensFromEVM(std::size_t mnview_ptr, TokenAmount old_amount, TokenAmount &new_amount) { return ExecuteTokenMigrationEVM(mnview_ptr, old_amount, new_amount); } + +bool isSkippedTx(std::array txHash) { + uint256 hash{}; + std::copy(txHash.begin(), txHash.end(), hash.begin()); + return IsSkippedTx(hash); +} diff --git a/src/ffi/ffiexports.h b/src/ffi/ffiexports.h index ad2420b7fab..9e223f90d33 100644 --- a/src/ffi/ffiexports.h +++ b/src/ffi/ffiexports.h @@ -32,6 +32,10 @@ static constexpr bool DEFAULT_ETH_DEBUG_ENABLED = false; static constexpr bool DEFAULT_ETH_DEBUG_TRACE_ENABLED = true; static constexpr bool 
DEFAULT_ETH_SUBSCRIPTION_ENABLED = true; +static constexpr bool DEFAULT_OCEAN_INDEXER_ENABLED = false; +static constexpr bool DEFAULT_OCEAN_SERVER_ENABLED = false; +static constexpr uint32_t DEFAULT_OCEAN_SERVER_PORT = 3002; + struct Attributes { uint64_t blockGasTargetFactor; uint64_t blockGasLimit; @@ -48,6 +52,34 @@ struct Attributes { } }; +struct PoolPairCreationHeight { + uint32_t id; + uint32_t idTokenA; + uint32_t idTokenB; + uint32_t creationHeight; +}; + +struct DSTToken { + uint32_t id; + rust::string name; + rust::string symbol; + rust::string symbolKey; + uint8_t decimal; + bool isDAT; + bool isLPS; + bool tradable; + bool mintable; + bool finalize; + bool isLoanToken; + CAmount minted; + CAmount limit; + rust::string creationTx; + int32_t creationHeight; + rust::string destructionTx; + int32_t destructionHeight; + rust::string collateralAddress; +}; + struct DST20Token { uint64_t id; rust::string name; @@ -93,6 +125,8 @@ struct SystemTxData { }; uint64_t getChainId(); +int getRPCPort(); +rust::string getRPCAuth(); bool isMining(); rust::string publishEthTransaction(rust::Vec rawTransaction); rust::vec getAccounts(); @@ -114,6 +148,8 @@ rust::string getStateInputJSON(); std::array getEthSyncStatus(); Attributes getAttributeValues(std::size_t mnview_ptr); void CppLogPrintf(rust::string message); +rust::vec getPoolPairs(); +std::unique_ptr getDSTToken(rust::string id); bool getDST20Tokens(std::size_t mnview_ptr, rust::vec &tokens); rust::string getClientVersion(); int32_t getNumCores(); @@ -129,5 +165,6 @@ rust::vec getEVMSystemTxsFromBlock(std::array evmBloc uint64_t getDF23Height(); uint64_t getDF24Height(); bool migrateTokensFromEVM(std::size_t mnview_ptr, TokenAmount old_amount, TokenAmount &new_amount); +bool isSkippedTx(std::array txHash); #endif // DEFI_FFI_FFIEXPORTS_H diff --git a/src/ffi/ffiocean.cpp b/src/ffi/ffiocean.cpp new file mode 100644 index 00000000000..22c7da4c6a4 --- /dev/null +++ b/src/ffi/ffiocean.cpp @@ -0,0 +1,19 @@ +// Copyright 
(c) DeFi Blockchain Developers +// Distributed under the MIT software license, see the accompanying +// file LICENSE or http://www.opensource.org/licenses/mit-license.php. + +#include +#include +#include +#include + +Res OceanSetTxResult(const std::optional> &txInfo, const std::size_t result_ptr) { + bool isOceanEnabled = gArgs.GetBoolArg("-oceanarchive", false); + if (txInfo && isOceanEnabled) { + const auto &[txType, txHash] = *txInfo; + CrossBoundaryResult ffiResult; + ocean_try_set_tx_result(ffiResult, static_cast(txType), txHash.GetByteArrayBE(), result_ptr); + } + + return Res::Ok(); +} diff --git a/src/ffi/ffiocean.h b/src/ffi/ffiocean.h new file mode 100644 index 00000000000..b801484268d --- /dev/null +++ b/src/ffi/ffiocean.h @@ -0,0 +1,6 @@ +#ifndef DEFI_FFI_FFIOCEAN_H +#define DEFI_FFI_FFIOCEAN_H + +Res OceanSetTxResult(const std::optional> &txInfo, const std::size_t result_ptr); + +#endif // DEFI_FFI_FFIOCEAN_H diff --git a/src/init.cpp b/src/init.cpp index 7e191085c1f..d9c605d1f47 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -678,6 +678,10 @@ void SetupServerArgs() gArgs.AddArg("-ethdebug", strprintf("Enable debug_* ETH RPCs (default: %b)", DEFAULT_ETH_DEBUG_ENABLED), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); gArgs.AddArg("-ethdebugtrace", strprintf("Enable debug_trace* ETH RPCs (default: %b)", DEFAULT_ETH_DEBUG_TRACE_ENABLED), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); gArgs.AddArg("-ethsubscription", strprintf("Enable subscription notifications ETH RPCs (default: %b)", DEFAULT_ETH_SUBSCRIPTION_ENABLED), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); + gArgs.AddArg("-oceanarchive", strprintf("Enable ocean archive indexer (default: %b)", DEFAULT_OCEAN_INDEXER_ENABLED), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); + gArgs.AddArg("-oceanarchiveserver", strprintf("Enable ocean archive server (default: %b)", DEFAULT_OCEAN_SERVER_ENABLED), ArgsManager::ALLOW_ANY, OptionsCategory::RPC); + gArgs.AddArg("-oceanarchiveport=", strprintf("Listen for 
ocean archive connections on (default: %u)", DEFAULT_OCEAN_SERVER_PORT), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC); + gArgs.AddArg("-oceanarchivebind=[:port]", "Bind to given address to listen for Ocean connections. Do not expose the Ocean server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -oceanarchiveport. This option can be specified multiple times (default: 127.0.0.1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC); gArgs.AddArg("-minerstrategy", "Staking optimisation. Options are none, numeric value indicating the number of subnodes to stake (default: none)", ArgsManager::ALLOW_ANY, OptionsCategory::RPC); @@ -1607,7 +1611,7 @@ void SetupCacheSizes(CacheSizes& cacheSizes) { LogPrintf("* Using %.1f MiB for in-memory UTXO set (plus up to %.1f MiB of unused mempool space)\n", nCoinCacheUsage * (1.0 / 1024 / 1024), nMempoolSizeMax * (1.0 / 1024 / 1024)); } -static void SetupRPCPorts(std::vector& ethEndpoints, std::vector& wsEndpoints) { +static void SetupRPCPorts(std::vector& ethEndpoints, std::vector& wsEndpoints, std::vector& oceanEndpoints) { std::string default_address = "127.0.0.1"; bool setAutoPort{}; @@ -1618,7 +1622,7 @@ static void SetupRPCPorts(std::vector& ethEndpoints, std::vector& ethEndpoints, std::vector& ethEndpoints, std::vector eth_endpoints, ws_endpoints; - SetupRPCPorts(eth_endpoints, ws_endpoints); + std::vector eth_endpoints, ws_endpoints, ocean_endpoints; + SetupRPCPorts(eth_endpoints, ws_endpoints, ocean_endpoints); CrossBoundaryResult result; // Bind ETH RPC addresses @@ -2398,6 +2445,24 @@ bool AppInitMain(InitInterfaces& interfaces) } } } + + // bind ocean REST addresses + if (gArgs.GetBoolArg("-oceanarchiveserver", DEFAULT_OCEAN_SERVER_ENABLED)) { + // bind ocean addresses + for (auto it = ocean_endpoints.begin(); it != ocean_endpoints.end(); ++it) { + 
LogPrint(BCLog::HTTP, "Binding ocean server on endpoint %s\n", *it); + const auto addr = rs_try_from_utf8(result, ffi_from_string_to_slice(*it)); + if (!result.ok) { + LogPrint(BCLog::HTTP, "Invalid ocean address, not UTF-8 valid\n"); + return false; + } + auto res = XResultStatusLogged(ain_rs_init_network_rest_ocean(result, addr)) + if (!res) { + LogPrintf("Binding ocean server on endpoint %s failed.\n", *it); + return false; + } + } + } } uiInterface.InitMessage(_("Done loading").translated); @@ -2446,7 +2511,30 @@ bool AppInitMain(InitInterfaces& interfaces) spv::pspv->Connect(); } - // ********************************************************* Step 15: start minter thread + + // ********************************************************* Step 15: start genesis ocean indexing + if(gArgs.GetBoolArg("-oceanarchive", DEFAULT_OCEAN_INDEXER_ENABLED)) { + const CBlock &block = chainparams.GenesisBlock(); + + const CBlockIndex* pblockindex; + const CBlockIndex* tip; + { + LOCK(cs_main); + + pblockindex = LookupBlockIndex(block.GetHash()); + assert(pblockindex); + + tip = ::ChainActive().Tip(); + } + + const UniValue b = blockToJSON(*pcustomcsview, block, tip, pblockindex, true, 2); + + if (bool isIndexed = OceanIndex(b); !isIndexed) { + return false; + } + } + + // ********************************************************* Step 16: start minter thread if(gArgs.GetBoolArg("-gen", DEFAULT_GENERATE)) { if (!pos::StartStakingThreads(threadGroup)) { return false; diff --git a/src/logging.cpp b/src/logging.cpp index 1143b18c519..c9c23da2b7f 100644 --- a/src/logging.cpp +++ b/src/logging.cpp @@ -163,6 +163,7 @@ const CLogCategoryDesc LogCategories[] = {BCLog::CUSTOMTXBENCH, "customtxbench"}, {BCLog::SWAPRESULT, "swapresult"}, {BCLog::CONNECT, "connect"}, + {BCLog::OCEAN, "ocean"}, {BCLog::ALL, "1"}, {BCLog::ALL, "all"}, }; @@ -337,6 +338,8 @@ static std::string GetAutoPortString(const AutoPort type) return "ethrpcport"; case WEBSOCKET: return "wsport"; + case OCEAN: + return 
"ocean"; default: return "Unknown"; } diff --git a/src/logging.h b/src/logging.h index 017d4d7965d..2caa38cddfa 100644 --- a/src/logging.h +++ b/src/logging.h @@ -32,6 +32,7 @@ enum AutoPort : uint8_t { P2P, ETHRPC, WEBSOCKET, + OCEAN }; struct CLogCategoryActive @@ -76,6 +77,7 @@ namespace BCLog { CONNECT = (1ull << 31ull), SIGN = (1ull << 32ull), SWAPRESULT = (1ull << 33ull), + OCEAN = (1ull << 34ull), ALL = ~(0ull), }; @@ -207,7 +209,7 @@ static inline void LogPrintCategoryOrThreadThrottled(const BCLog::LogFlags& cate LogPrintf(args...); it->second = current_time; } - } + } else { // No entry yet -> log directly and save timestamp last_log_timestamps.insert(std::make_pair(message_key, current_time)); diff --git a/src/test/xvm_tests.cpp b/src/test/xvm_tests.cpp index cae4765ce27..8307f7ccda9 100644 --- a/src/test/xvm_tests.cpp +++ b/src/test/xvm_tests.cpp @@ -30,7 +30,7 @@ BOOST_AUTO_TEST_CASE(xvm_test_case_1) XVM xvm4; xvm4.evm.blockHash = uint256(oneVecReversed).GetHex(); - for (const auto& [result, expected]: std::vector> { + for (const auto& [result, expected]: std::vector> { { zero.GetHex(), "0000000000000000000000000000000000000000000000000000000000000000" }, { one.GetHex(), "0000000000000000000000000000000000000000000000000000000000000001" }, { xvm.evm.blockHash, "0000000000000000000000000000000000000000000000000000000000000001" }, @@ -39,10 +39,10 @@ BOOST_AUTO_TEST_CASE(xvm_test_case_1) { xvm4.evm.blockHash, "0000000000000000000000000000000000000000000000000000000000000001" }, { xvm.evm.beneficiary, "00000000000000000002" }, { xvm2->evm.beneficiary, "00000000000000000002" }, - }) { + }) { // BOOST_TEST_MESSAGE("expected: " + expected + ", result: " + result); BOOST_CHECK(result == expected); }; } -BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/uint256.h b/src/uint256.h index 16346183ca2..f3676a75101 100644 --- a/src/uint256.h +++ b/src/uint256.h @@ -76,6 +76,13 @@ class base_blob return 
reversedArray; } + [[nodiscard]] std::array GetByteArrayBE() const + { + std::array byteArray; + std::copy(data, data + WIDTH, byteArray.begin()); + return byteArray; + } + unsigned char* begin() { return &data[0]; diff --git a/test/lint/lint-circular-dependencies.sh b/test/lint/lint-circular-dependencies.sh index 9a98cf7aa89..387320503ee 100755 --- a/test/lint/lint-circular-dependencies.sh +++ b/test/lint/lint-circular-dependencies.sh @@ -84,7 +84,8 @@ EXPECTED_CIRCULAR_DEPENDENCIES=( "dfi/mn_checks -> validation -> wallet/wallet -> dfi/mn_checks" "dfi/mn_rpc -> wallet/rpcwallet -> init -> ffi/ffiexports -> dfi/mn_rpc" "dfi/govvariables/attributes -> dfi/mn_rpc -> wallet/rpcwallet -> init -> miner -> dfi/govvariables/attributes" - "dfi/govvariables/attributes -> dfi/mn_rpc -> wallet/rpcwallet -> init -> rpc/blockchain -> dfi/govvariables/attributes" + # "dfi/govvariables/attributes -> dfi/mn_rpc -> wallet/rpcwallet -> init -> rpc/blockchain -> dfi/govvariables/attributes" + "dfi/govvariables/attributes -> dfi/validation -> rpc/blockchain -> dfi/govvariables/attributes" "dfi/mn_rpc -> wallet/rpcwallet -> init -> miner -> dfi/validation -> dfi/mn_rpc" "dfi/snapshotmanager -> dfi/vaulthistory -> flushablestorage -> dfi/snapshotmanager" "dfi/validation -> validation -> dfi/validation"