diff --git a/.editorconfig b/.editorconfig index 72e61c1b..12808c50 100644 --- a/.editorconfig +++ b/.editorconfig @@ -19,3 +19,7 @@ trim_trailing_whitespace = false [*.{yml,yaml}] indent_size = 2 + +[{Makefile,**.mk}] +# Use tabs for indentation (Makefiles require tabs) +indent_style = tab diff --git a/.env.sample b/.env.sample index 5b6c89de..c111ace9 100644 --- a/.env.sample +++ b/.env.sample @@ -1,26 +1,34 @@ -# Common Configuration +# Authentication & Security KEYPAIR=generated-p2p-secret -NATS_URL=nats://localhost:4222 -NATS_ADMIN_PASS=generated-secret -NATS_SYSTEM_PASS=generated-secret +JWT_AUTH_SECRET=generated-secret + +# AWS S3 Configuration AWS_ACCESS_KEY_ID=test AWS_SECRET_ACCESS_KEY=test -AWS_REGION=us-east-1 AWS_ENDPOINT_URL=http://localhost:4566 -AWS_S3_BUCKET_NAME=fuel-streams-testnet +AWS_REGION=us-east-1 +AWS_S3_ENABLED=false +AWS_S3_BUCKET_NAME=fuel-streams-local + +# NATS Configuration +NATS_URL=nats://localhost:4222 +NATS_PUBLISHER_URL=nats://localhost:4333 +NATS_SYSTEM_USER=sys +NATS_SYSTEM_PASS=sys +NATS_ADMIN_USER=admin +NATS_ADMIN_PASS=admin +NATS_PUBLIC_USER=default_user +NATS_PUBLIC_PASS="" + +# Monitoring & Logging USE_ELASTIC_LOGGING=false USE_METRICS=true PUBLISHER_MAX_THREADS=16 + +# Elasticsearch Configuration ELASTICSEARCH_URL=http://127.0.0.1:9200 ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=generated-secret -AWS_S3_ENABLED=false -AWS_ACCESS_KEY_ID=s3-access-key-id -AWS_SECRET_ACCESS_KEY=s3-secret-access-key -AWS_REGION=s3-region -AWS_ENDPOINT_URL=s3-endpoint -AWS_S3_BUCKET_NAME=fuel-streams-local -JWT_AUTH_SECRET=generated-secret # Mainnet Configuration MAINNET_RELAYER=https://mainnet.infura.io/v3/ diff --git a/.github/actions/setup-rust/action.yaml b/.github/actions/setup-rust/action.yaml index 11d83426..600f16ef 100644 --- a/.github/actions/setup-rust/action.yaml +++ b/.github/actions/setup-rust/action.yaml @@ -21,8 +21,16 @@ runs: - name: Create .env file with NATS environment variables shell: bash run: | - echo "NATS_ADMIN_PASS=${NATS_ADMIN_PASS:-default_pass}" >> .env - echo "NATS_PUBLIC_PASS=${NATS_PUBLIC_PASS:-temp-public-pass}" >> .env + set_env_var() { + echo "$1=${!1:-$2}" >> $GITHUB_ENV + echo "$1=${!1:-$2}" >> .env + } + set_env_var "NATS_SYSTEM_USER" "sys" + set_env_var "NATS_SYSTEM_PASS" "sys" + set_env_var "NATS_ADMIN_USER" "admin" + set_env_var "NATS_ADMIN_PASS" "admin" + set_env_var "NATS_PUBLIC_USER" "default_user" + set_env_var "NATS_PUBLIC_PASS" "" - name: Install Rust uses: dtolnay/rust-toolchain@master diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index c6b9ac26..2055b492 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -1,7 +1,6 @@ name: CI on: - workflow_dispatch: pull_request: types: - opened @@ -190,24 +189,24 @@ jobs: - name: Install dependencies run: cargo fetch - test-helm: - needs: install-deps - name: Test Helm - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Set up Helm - uses: azure/setup-helm@v4 - with: - version: "latest" - - - name: Install helm unittest plugin - run: | - helm plugin install https://github.com/helm-unittest/helm-unittest.git - - - name: Run Helm unit tests - run: | - make helm-test + # test-helm: + # needs: install-deps + # name: Test Helm + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v4 + # - name: Set up Helm + # uses: azure/setup-helm@v4 + # with: + # version: "latest" + + # - name: Install helm unittest plugin + # run: | + # helm plugin install https://github.com/helm-unittest/helm-unittest.git + + # 
- name: Run Helm unit tests
+      #     run: |
+      #       make helm-test

   test:
     needs: install-deps
     name: Test
     runs-on: ubuntu-latest
     env:
       NATS_URL: nats://127.0.0.1:4222
-      NATS_ADMIN_PASS: secret
-      NATS_PUBLIC_PASS: secret
+      NATS_PUBLISHER_URL: nats://127.0.0.1:4333
+      NATS_SYSTEM_USER: sys
+      NATS_SYSTEM_PASS: sys
+      NATS_ADMIN_USER: admin
+      NATS_ADMIN_PASS: admin
+      NATS_PUBLIC_USER: default_user
+      NATS_PUBLIC_PASS: ""
       AWS_ACCESS_KEY_ID: test
       AWS_SECRET_ACCESS_KEY: test
       AWS_REGION: us-east-1
@@ -226,13 +230,12 @@ jobs:
       fail-fast: false
       matrix:
         package:
-          # - fuel-data-parser
+          - fuel-data-parser
           - fuel-streams
           - fuel-streams-core
           - fuel-streams-macros
-          - fuel-streams-publisher
-          - fuel-streams-ws
-
+          - sv-webserver
+          - sv-publisher
     steps:
       - uses: actions/checkout@v4

@@ -266,8 +269,9 @@ jobs:
       fail-fast: false
       matrix:
         package:
-          - fuel-streams-publisher
-          - fuel-streams-ws
+          - sv-consumer
+          - sv-publisher
+          - sv-webserver
         is_release:
           - ${{ github.ref == 'refs/heads/main' || github.event_name == 'workflow_dispatch' }}
         platform:
@@ -336,34 +340,34 @@
           rustup target add ${{ matrix.platform.target }}
           cargo build --release --locked --target ${{ matrix.platform.target }} --package ${{ matrix.package }}

-      - name: Strip binaries
-        run: ./scripts/strip-binary.sh "${{ matrix.platform.target }}"
-
-      - name: Set Artifact Name
-        id: artifact-name
-        shell: bash
-        run: |
-          echo "value=${{ matrix.package }}-${{ matrix.platform.os_name }}" >> $GITHUB_OUTPUT
-
-      - name: Package as archive
-        shell: bash
-        run: |
-          cd target/${{ matrix.platform.target }}/release
-          tar czvf ../../../${{ steps.artifact-name.outputs.value }}.tar.gz ${{ matrix.package }}
-          cd -
-
-      - name: Publish release artifacts
-        uses: actions/upload-artifact@v4
-        if: >-
-          (github.event_name == 'push' &&
-          github.ref == 'refs/heads/main' &&
-          contains(github.event.head_commit.message, 'ci(release): Preparing')) ||
-          github.event_name == 'workflow_dispatch'
-        with:
-          name: ${{ steps.artifact-name.outputs.value }}
-          path: ${{ matrix.package }}-*
-          if-no-files-found: error
-          retention-days: 30
+      # - name: Strip binaries
+      #   run: ./scripts/strip-binary.sh "${{ matrix.platform.target }}"
+
+      # - name: Set Artifact Name
+      #   id: artifact-name
+      #   shell: bash
+      #   run: |
+      #     echo "value=${{ matrix.package }}-${{ matrix.platform.os_name }}" >> $GITHUB_OUTPUT
+
+      # - name: Package as archive
+      #   shell: bash
+      #   run: |
+      #     cd target/${{ matrix.platform.target }}/release
+      #     tar czvf ../../../${{ steps.artifact-name.outputs.value }}.tar.gz ${{ matrix.package }}
+      #     cd -
+
+      # - name: Publish release artifacts
+      #   uses: actions/upload-artifact@v4
+      #   if: >-
+      #     (github.event_name == 'push' &&
+      #     github.ref == 'refs/heads/main' &&
+      #     contains(github.event.head_commit.message, 'ci(release): Preparing')) ||
+      #     github.event_name == 'workflow_dispatch'
+      #   with:
+      #     name: ${{ steps.artifact-name.outputs.value }}
+      #     path: ${{ matrix.package }}-*
+      #     if-no-files-found: error
+      #     retention-days: 30

   release:
     name: Create Release with Knope
@@ -374,7 +378,7 @@
       github.event_name == 'workflow_dispatch'
     needs:
       - test
-      - test-helm
+      # - test-helm
       - build
     runs-on: ubuntu-latest
     permissions:
@@ -384,14 +388,14 @@
       - name: Checkout Repository
        uses: actions/checkout@v4

-      - name: Download Artifacts
-        uses: actions/download-artifact@v4
-        with:
-          path: artifacts
-          merge-multiple: true
+      # - name: Download Artifacts
+      #   uses: actions/download-artifact@v4
+      #   with:
+      #     path: artifacts
+      #     merge-multiple: true

-      - name: List Artifacts
-        run: ls -R artifacts
+      # - name: List Artifacts
+      #   run: ls -R artifacts

       - name: Run Knope Action
         uses: knope-dev/action@v2.1.0
diff --git a/.github/workflows/docker_publish.yaml b/.github/workflows/docker_publish.yaml
index 748ee112..6aff171b 100644
--- a/.github/workflows/docker_publish.yaml
+++ b/.github/workflows/docker_publish.yaml
@@ -3,15 +3,16 @@ name: Build and publish Docker image
 on:
   workflow_dispatch:
     inputs:
-      image_type:
-        description: "Choose which image to build (publisher/webserver/both)"
-        required: true
+      package:
         type: choice
+        description: "Package to build and publish"
+        default: "all"
+        required: true
         options:
-          - publisher
-          - webserver
-          - both
-        default: "both"
+          - all
+          - sv-publisher
+          - sv-webserver
+          - sv-consumer
   push:
     branches:
       - main
@@ -30,11 +31,15 @@ concurrency:
 jobs:
   build-and-publish-image:
     runs-on: ubuntu-latest
-    if: |
-      (github.event_name == 'release' && github.event.action == 'published') ||
-      github.ref == 'refs/heads/main' ||
-      github.event_name == 'workflow_dispatch' ||
-      github.event_name == 'pull_request'
+    strategy:
+      matrix:
+        package:
+          - name: sv-webserver
+            image: cluster/docker/sv-webserver.Dockerfile
+          - name: sv-publisher
+            image: cluster/docker/sv-publisher.Dockerfile
+          - name: sv-consumer
+            image: cluster/docker/sv-consumer.Dockerfile
     steps:
       - uses: actions/checkout@v4

@@ -42,24 +47,14 @@ jobs:
         id: sha
         run: echo "short_sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT

-      - name: Build and push Docker for publisher
-        if: github.event.inputs.image_type == 'publisher' || github.event.inputs.image_type == 'both'
-          || github.event_name != 'workflow_dispatch'
-        uses: ./.github/actions/docker-publish
-        id: publish-fuel-streams-nats
-        with:
-          username: ${{ github.repository_owner }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-          image: ghcr.io/fuellabs/fuel-streams-publisher
-          dockerfile: cluster/docker/fuel-streams-publisher.Dockerfile
-
-      - name: Build and push Docker for webserver
-        if: github.event.inputs.image_type == 'webserver' || github.event.inputs.image_type == 'both'
-          || github.event_name != 'workflow_dispatch'
+      - name: Build and push Docker for ${{ matrix.package.name }} (${{ steps.sha.outputs.short_sha }})
+        if: |
+          (github.event_name == 'workflow_dispatch' && (github.event.inputs.package == 'all' || github.event.inputs.package == matrix.package.name)) ||
+          github.event_name != 'workflow_dispatch'
         uses: ./.github/actions/docker-publish
-        id: publish-fuel-webserver-nats
+        id: publish
         with:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
-          image: ghcr.io/fuellabs/fuel-streams-ws
-          dockerfile: cluster/docker/fuel-streams-ws.Dockerfile
+          image: ghcr.io/fuellabs/${{ matrix.package.name }}
+          dockerfile: ${{ matrix.package.image }}
diff --git a/.github/workflows/helm_publish.yaml b/.github/workflows/helm_publish.yaml
index 932d54f6..8dc9de93 100644
--- a/.github/workflows/helm_publish.yaml
+++ b/.github/workflows/helm_publish.yaml
@@ -2,87 +2,41 @@ name: Build and Publish Helm Chart

 on:
   workflow_dispatch:
-    inputs:
-      chart:
-        description: "Select the Helm chart to deploy"
-        required: true
-        type: choice
-        options:
-          - fuel-streams-publisher
-          - fuel-streams
   push:
     branches:
       - main
-    paths:
-      - cluster/charts/fuel-streams-publisher/Chart.yaml
-      - cluster/charts/fuel-streams/Chart.yaml
+  release:
+    types:
+      - published
+
+env:
+  CHART_NAME: fuel-streams
+  CHART_PATH: cluster/charts/fuel-streams

 permissions:
   contents: read
+  packages: write
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true

 jobs:
   helm-release:
-    name: Build Helm Charts
     runs-on: ubuntu-latest
-    if: |
-      github.event_name == 'workflow_dispatch' ||
-      (github.event_name == 'release' && github.event.action == 'published') ||
-      github.ref == 'refs/heads/main' ||
-      github.event_name == 'pull_request'
-    permissions:
-      contents: read
-      packages: write
     steps:
-      - name: Check out code
-        uses: actions/checkout@v4
-
-      - name: Determine charts to process
-        id: charts
-        run: |
-          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
-            echo "charts=${{ github.event.inputs.chart }}" >> $GITHUB_OUTPUT
-          else
-            echo "charts=fuel-streams-publisher fuel-streams" >> $GITHUB_OUTPUT
-          fi
-
-      - name: Validate Chart Names
-        run: |
-          for chart in ${{ steps.charts.outputs.charts }}; do
-            if [ ! -d "cluster/charts/$chart" ]; then
-              echo "Error: Chart '$chart' does not exist."
-              exit 1
-            fi
-          done
-
+      - uses: actions/checkout@v4
       - name: Helm Dependencies Update
         run: |
-          set -e
-          for chart in ${{ steps.charts.outputs.charts }}; do
-            echo "Updating dependencies for $chart"
-            helm dependency update cluster/charts/$chart
-          done
+          cd ${{ env.CHART_PATH }} && helm dependency update

-      - name: Get chart versions
-        id: versions
+      - name: Get chart version
+        id: version
         run: |
-          publisher_version=$(awk '/^version:/ {print $2}' cluster/charts/fuel-streams-publisher/Chart.yaml)
-          streams_version=$(awk '/^version:/ {print $2}' cluster/charts/fuel-streams/Chart.yaml)
-          echo "publisher_version=$publisher_version" >> $GITHUB_OUTPUT
-          echo "streams_version=$streams_version" >> $GITHUB_OUTPUT
-
-      - name: "Build chart: [fuel-streams-publisher v${{ steps.versions.outputs.publisher_version }}]"
-        if: contains(steps.charts.outputs.charts, 'fuel-streams-publisher')
-        uses: bsord/helm-push@v4.1.0
-        with:
-          useOCIRegistry: true
-          registry-url: oci://ghcr.io/fuellabs/helmcharts
-          username: ${{ github.repository_owner }}
-          access-token: ${{ secrets.GITHUB_TOKEN }}
-          force: true
-          chart-folder: ./cluster/charts/fuel-streams-publisher
+          version=$(awk '/^version:/ {print $2}' ${{ env.CHART_PATH }}/Chart.yaml)
+          echo "version=$version" >> $GITHUB_OUTPUT

-      - name: "Build chart: [fuel-streams v${{ steps.versions.outputs.streams_version }}]"
-        if: contains(steps.charts.outputs.charts, 'fuel-streams')
+      - name: "Build chart: [${{ env.CHART_NAME }} v${{ steps.version.outputs.version }}]"
         uses: bsord/helm-push@v4.1.0
         with:
           useOCIRegistry: true
@@ -90,20 +44,11 @@ jobs:
           username: ${{ github.repository_owner }}
           access-token: ${{ secrets.GITHUB_TOKEN }}
           force: true
-          chart-folder: ./cluster/charts/fuel-streams
+          chart-folder: ${{ env.CHART_PATH }}

       - name: Build Summary
         run: |-
           echo "### Helm Charts Build Summary 📊" >> $GITHUB_STEP_SUMMARY
           echo "| Chart | Version | Status |" >> $GITHUB_STEP_SUMMARY
           echo "|-------|---------|--------|" >> $GITHUB_STEP_SUMMARY
-
-          for chart in ${{ steps.charts.outputs.charts }}; do
-            version=""
-            if [ "$chart" = "fuel-streams-publisher" ]; then
-              version="${{ steps.versions.outputs.publisher_version }}"
-            elif [ "$chart" = "fuel-streams" ]; then
-              version="${{ steps.versions.outputs.streams_version }}"
-            fi
-            echo "| $chart | $version | ✅ Published |" >> $GITHUB_STEP_SUMMARY
-          done
+          echo "| ${{ env.CHART_NAME }} | ${{ steps.version.outputs.version }} | ✅ Published |" >> $GITHUB_STEP_SUMMARY
diff --git a/.github/workflows/publish_release.yaml b/.github/workflows/publish_release.yaml
index 4dbc4f19..bd4edc6a 100644
--- a/.github/workflows/publish_release.yaml
+++ b/.github/workflows/publish_release.yaml
@@ -19,17 +19,9 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Set NATS environment variables - shell: bash - run: | - set_env_var() { - echo "$1=${!1:-$2}" >> $GITHUB_ENV - echo "$1=${!1:-$2}" >> .env - } - set_env_var "NATS_ADMIN_PASS" "null" - - name: Install toolchain - uses: dtolnay/rust-toolchain@master + - name: Install Rust + uses: ./.github/actions/setup-rust with: toolchain: ${{ env.RUST_VERSION }} target: x86_64-unknown-linux-gnu,wasm32-unknown-unknown diff --git a/.gitignore b/.gitignore index 9b22fe23..8607f4ff 100644 --- a/.gitignore +++ b/.gitignore @@ -20,10 +20,8 @@ profile.json coverage/ docs/ **/**/charts/**.tgz -values-publisher-secrets.yaml +values-secrets.yaml values-publisher-env.yaml localstack-data .vscode - **/Cargo.lock -!./Cargo.lock diff --git a/.prettierignore b/.prettierignore index 8b7b5b96..0063613c 100644 --- a/.prettierignore +++ b/.prettierignore @@ -1,3 +1,2 @@ -helm -cluster +cluster/charts pnpm-lock.yaml diff --git a/.rustfmt.toml b/.rustfmt.toml index aaebb675..2d0df4eb 100644 --- a/.rustfmt.toml +++ b/.rustfmt.toml @@ -10,3 +10,8 @@ use_field_init_shorthand = true reorder_imports = true reorder_modules = true tab_spaces = 4 +# Add these new settings +format_macro_matchers = true +format_macro_bodies = true +# If you want macros to ignore the max_width setting +overflow_delimited_expr = true diff --git a/.typos.toml b/.typos.toml index 97819a36..87a65555 100644 --- a/.typos.toml +++ b/.typos.toml @@ -1,9 +1,7 @@ [files] extend-exclude = [ "pnpm-lock.yaml", - "crates/fuel-streams-publisher/README.md", - "crates/fuel-streams-publisher/src/elastic.rs", - "crates/fuel-streams-ws/README.md", + "crates/sv-webserver/README.md", "docker/chain-config", "docker/monitoring", "cluster", diff --git a/Cargo.lock b/Cargo.lock index aa6a87fc..a8c25d4f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -410,15 +410,6 @@ version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" -[[package]] -name = "approx" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" -dependencies = [ - "num-traits", -] - [[package]] name = "arbitrary" version = "1.4.1" @@ -491,12 +482,6 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" -[[package]] -name = "assert_matches" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" - [[package]] name = "async-compression" version = "0.4.18" @@ -519,9 +504,9 @@ dependencies = [ [[package]] name = "async-graphql" -version = "7.0.11" +version = "7.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ba6d24703c5adc5ba9116901b92ee4e4c0643c01a56c4fd303f3818638d7449" +checksum = "59fd6bd734afb8b6e4d0f84a3e77305ce0a7ccc60d70f6001cb5e1c3f38d8ff1" dependencies = [ "async-graphql-derive", "async-graphql-parser", @@ -538,7 +523,6 @@ dependencies = [ "mime", "multer", "num-traits", - "once_cell", "pin-project-lite", "regex", "serde", @@ -552,9 +536,9 @@ dependencies = [ [[package]] name = "async-graphql-derive" -version = "7.0.11" +version = "7.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a94c2d176893486bd37cd1b6defadd999f7357bf5804e92f510c08bcf16c538f" +checksum = "ac38b4dd452d529d6c0248b51df23603f0a875770352e26ae8c346ce6c149b3e" dependencies = [ "Inflector", "async-graphql-parser", @@ -569,9 +553,9 @@ dependencies = [ [[package]] name = "async-graphql-parser" -version = "7.0.11" +version = "7.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79272bdbf26af97866e149f05b2b546edb5c00e51b5f916289931ed233e208ad" +checksum = "42d271ddda2f55b13970928abbcbc3423cfc18187c60e8769b48f21a93b7adaa" dependencies = [ "async-graphql-value", "pest", @@ -581,9 +565,9 @@ dependencies = [ [[package]] name = "async-graphql-value" -version = "7.0.11" +version = "7.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5ec94176a12a8cbe985cd73f2e54dc9c702c88c766bdef12f1f3a67cedbee1" +checksum = "aefe909173a037eaf3281b046dc22580b59a38b765d7b8d5116f2ffef098048d" dependencies = [ "bytes", "indexmap 2.7.0", @@ -981,9 +965,9 @@ dependencies = [ [[package]] name = "aws-smithy-async" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62220bc6e97f946ddd51b5f1361f78996e704677afc518a4ff66b7a72ea1378c" +checksum = "8aa8ff1492fd9fb99ae28e8467af0dbbb7c31512b16fabf1a0f10d7bb6ef78bb" dependencies = [ "futures-util", "pin-project-lite", @@ -1088,7 +1072,7 @@ dependencies = [ "http-body 0.4.6", "http-body 1.0.1", "httparse", - "hyper 0.14.31", + "hyper 0.14.32", "hyper-rustls 0.24.2", "once_cell", "pin-project-lite", @@ -1177,7 +1161,7 @@ dependencies = [ "futures-util", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.31", + "hyper 0.14.32", "itoa", "matchit 0.5.0", "memchr", @@ -1208,7 +1192,7 @@ dependencies = [ "futures-util", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.31", + "hyper 0.14.32", "itoa", "matchit 0.7.3", "memchr", @@ -1377,22 +1361,6 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d86b93f97252c47b41663388e6d155714a9d0c398b99f1005cbc5f978b29f445" -[[package]] -name = "bench-consumers" -version = "0.0.13" -dependencies = [ - "anyhow", - "async-nats", - "chrono", - "criterion", - "fuel-core-types 0.40.2", - "fuel-streams-core", - "futures", - "nats-publisher", - "statrs", - "tokio", -] - [[package]] name = "bincode" version = "1.3.3" @@ -1592,12 +1560,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "bytemuck" -version = "1.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" - [[package]] name = "byteorder" version = "1.5.0" @@ -1693,9 +1655,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.2" +version = "1.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f34d93e62b03caf570cccc334cbc6c2fceca82f39211051345108adcba3eebdc" +checksum = "c31a0499c1dc64f458ad13872de75c0eb7e3fdb0e67964610c914b034fc5956e" dependencies = [ "jobserver", "libc", @@ -1749,9 +1711,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.38" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1973,29 +1935,17 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "confy" 
-version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45b1f4c00870f07dc34adcac82bb6a72cc5aabca8536ba1797e01df51d2ce9a0" -dependencies = [ - "directories", - "serde", - "thiserror 1.0.69", - "toml", -] - [[package]] name = "console" -version = "0.15.8" +version = "0.15.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" +checksum = "ea3c6ecd8059b57859df5c69830340ed3c41d30e3da0c1cbed90a96ac853041b" dependencies = [ "encode_unicode", - "lazy_static", "libc", - "unicode-width 0.1.14", - "windows-sys 0.52.0", + "once_cell", + "unicode-width", + "windows-sys 0.59.0", ] [[package]] @@ -2019,18 +1969,18 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const_format" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c655d81ff1114fb0dcdea9225ea9f0cc712a6f8d189378e82bdf62a473a64b" +checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" dependencies = [ "const_format_proc_macros", ] [[package]] name = "const_format_proc_macros" -version = "0.2.33" +version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eff1a44b93f47b1bac19a27932f5c591e43d1ba357ee4f61526c8a25603f0eb1" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" dependencies = [ "proc-macro2", "quote", @@ -2399,9 +2349,9 @@ checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" [[package]] name = "crossbeam-deque" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -2418,9 +2368,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" @@ -2809,15 +2759,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "directories" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a49173b84e034382284f27f1af4dcbbd231ffa358c0fe316541a7337f376a35" -dependencies = [ - "dirs-sys 0.4.1", -] - [[package]] name = "directories-next" version = "2.0.0" @@ -2834,7 +2775,7 @@ version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" dependencies = [ - "dirs-sys 0.3.7", + "dirs-sys", ] [[package]] @@ -2848,18 +2789,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "dirs-sys" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" -dependencies = [ - "libc", - "option-ext", - "redox_users", - "windows-sys 0.48.0", -] - [[package]] name = "dirs-sys-next" version = "0.1.2" @@ -3066,9 +2995,9 @@ checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d" [[package]] name = "encode_unicode" -version = "0.3.6" +version = "1.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" +checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0" [[package]] name = "encoding_rs" @@ -3344,22 +3273,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "eventsource-client" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43ddc25e1ad2cc0106d5e2d967397b4fb2068a66677ee9b0eea4600e5cfe8fb4" -dependencies = [ - "futures", - "hyper 0.14.31", - "hyper-rustls 0.24.2", - "hyper-timeout 0.4.1", - "log", - "pin-project", - "rand", - "tokio", -] - [[package]] name = "eyre" version = "0.6.12" @@ -3378,9 +3291,9 @@ checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" [[package]] name = "fastrand" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "ff" @@ -3475,9 +3388,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" [[package]] name = "foreign-types" @@ -3579,7 +3492,7 @@ dependencies = [ "fuel-core-upgradable-executor", "futures", "hex", - "hyper 0.14.31", + "hyper 0.14.32", "indicatif", "itertools 0.12.1", "num_cpus", @@ -3613,7 +3526,6 @@ dependencies = [ "clap 4.5.23", "const_format", "dirs", - "dotenvy", "fuel-core", "fuel-core-chain-config", "fuel-core-compression", @@ -3658,14 +3570,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a10ccde16fd926137070d3baa77a1096c2ff6cdca26d14177139c16e59e697d" dependencies = [ "anyhow", - "base64 0.22.1", "cynic", "derive_more 0.99.18", - "eventsource-client", "fuel-core-types 0.40.2", - "futures", "hex", - "hyper-rustls 0.24.2", "itertools 0.12.1", "reqwest 0.11.27", "schemafy_lib", @@ -3939,7 +3847,7 @@ dependencies = [ "fuel-vm 0.58.2", "impl-tools", "itertools 0.12.1", - "mockall 0.11.4", + "mockall", "num_enum", "paste", "postcard", @@ -4109,7 +4017,7 @@ dependencies = [ "serde_json", "strum 0.26.3", "strum_macros 0.26.4", - "thiserror 2.0.4", + "thiserror 2.0.8", "tokio", ] @@ -4217,7 +4125,7 @@ dependencies = [ "displaydoc", "fuel-streams-core", "futures", - "thiserror 2.0.4", + "thiserror 2.0.8", "tokio", ] @@ -4225,25 +4133,31 @@ dependencies = [ name = "fuel-streams-core" version = "0.0.13" dependencies = [ + "anyhow", "async-nats", "async-trait", "chrono", "displaydoc", + "fuel-core", + "fuel-core-bin", "fuel-core-client", "fuel-core-importer", + "fuel-core-services", + "fuel-core-storage", "fuel-core-types 0.40.2", "fuel-data-parser", "fuel-networks", "fuel-streams-macros", + "fuel-streams-nats", "fuel-streams-storage", "futures", "hex", "pretty_assertions", "serde", "serde_json", - "sha2 0.10.8", - "thiserror 2.0.4", + "thiserror 2.0.8", "tokio", + "tracing", ] [[package]] @@ -4253,136 +4167,67 @@ dependencies = [ "anyhow", "fuel-core-types 0.40.2", "fuel-streams", - "fuel-streams-ws", "futures", + "sv-webserver", "tokio", ] [[package]] -name = "fuel-streams-macros" -version = "0.0.13" -dependencies = [ - "subject-derive", 
-] - -[[package]] -name = "fuel-streams-publisher" +name = "fuel-streams-executors" version = "0.0.13" dependencies = [ - "actix-cors", - "actix-server", - "actix-web", "anyhow", - "assert_matches", "async-nats", - "async-trait", - "chrono", - "clap 4.5.23", - "derive_more 1.0.0", - "displaydoc", - "dotenvy", - "elasticsearch", "fuel-core", - "fuel-core-bin", - "fuel-core-importer", - "fuel-core-services", - "fuel-core-storage", - "fuel-core-types 0.40.2", - "fuel-streams", "fuel-streams-core", - "fuel-streams-storage", "futures", - "mockall 0.13.1", - "mockall_double", "num_cpus", - "openssl", - "parking_lot", - "prometheus", - "rand", "rayon", - "rust_decimal", "serde", "serde_json", - "serde_prometheus", "sha2 0.10.8", - "sysinfo", - "thiserror 2.0.4", + "thiserror 2.0.8", "tokio", - "tokio-stream", "tracing", - "tracing-actix-web", - "url", ] [[package]] -name = "fuel-streams-storage" +name = "fuel-streams-macros" +version = "0.0.13" +dependencies = [ + "subject-derive", +] + +[[package]] +name = "fuel-streams-nats" version = "0.0.13" dependencies = [ "async-nats", - "aws-config", - "aws-sdk-s3", - "aws-smithy-runtime-api", - "aws-smithy-types", "displaydoc", "dotenvy", - "fuel-networks", "pretty_assertions", "rand", "serde_json", - "thiserror 2.0.4", + "thiserror 2.0.8", "tokio", "tracing", ] [[package]] -name = "fuel-streams-ws" +name = "fuel-streams-storage" version = "0.0.13" dependencies = [ - "actix-cors", - "actix-server", - "actix-service", - "actix-web", - "actix-ws", - "anyhow", - "async-nats", - "async-trait", - "bytestring", - "chrono", - "clap 4.5.23", - "confy", - "derive_more 1.0.0", - "displaydoc", + "aws-config", + "aws-sdk-s3", + "aws-smithy-runtime-api", + "aws-smithy-types", "dotenvy", - "elasticsearch", - "fuel-streams", - "fuel-streams-core", - "fuel-streams-storage", - "futures", - "futures-util", - "jsonwebtoken 9.3.0", - "num_cpus", - "openssl", - "parking_lot", - "prometheus", + "pretty_assertions", "rand", - "reqwest 0.12.9", - "rust_decimal", - "serde", "serde_json", - "serde_prometheus", - "sysinfo", - "thiserror 2.0.4", - "time", + "thiserror 2.0.8", "tokio", - "tokio-tungstenite 0.24.0", - "toml", "tracing", - "tracing-actix-web", - "tracing-subscriber", - "url", - "urlencoding", - "uuid", - "validator", ] [[package]] @@ -4612,7 +4457,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-pki-types", ] @@ -5142,9 +4987,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.31" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ "bytes", "futures-channel", @@ -5166,9 +5011,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" +checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" dependencies = [ "bytes", "futures-channel", @@ -5193,26 +5038,25 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "log", 
"rustls 0.21.12", "rustls-native-certs 0.6.3", "tokio", "tokio-rustls 0.24.1", - "webpki-roots 0.25.4", ] [[package]] name = "hyper-rustls" -version = "0.27.3" +version = "0.27.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" +checksum = "f6884a48c6826ec44f524c7456b163cebe9e55a18d7b5e307cb4f100371cc767" dependencies = [ "futures-util", "http 1.2.0", - "hyper 1.5.1", + "hyper 1.5.2", "hyper-util", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-pki-types", "tokio", "tokio-rustls 0.26.1", @@ -5226,7 +5070,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ - "hyper 0.14.31", + "hyper 0.14.32", "pin-project-lite", "tokio", "tokio-io-timeout", @@ -5238,7 +5082,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper 1.5.1", + "hyper 1.5.2", "hyper-util", "pin-project-lite", "tokio", @@ -5253,7 +5097,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.5.1", + "hyper 1.5.2", "hyper-util", "native-tls", "tokio", @@ -5272,7 +5116,7 @@ dependencies = [ "futures-util", "http 1.2.0", "http-body 1.0.1", - "hyper 1.5.1", + "hyper 1.5.2", "pin-project-lite", "socket2", "tokio", @@ -5508,7 +5352,7 @@ dependencies = [ "bytes", "futures", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "log", "rand", "tokio", @@ -5551,9 +5395,9 @@ dependencies = [ [[package]] name = "impl-tools" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a84bc8d2baf8da56e93b4247067d918e1a44829bbbe3e4b875aaf8d7d3c7bc9" +checksum = "b4739bc9af85c18969eba5e4db90dbf26be140ff2e5628593693f18559e9e5fe" dependencies = [ "autocfg", "impl-tools-lib", @@ -5563,9 +5407,9 @@ dependencies = [ [[package]] name = "impl-tools-lib" -version = "0.10.1" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a795a1e201125947a063b967c79de6ae152143ab522f481d4f493c44835ba37a" +checksum = "798fe18a7e727001b30a029ab9cdd485afd325801d4df846f0bb5338b2986a2c" dependencies = [ "proc-macro-error2", "proc-macro2", @@ -5621,7 +5465,7 @@ dependencies = [ "console", "number_prefix", "portable-atomic", - "unicode-width 0.2.0", + "unicode-width", "web-time", ] @@ -5728,9 +5572,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.74" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a865e038f7f6ed956f788f0d7d60c541fff74c7bd74272c5d4cf15c63743e705" +checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" dependencies = [ "once_cell", "wasm-bindgen", @@ -5820,9 +5664,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.167" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libflate" @@ -6168,7 +6012,7 @@ dependencies = [ "quinn", "rand", "ring 0.17.8", - "rustls 0.23.19", + "rustls 0.23.20", "socket2", "thiserror 1.0.69", "tokio", @@ -6260,7 +6104,7 @@ 
dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-webpki 0.101.7", "thiserror 1.0.69", "x509-parser", @@ -6427,21 +6271,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4ce301924b7887e9d637144fdade93f9dfff9b60981d4ac161db09720d39aa5" -[[package]] -name = "load-tester" -version = "0.0.13" -dependencies = [ - "anyhow", - "async-nats", - "chrono", - "clap 4.5.23", - "fuel-streams", - "fuel-streams-core", - "futures", - "statrs", - "tokio", -] - [[package]] name = "local-channel" version = "0.1.5" @@ -6551,16 +6380,6 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" -[[package]] -name = "matrixmultiply" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9380b911e3e96d10c1f415da0876389aaf1b56759054eeb0de7df940c456ba1a" -dependencies = [ - "autocfg", - "rawpointer", -] - [[package]] name = "md-5" version = "0.10.6" @@ -6609,9 +6428,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +checksum = "4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394" dependencies = [ "adler2", ] @@ -6638,22 +6457,8 @@ dependencies = [ "downcast", "fragile", "lazy_static", - "mockall_derive 0.11.4", - "predicates 2.1.5", - "predicates-tree", -] - -[[package]] -name = "mockall" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" -dependencies = [ - "cfg-if", - "downcast", - "fragile", - "mockall_derive 0.13.1", - "predicates 3.1.2", + "mockall_derive", + "predicates", "predicates-tree", ] @@ -6669,30 +6474,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "mockall_derive" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" -dependencies = [ - "cfg-if", - "proc-macro2", - "quote", - "syn 2.0.90", -] - -[[package]] -name = "mockall_double" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1ca96e5ac35256ae3e13536edd39b172b88f41615e1d7b653c8ad24524113e8" -dependencies = [ - "cfg-if", - "proc-macro2", - "quote", - "syn 2.0.90", -] - [[package]] name = "multer" version = "3.1.0" @@ -6742,9 +6523,9 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.2" +version = "0.19.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" +checksum = "6b430e7953c29dd6a09afc29ff0bb69c6e306329ee6794700aee27b76a1aea8d" dependencies = [ "core2", "unsigned-varint 0.8.0", @@ -6770,23 +6551,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e94e1e6445d314f972ff7395df2de295fe51b71821694f0b0e1e79c4f12c8577" -[[package]] -name = "nalgebra" -version = "0.33.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26aecdf64b707efd1310e3544d709c5c0ac61c13756046aaaba41be5c4f66a3b" -dependencies = [ - "approx", - "matrixmultiply", - "num-complex", - "num-rational", - 
"num-traits", - "rand", - "rand_distr", - "simba", - "typenum", -] - [[package]] name = "names" version = "0.14.0" @@ -6814,25 +6578,6 @@ dependencies = [ "tempfile", ] -[[package]] -name = "nats-publisher" -version = "0.0.13" -dependencies = [ - "anyhow", - "async-nats", - "clap 4.5.23", - "criterion", - "fuel-core", - "fuel-core-bin", - "fuel-core-importer", - "fuel-core-storage", - "fuel-core-types 0.40.2", - "fuel-data-parser", - "fuel-streams-core", - "tokio", - "tracing", -] - [[package]] name = "netlink-packet-core" version = "0.7.0" @@ -6984,15 +6729,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-complex" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" -dependencies = [ - "num-traits", -] - [[package]] name = "num-conv" version = "0.1.0" @@ -7026,7 +6762,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", - "libm", ] [[package]] @@ -7060,6 +6795,15 @@ dependencies = [ "syn 2.0.90", ] +[[package]] +name = "num_threads" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" +dependencies = [ + "libc", +] + [[package]] name = "number_prefix" version = "0.4.0" @@ -7184,12 +6928,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "option-ext" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" - [[package]] name = "os_str_bytes" version = "6.6.1" @@ -7371,12 +7109,12 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" +checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 1.0.69", + "thiserror 2.0.8", "ucd-trie", ] @@ -7592,27 +7330,17 @@ dependencies = [ "regex", ] -[[package]] -name = "predicates" -version = "3.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e9086cc7640c29a356d1a29fd134380bee9d8f79a17410aa76e7ad295f42c97" -dependencies = [ - "anstyle", - "predicates-core", -] - [[package]] name = "predicates-core" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931" +checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" [[package]] name = "predicates-tree" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13" +checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" dependencies = [ "predicates-core", "termtree", @@ -7790,9 +7518,9 @@ dependencies = [ [[package]] name = "proptest" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" dependencies = [ "bitflags 2.6.0", 
"lazy_static", @@ -8003,9 +7731,9 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.1.0", - "rustls 0.23.19", + "rustls 0.23.20", "socket2", - "thiserror 2.0.4", + "thiserror 2.0.8", "tokio", "tracing", ] @@ -8021,10 +7749,10 @@ dependencies = [ "rand", "ring 0.17.8", "rustc-hash 2.1.0", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-pki-types", "slab", - "thiserror 2.0.4", + "thiserror 2.0.8", "tinyvec", "tracing", "web-time", @@ -8032,9 +7760,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.7" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" +checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" dependencies = [ "cfg_aliases", "libc", @@ -8089,16 +7817,6 @@ dependencies = [ "getrandom", ] -[[package]] -name = "rand_distr" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" -dependencies = [ - "num-traits", - "rand", -] - [[package]] name = "rand_xorshift" version = "0.3.0" @@ -8108,12 +7826,6 @@ dependencies = [ "rand_core", ] -[[package]] -name = "rawpointer" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" - [[package]] name = "rayon" version = "1.10.0" @@ -8148,9 +7860,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ "bitflags 2.6.0", ] @@ -8254,7 +7966,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.31", + "hyper 0.14.32", "hyper-rustls 0.24.2", "ipnet", "js-sys", @@ -8301,8 +8013,8 @@ dependencies = [ "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.1", - "hyper-rustls 0.27.3", + "hyper 1.5.2", + "hyper-rustls 0.27.4", "hyper-tls", "hyper-util", "ipnet", @@ -8314,7 +8026,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-pemfile 2.2.0", "rustls-pki-types", "serde", @@ -8559,15 +8271,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.41" +version = "0.38.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" +checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -8584,9 +8296,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.19" +version = "0.23.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" +checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" dependencies = [ "once_cell", "ring 0.17.8", @@ -8630,7 +8342,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.0.1", + "security-framework 3.1.0", ] [[package]] @@ -8653,9 +8365,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.0" +version = "1.10.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" dependencies = [ "web-time", ] @@ -8704,15 +8416,6 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" -[[package]] -name = "safe_arch" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3460605018fdc9612bce72735cba0d27efbcd9904780d44c7e3a9948f96148a" -dependencies = [ - "bytemuck", -] - [[package]] name = "same-file" version = "1.0.6" @@ -8874,9 +8577,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.0.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1415a607e92bec364ea2cf9264646dcce0f91e6d65281bd6f2819cca3bf39c8" +checksum = "81d3f8c9bfcc3cbb6b0179eb57042d75b1582bdc65c3cb95f3fa999509c03cbc" dependencies = [ "bitflags 2.6.0", "core-foundation 0.10.0", @@ -8887,9 +8590,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.1" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" +checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" dependencies = [ "core-foundation-sys", "libc", @@ -8897,9 +8600,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" dependencies = [ "serde", ] @@ -9152,19 +8855,6 @@ dependencies = [ "rand_core", ] -[[package]] -name = "simba" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a386a501cd104797982c15ae17aafe8b9261315b5d07e3ec803f2ea26be0fa" -dependencies = [ - "approx", - "num-complex", - "num-traits", - "paste", - "wide", -] - [[package]] name = "simdutf8" version = "0.1.5" @@ -9308,30 +8998,13 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7beae5182595e9a8b683fa98c4317f956c9a2dec3b9716990d20023cc60c766" -[[package]] -name = "statrs" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a3fe7c28c6512e766b0874335db33c94ad7b8f9054228ae1c2abd47ce7d335e" -dependencies = [ - "approx", - "nalgebra", - "num-traits", - "rand", -] - [[package]] name = "streams-tests" version = "0.0.13" dependencies = [ - "anyhow", - "async-trait", "fuel-core", - "fuel-core-importer", - "fuel-core-types 0.40.2", "fuel-streams", "fuel-streams-core", - "fuel-streams-publisher", "futures", "pretty_assertions", "rand", @@ -9446,11 +9119,103 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" +[[package]] +name = "sv-consumer" +version = "0.0.13" +dependencies = [ + "anyhow", + "async-nats", + "clap 4.5.23", + "dotenvy", + "fuel-core", + "fuel-streams-core", + "fuel-streams-executors", + "futures", + "num_cpus", + "openssl", + "serde_json", + "sv-publisher", + "thiserror 2.0.8", + "tokio", + "tokio-util", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = 
"sv-publisher" +version = "0.0.13" +dependencies = [ + "anyhow", + "async-nats", + "clap 4.5.23", + "fuel-core", + "fuel-core-bin", + "fuel-core-types 0.40.2", + "fuel-streams-core", + "fuel-streams-executors", + "futures", + "openssl", + "thiserror 2.0.8", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "sv-webserver" +version = "0.0.13" +dependencies = [ + "actix-cors", + "actix-server", + "actix-service", + "actix-web", + "actix-ws", + "anyhow", + "async-nats", + "bytestring", + "chrono", + "clap 4.5.23", + "derive_more 1.0.0", + "displaydoc", + "dotenvy", + "elasticsearch", + "fuel-streams", + "fuel-streams-core", + "fuel-streams-nats", + "fuel-streams-storage", + "futures", + "futures-util", + "jsonwebtoken 9.3.0", + "num_cpus", + "openssl", + "parking_lot", + "prometheus", + "rand", + "reqwest 0.12.9", + "rust_decimal", + "serde", + "serde_json", + "serde_prometheus", + "sysinfo", + "thiserror 2.0.8", + "time", + "tokio", + "tokio-tungstenite 0.24.0", + "tracing", + "tracing-actix-web", + "tracing-subscriber", + "url", + "urlencoding", + "uuid", + "validator", +] + [[package]] name = "symbolic-common" -version = "12.12.3" +version = "12.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ba5365997a4e375660bed52f5b42766475d5bc8ceb1bb13fea09c469ea0f49" +checksum = "cd33e73f154e36ec223c18013f7064a2c120f1162fc086ac9933542def186b00" dependencies = [ "debugid", "memmap2", @@ -9460,9 +9225,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "12.12.3" +version = "12.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "beff338b2788519120f38c59ff4bb15174f52a183e547bac3d6072c2c0aa48aa" +checksum = "89e51191290147f071777e37fe111800bb82a9059f9c95b19d2dd41bfeddf477" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -9756,9 +9521,9 @@ dependencies = [ [[package]] name = "termtree" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" +checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "textwrap" @@ -9777,11 +9542,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.4" +version = "2.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f49a1853cf82743e3b7950f77e0f4d622ca36cf4317cba00c767838bac8d490" +checksum = "08f5383f3e0071702bf93ab5ee99b52d26936be9dedd9413067cbdcddcb6141a" dependencies = [ - "thiserror-impl 2.0.4", + "thiserror-impl 2.0.8", ] [[package]] @@ -9797,9 +9562,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.4" +version = "2.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8381894bb3efe0c4acac3ded651301ceee58a15d47c2e34885ed1908ad667061" +checksum = "f2f357fcec90b3caef6623a099691be676d033b40a058ac95d2a6ade6fa0c943" dependencies = [ "proc-macro2", "quote", @@ -9844,7 +9609,9 @@ checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", + "libc", "num-conv", + "num_threads", "powerfmt", "serde", "time-core", @@ -9986,7 +9753,7 @@ version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ - "rustls 0.23.19", + "rustls 0.23.20", "tokio", ] @@ -10111,7 +9878,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 
0.14.31", + "hyper 0.14.32", "hyper-timeout 0.4.1", "percent-encoding", "pin-project", @@ -10139,7 +9906,7 @@ dependencies = [ "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.1", + "hyper 1.5.2", "hyper-timeout 0.5.2", "hyper-util", "percent-encoding", @@ -10333,6 +10100,7 @@ dependencies = [ "sharded-slab", "smallvec", "thread_local", + "time", "tracing", "tracing-core", "tracing-log", @@ -10432,9 +10200,9 @@ checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" [[package]] name = "unicode-bidi" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" [[package]] name = "unicode-ident" @@ -10457,12 +10225,6 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" -[[package]] -name = "unicode-width" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" - [[package]] name = "unicode-width" version = "0.2.0" @@ -10651,9 +10413,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d15e63b4482863c109d70a7b8706c1e364eb6ea449b201a76c5b89cedcec2d5c" +checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" dependencies = [ "cfg-if", "once_cell", @@ -10662,13 +10424,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d36ef12e3aaca16ddd3f67922bc63e48e953f126de60bd33ccc0101ef9998cd" +checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", "syn 2.0.90", @@ -10677,9 +10438,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.47" +version = "0.4.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dfaf8f50e5f293737ee323940c7d8b08a66a95a419223d9f41610ca08b0833d" +checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" dependencies = [ "cfg-if", "js-sys", @@ -10690,9 +10451,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705440e08b42d3e4b36de7d66c944be628d579796b8090bfa3471478a2260051" +checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10700,9 +10461,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c9ae5a76e46f4deecd0f0255cc223cfa18dc9b261213b8aa0c7b36f61b3f1d" +checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", @@ -10713,9 +10474,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6ee99da9c5ba11bd675621338ef6fa52296b76b83305e9b6e5c77d4c286d6d49" +checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" [[package]] name = "wasm-encoder" @@ -10947,9 +10708,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.74" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a98bc3c33f0fe7e59ad7cd041b89034fa82a7c2d4365ca538dda6cdaf513863c" +checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" dependencies = [ "js-sys", "wasm-bindgen", @@ -10980,16 +10741,6 @@ dependencies = [ "rustls-pki-types", ] -[[package]] -name = "wide" -version = "0.7.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e6db2670d2be78525979e9a5f9c69d296fd7d670549fe9ebf70f8708cb5019" -dependencies = [ - "bytemuck", - "safe_arch", -] - [[package]] name = "widestring" version = "1.1.0" diff --git a/Cargo.toml b/Cargo.toml index 1b17bb8e..a2ca7839 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,21 +33,31 @@ clap = { version = "4.5", features = ["derive", "env"] } dotenvy = "0.15" displaydoc = "0.2" futures = "0.3" -fuel-core-bin = { version = "0.40.2", features = ["p2p", "relayer", "rocksdb"] } -fuel-core = { version = "0.40.2", features = ["p2p", "relayer", "rocksdb"] } -fuel-core-client = { version = "0.40.2" } +fuel-core-bin = { version = "0.40.2", default-features = false, features = [ + "p2p", + "relayer", + "rocksdb", +] } +fuel-core = { version = "0.40.2", default-features = false, features = [ + "p2p", + "relayer", + "rocksdb", +] } +fuel-core-client = { version = "0.40.2", default-features = false, features = ["std"] } fuel-core-importer = { version = "0.40.2" } fuel-core-storage = { version = "0.40.2" } -fuel-core-types = { version = "0.40.2", features = ["test-helpers", "serde"] } -fuel-core-services = { version = "0.40.2" } +fuel-core-types = { version = "0.40.2", default-features = false, features = ["std", "serde"] } +fuel-core-services = { version = "0.40.2", default-features = false, features = ["test-helpers"] } futures-util = "0.3" itertools = "0.13" mockall = "0.13" mockall_double = "0.3.1" hex = "0.4" pretty_assertions = "1.4" +num_cpus = "1.16" rand = "0.8" serde = { version = "1.0", features = ["derive"] } +rayon = "1.10.0" serde_json = "1.0" sha2 = "0.10" strum = "0.26" @@ -63,11 +73,14 @@ fuel-streams = { path = "crates/fuel-streams" } fuel-networks = { path = "crates/fuel-networks" } fuel-data-parser = { version = "0.0.13", path = "crates/fuel-data-parser" } fuel-streams-core = { version = "0.0.13", path = "crates/fuel-streams-core" } -fuel-streams-publisher = { version = "0.0.13", path = "crates/fuel-streams-publisher" } -fuel-streams-ws = { version = "0.0.13", path = "crates/fuel-streams-ws" } +sv-webserver = { version = "0.0.13", path = "crates/sv-webserver" } fuel-streams-macros = { version = "0.0.13", path = "crates/fuel-streams-macros" } +fuel-streams-nats = { version = "0.0.13", path = "crates/fuel-streams-nats" } fuel-streams-storage = { version = "0.0.13", path = "crates/fuel-streams-storage" } subject-derive = { version = "0.0.13", path = "crates/fuel-streams-macros/subject-derive" } +fuel-streams-executors = { version = "0.0.13", path = "crates/fuel-streams-executors" } +sv-publisher = { version = "0.0.13", path = "crates/sv-publisher" } +sv-consumer = { version = "0.0.13", path = "crates/sv-consumer" } # Workspace projects [workspace.metadata.cargo-machete] diff --git a/Makefile b/Makefile index feaa6c3f..877935b0 100644 --- a/Makefile 
+++ b/Makefile @@ -18,7 +18,7 @@ RUST_VERSION := 1.81.0 clean clean-build cleanup-artifacts test-watch test bench helm-test \ fmt fmt-cargo fmt-rust fmt-prettier fmt-markdown lint lint-cargo \ lint-rust lint-clippy lint-prettier lint-markdown lint-machete \ - audit audit-fix-test audit-fix load-test run-publisher \ + audit audit-fix-test audit-fix load-test run-publisher run-consumer \ run-mainnet-dev run-mainnet-profiling run-testnet-dev run-testnet-profiling \ start-nats stop-nats restart-nats clean-nats minikube-setup minikube-start \ minikube-delete k8s-setup helm-setup cluster-setup pre-cluster \ @@ -112,6 +112,9 @@ clean-build: rm -rf target/ rm -rf node_modules/ +cleanup-artifacts: REPO_OWNER="fuellabs" +cleanup-artifacts: REPO_NAME="data-systems" +cleanup-artifacts: DAYS_TO_KEEP=10 cleanup-artifacts: @echo "Running artifact cleanup..." @./scripts/cleanup_artifacts.sh $(REPO_OWNER) $(REPO_NAME) $(DAYS_TO_KEEP) @@ -120,20 +123,24 @@ cleanup-artifacts: # Testing # ------------------------------------------------------------ +test-watch: PROFILE="all" test-watch: cargo watch -x "test --profile $(PROFILE)" +test: PACKAGE="all" +test: PROFILE="dev" test: + @echo "Running tests for package $(PACKAGE) with profile $(PROFILE)" @if [ "$(PACKAGE)" = "all" ] || [ -z "$(PACKAGE)" ]; then \ - cargo nextest run --cargo-profile $(PROFILE) --workspace --color always --locked --no-tests=pass && \ - cargo test --profile $(PROFILE) --doc --workspace; \ + cargo nextest run --cargo-profile $(PROFILE) --workspace --color always --no-tests=pass --all-features && \ + cargo test --profile $(PROFILE) --doc --workspace --all-features; \ else \ - cargo nextest run --cargo-profile $(PROFILE) -p $(PACKAGE) --color always --locked --no-tests=pass && \ - cargo test --profile $(PROFILE) --doc -p $(PACKAGE); \ + cargo nextest run --cargo-profile $(PROFILE) -p $(PACKAGE) --color always --no-tests=pass --all-features && \ + cargo test --profile $(PROFILE) --doc -p $(PACKAGE) --all-features; \ fi bench: - cargo bench -p data-parser -p nats-publisher -p bench-consumers + cargo bench -p data-parser helm-test: helm unittest -f "tests/**/*.yaml" -f "tests/*.yaml" cluster/charts/fuel-streams @@ -201,13 +208,14 @@ load-test: # Publisher Run Commands # ------------------------------------------------------------ +run-publisher: NETWORK="testnet" +run-publisher: MODE="dev" +run-publisher: PORT="4000" +run-publisher: TELEMETRY_PORT="8080" +run-publisher: NATS_URL="localhost:4222" +run-publisher: EXTRA_ARGS="" run-publisher: check-network - @./scripts/run_publisher.sh \ - --network $(NETWORK) \ - --mode $(MODE) \ - $(if $(PORT),--port $(PORT),) \ - $(if $(TELEMETRY_PORT),--telemetry-port $(TELEMETRY_PORT),) \ - $(if $(extra_args),--extra-args "$(extra_args)",) + @./scripts/run_publisher.sh run-publisher-mainnet-dev: $(MAKE) run-publisher NETWORK=mainnet MODE=dev @@ -221,46 +229,72 @@ run-publisher-testnet-dev: run-publisher-testnet-profiling: $(MAKE) run-publisher NETWORK=testnet MODE=profiling +# ------------------------------------------------------------ +# Consumer Run Commands +# ------------------------------------------------------------ + +run-consumer: NATS_URL="localhost:4222" +run-consumer: NATS_PUBLISHER_URL="localhost:4333" +run-consumer: + cargo run --package sv-consumer --profile dev -- \ + --nats-url $(NATS_URL) \ + --nats-publisher-url $(NATS_PUBLISHER_URL) + # ------------------------------------------------------------ # Streamer Run Commands # ------------------------------------------------------------ 
-run-streamer: check-network - @./scripts/run_streamer.sh \ - --mode $(MODE) \ - $(if $(extra_args),--extra-args "$(extra_args)",) +run-webserver: NETWORK="testnet" +run-webserver: MODE="dev" +run-webserver: PORT="9003" +run-webserver: NATS_URL="nats://localhost:4222" +run-webserver: EXTRA_ARGS="" +run-webserver: check-network + @./scripts/run_webserver.sh --mode $(MODE) --port $(PORT) --nats-url $(NATS_URL) --extra-args $(EXTRA_ARGS) -run-streamer-mainnet-dev: - $(MAKE) run-streamer NETWORK=mainnet MODE=dev +run-webserver-mainnet-dev: + $(MAKE) run-webserver NETWORK=mainnet MODE=dev -run-streamer-mainnet-profiling: - $(MAKE) run-streamer NETWORK=mainnet MODE=profiling +run-webserver-mainnet-profiling: + $(MAKE) run-webserver NETWORK=mainnet MODE=profiling -run-streamer-testnet-dev: - $(MAKE) run-streamer NETWORK=testnet MODE=dev +run-webserver-testnet-dev: + $(MAKE) run-webserver NETWORK=testnet MODE=dev -run-streamer-testnet-profiling: - $(MAKE) run-streamer NETWORK=testnet MODE=profiling +run-webserver-testnet-profiling: + $(MAKE) run-webserver NETWORK=testnet MODE=profiling # ------------------------------------------------------------ # Docker Compose # ------------------------------------------------------------ +# Define service profiles +DOCKER_SERVICES := nats localstack docker + +run-docker-compose: PROFILE="all" run-docker-compose: @./scripts/set_env.sh - @docker compose -f cluster/docker/docker-compose.yml --env-file .env $(COMMAND) + @docker compose -f cluster/docker/docker-compose.yml --profile $(PROFILE) --env-file .env $(COMMAND) + +# Common docker-compose commands +define make-docker-commands +start-$(1): + $(MAKE) run-docker-compose PROFILE="$(if $(filter docker,$(1)),all,$(1))" COMMAND="up -d" -start-nats: - $(MAKE) run-docker-compose COMMAND="up -d" +stop-$(1): + $(MAKE) run-docker-compose PROFILE="$(if $(filter docker,$(1)),all,$(1))" COMMAND="down" -stop-nats: - $(MAKE) run-docker-compose COMMAND="down" +restart-$(1): + $(MAKE) run-docker-compose PROFILE="$(if $(filter docker,$(1)),all,$(1))" COMMAND="restart" -restart-nats: - $(MAKE) run-docker-compose COMMAND="restart" +clean-$(1): + $(MAKE) run-docker-compose PROFILE="$(if $(filter docker,$(1)),all,$(1))" COMMAND="down -v --remove-orphans" -clean-nats: - $(MAKE) run-docker-compose COMMAND="down -v --rmi all --remove-orphans" +reset-$(1): clean-$(1) start-$(1) +endef + +# Generate targets for each service +$(foreach service,$(DOCKER_SERVICES),$(eval $(call make-docker-commands,$(service)))) # ------------------------------------------------------------ # Local cluster (Minikube) @@ -290,15 +324,10 @@ minikube-delete: @echo "Deleting minikube..." @minikube delete -k8s-setup: - @echo "Setting up k8s..." 
- @./cluster/scripts/setup_k8s.sh $(NAMESPACE) - helm-setup: @cd cluster/charts/fuel-streams && helm dependency update - @cd cluster/charts/fuel-streams-publisher && helm dependency update -cluster-setup: minikube-setup k8s-setup helm-setup +cluster-setup: minikube-setup helm-setup pre-cluster: @./scripts/set_env.sh @@ -311,5 +340,4 @@ cluster-up: pre-cluster cluster-down: pre-cluster CLUSTER_MODE=$(MODE) tilt --file ./Tiltfile down -cluster-reset: pre-cluster - CLUSTER_MODE=$(MODE) tilt --file ./Tiltfile reset +cluster-reset: cluster-down cluster-up diff --git a/Tiltfile b/Tiltfile index 2618c336..f81a64c0 100755 --- a/Tiltfile +++ b/Tiltfile @@ -10,15 +10,43 @@ version_settings(True) # Enable 'new version' banner # Load environment variables from .env file dotenv() -# Build publisher image with proper configuration for Minikube +allow_k8s_contexts('minikube') + +# Build sv-publisher +custom_build( + ref='sv-publisher:latest', + command=[ + './cluster/scripts/build_docker.sh', + '--dockerfile', './cluster/docker/sv-publisher.Dockerfile' + ], + deps=[ + './src', + './Cargo.toml', + './Cargo.lock', + './cluster/docker/sv-publisher.Dockerfile' + ], + live_update=[ + sync('./src', '/usr/src'), + sync('./Cargo.toml', '/usr/src/Cargo.toml'), + sync('./Cargo.lock', '/usr/src/Cargo.lock'), + run('cargo build', trigger=['./src', './Cargo.toml', './Cargo.lock']) + ], + ignore=['./target'] +) + +# Build sv-consumer custom_build( - ref='fuel-streams-publisher:latest', - command=['./cluster/scripts/build_publisher.sh'], + ref='sv-consumer:latest', + image_deps=['sv-publisher:latest'], + command=[ + './cluster/scripts/build_docker.sh', + '--dockerfile', './cluster/docker/sv-consumer.Dockerfile' + ], deps=[ './src', './Cargo.toml', './Cargo.lock', - './cluster/docker/fuel-streams-publisher.Dockerfile' + './cluster/docker/sv-consumer.Dockerfile' ], live_update=[ sync('./src', '/usr/src'), @@ -26,19 +54,22 @@ custom_build( sync('./Cargo.lock', '/usr/src/Cargo.lock'), run('cargo build', trigger=['./src', './Cargo.toml', './Cargo.lock']) ], - skips_local_docker=True, ignore=['./target'] ) # Build streamer ws image with proper configuration for Minikube custom_build( - ref='fuel-streams-ws:latest', - command=['./cluster/scripts/build_streamer.sh'], + ref='sv-webserver:latest', + image_deps=['sv-consumer:latest', 'sv-publisher:latest'], + command=[ + './cluster/scripts/build_docker.sh', + '--dockerfile', './cluster/docker/sv-webserver.Dockerfile' + ], deps=[ './src', './Cargo.toml', './Cargo.lock', - './docker/fuel-streams-ws.Dockerfile' + './cluster/docker/sv-webserver.Dockerfile' ], live_update=[ sync('./src', '/usr/src'), @@ -46,7 +77,6 @@ custom_build( sync('./Cargo.lock', '/usr/src/Cargo.lock'), run('cargo build', trigger=['./src', './Cargo.toml', './Cargo.lock']) ], - skips_local_docker=True, ignore=['./target'] ) @@ -57,50 +87,39 @@ config_mode = os.getenv('CLUSTER_MODE', 'full') # Resource configurations RESOURCES = { 'publisher': { - 'name': 'fuel-streams-publisher', - 'ports': ['4000:4000', '8080:8080'], + 'name': 'fuel-streams-sv-publisher', + 'ports': ['8080:8080'], 'labels': 'publisher', - 'config_mode': ['minimal', 'full'] + 'config_mode': ['minimal', 'full'], + 'deps': ['fuel-streams-nats-core', 'fuel-streams-nats-publisher'] + }, + 'consumer': { + 'name': 'fuel-streams-sv-consumer', + 'ports': ['8081:8080'], + 'labels': 'consumer', + 'config_mode': ['minimal', 'full'], + 'deps': ['fuel-streams-nats-core', 'fuel-streams-nats-publisher', 'fuel-streams-sv-publisher'] + }, + 'sv-webserver': { + 
'name': 'fuel-streams-sv-webserver', + 'ports': ['9003:9003'], + 'labels': 'ws', + 'config_mode': ['minimal', 'full'], + 'deps': ['fuel-streams-nats-core', 'fuel-streams-nats-publisher'] }, 'nats-core': { 'name': 'fuel-streams-nats-core', - 'ports': ['4222:4222', '8222:8222'], - 'labels': 'nats', - 'config_mode': ['minimal', 'full'] - }, - 'nats-client': { - 'name': 'fuel-streams-nats-client', - 'ports': ['4223:4222', '8443:8443'], + 'ports': ['4222:4222', '6222:6222', '7422:7422'], 'labels': 'nats', 'config_mode': ['minimal', 'full'] }, 'nats-publisher': { 'name': 'fuel-streams-nats-publisher', - 'ports': ['4224:4222'], + 'ports': ['4333:4222', '6222:6222', '7433:7422'], 'labels': 'nats', - 'config_mode': ['minimal', 'full'] + 'config_mode': ['minimal', 'full'], + 'deps': ['fuel-streams-nats-core'] }, - # 'grafana': { - # 'name': 'fuel-streams-grafana', - # 'ports': ['3000:3000'], - # 'labels': 'monitoring', - # 'config_mode': ['minimal', 'full'] - # }, - # 'prometheus-operator': { - # 'name': 'fuel-streams-prometheus-operator', - # 'labels': 'monitoring', - # 'config_mode': ['minimal', 'full'] - # }, - # 'kube-state-metrics': { - # 'name': 'fuel-streams-kube-state-metrics', - # 'labels': 'monitoring', - # 'config_mode': ['minimal', 'full'] - # }, - # 'node-exporter': { - # 'name': 'fuel-streams-prometheus-node-exporter', - # 'labels': 'monitoring', - # 'config_mode': ['minimal', 'full'] - # } } k8s_yaml(helm( @@ -108,8 +127,9 @@ k8s_yaml(helm( name='fuel-streams', namespace='fuel-streams', values=[ - 'cluster/charts/fuel-streams/values-publisher-secrets.yaml', - 'cluster/charts/fuel-streams/values.yaml' + 'cluster/charts/fuel-streams/values.yaml', + 'cluster/charts/fuel-streams/values-local.yaml', + 'cluster/charts/fuel-streams/values-secrets.yaml' ] )) diff --git a/benches/bench-consumers/Cargo.toml b/benches/bench-consumers/Cargo.toml deleted file mode 100644 index fb0dfb5b..00000000 --- a/benches/bench-consumers/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "bench-consumers" -authors = { workspace = true } -keywords = { workspace = true } -edition = { workspace = true } -homepage = { workspace = true } -license = { workspace = true } -repository = { workspace = true } -version = { workspace = true } -rust-version = { workspace = true } -publish = false - -[[bench]] -name = "consumers" -harness = false -path = "benches/consumers.rs" - -[dependencies] -anyhow = { workspace = true } -async-nats = { workspace = true } -chrono = { workspace = true } -fuel-core-types = { workspace = true } -fuel-streams-core = { workspace = true, features = ["bench-helpers"] } -futures = { workspace = true } -nats-publisher = { path = "../nats-publisher" } -statrs = "0.18" -tokio = { workspace = true } - -[dev-dependencies] -criterion = { version = "0.5", features = ["html_reports", "async_tokio"] } diff --git a/benches/bench-consumers/README.md b/benches/bench-consumers/README.md deleted file mode 100644 index ad650983..00000000 --- a/benches/bench-consumers/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Running - -1. After running the [`nats-publisher`](../nats-publisher/README.md) locally, simply run this project: - - ```sh - cargo run - ``` - -2. 
You can also run benchmarks with cargo: - - ```sh - cargo bench --bench consumers - ``` diff --git a/benches/bench-consumers/benches/consumers.rs b/benches/bench-consumers/benches/consumers.rs deleted file mode 100644 index 8b66907f..00000000 --- a/benches/bench-consumers/benches/consumers.rs +++ /dev/null @@ -1,43 +0,0 @@ -use bench_consumers::runners::{ - runner_consumer::run_blocks_consumer, - runner_kv_watcher::run_watch_kv_blocks, - runner_subscription::run_subscriptions, -}; -use criterion::{criterion_group, criterion_main, Criterion}; -use nats_publisher::utils::nats::NatsHelper; -use tokio::runtime::Runtime; - -static MSGS_LIMIT: usize = 10000; - -fn benchmark_all(c: &mut Criterion) { - let rt = Runtime::new().unwrap(); - let mut group = c.benchmark_group("NATS Benchmarks"); - let nats = rt.block_on(async { NatsHelper::connect(false).await.unwrap() }); - - group.bench_function("consume_blocks_ack_none", |b| { - b.to_async(&rt).iter(|| async { - run_blocks_consumer(&nats, MSGS_LIMIT).await.unwrap() - }); - }); - - group.bench_function("watch_kv_blocks", |b| { - b.to_async(&rt).iter(|| async { - run_watch_kv_blocks(&nats, MSGS_LIMIT).await.unwrap() - }); - }); - - group.bench_function("subscriptions", |b| { - b.to_async(&rt).iter(|| async { - run_subscriptions(&nats, MSGS_LIMIT).await.unwrap() - }); - }); - - group.finish(); -} - -criterion_group!( - name = benches; - config = Criterion::default().sample_size(10); // Adjust sample size as needed - targets = benchmark_all -); -criterion_main!(benches); diff --git a/benches/bench-consumers/src/lib.rs b/benches/bench-consumers/src/lib.rs deleted file mode 100644 index 0edfafa9..00000000 --- a/benches/bench-consumers/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod runners; diff --git a/benches/bench-consumers/src/main.rs b/benches/bench-consumers/src/main.rs deleted file mode 100644 index a4bc6b64..00000000 --- a/benches/bench-consumers/src/main.rs +++ /dev/null @@ -1,10 +0,0 @@ -use runners::runner_all::run_all_benchmarks; - -mod runners; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - println!("Running benchmarks"); - run_all_benchmarks().await?; - Ok(()) -} diff --git a/benches/bench-consumers/src/runners/benchmark_results.rs b/benches/bench-consumers/src/runners/benchmark_results.rs deleted file mode 100644 index e4aaaf42..00000000 --- a/benches/bench-consumers/src/runners/benchmark_results.rs +++ /dev/null @@ -1,103 +0,0 @@ -use core::fmt; -use std::time::{Duration, Instant}; - -use chrono::{DateTime, Utc}; -use statrs::statistics::{Data, Distribution}; - -#[derive(Debug, Clone)] -pub struct BenchmarkResult { - pub name: String, - pub message_count: usize, - pub error_count: usize, - start_time: Instant, - pub elapsed_time: Option<Duration>, - pub messages_per_second: Option<f64>, - pub publish_times: Vec<Duration>, - pub mean_publish_time: Option<Duration>, - pub messages_limit: usize, -} - -impl fmt::Display for BenchmarkResult { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "\n{}\nBenchmark Results: {}\n{}\nTotal Messages: {}\nTotal Errors: {}\nElapsed Time: {:?}\nMessages per Second: {:.2}\nMean Publish Time: {:?}\n{}", - "=".repeat(50), - self.name, - "=".repeat(50), - self.message_count, - self.error_count, - self.elapsed_time.unwrap_or_default(), - self.messages_per_second.unwrap_or_default(), - self.mean_publish_time.unwrap_or_default(), - "=".repeat(50) - ) - } -} - -impl BenchmarkResult { - pub fn new(name: String, messages_limit: usize) -> Self { - Self { - name, - message_count: 0, - error_count: 0, - start_time: Instant::now(), - elapsed_time: None, - messages_per_second: None, - publish_times: vec![], - mean_publish_time: None, - messages_limit, - } - } - - pub fn increment_message_count(&mut self) { - self.message_count += 1; - } - - pub fn increment_error_count(&mut self) { - self.error_count += 1; - } - - pub fn finalize(&mut self) -> &mut Self { - self.calculate_mean_publish_time(); - let elapsed = self.start_time.elapsed(); - self.elapsed_time = Some(elapsed); - self.messages_per_second = - Some(self.message_count as f64 / elapsed.as_secs_f64()); - self - } - - pub fn is_complete(&self) -> bool { - self.message_count + self.error_count >= self.messages_limit - } - - pub fn add_publish_time(&mut self, timestamp: u128) -> &mut Self { - let current_time = Utc::now(); - let publish_time = - DateTime::<Utc>::from_timestamp_millis(timestamp as i64) - .expect("Invalid timestamp"); - let duration = current_time - .signed_duration_since(publish_time) - .to_std() - .expect("Duration calculation failed"); - - self.publish_times.push(duration); - self - } - - pub fn calculate_mean_publish_time(&mut self) { - if self.publish_times.is_empty() { - return; - } - - let times_ns: Vec<f64> = self - .publish_times - .iter() - .map(|d| d.as_nanos() as f64) - .collect(); - - let data = Data::new(times_ns); - let mean_ns = data.mean().unwrap(); - self.mean_publish_time = Some(Duration::from_nanos(mean_ns as u64)); - } -} diff --git a/benches/bench-consumers/src/runners/mod.rs b/benches/bench-consumers/src/runners/mod.rs deleted file mode 100644 index b1918cbe..00000000 --- a/benches/bench-consumers/src/runners/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -pub mod benchmark_results; -pub mod runner_all; -pub mod runner_consumer; -pub mod runner_kv_watcher; -pub mod runner_subscription; diff --git a/benches/bench-consumers/src/runners/runner_all.rs b/benches/bench-consumers/src/runners/runner_all.rs deleted file mode 100644 index 410e4570..00000000 --- a/benches/bench-consumers/src/runners/runner_all.rs +++ /dev/null @@ -1,25 +0,0 @@ -use anyhow::Result; -use nats_publisher::utils::nats::NatsHelper; -use tokio::try_join; - -static MSGS_LIMIT: usize = 5000; - -use super::{ - runner_consumer::run_blocks_consumer, - runner_kv_watcher::run_watch_kv_blocks, - runner_subscription::run_subscriptions, -}; - -#[allow(dead_code)] -pub async fn run_all_benchmarks() -> Result<()> { - let use_nats_compression = false; // adjust as needed - let nats = NatsHelper::connect(use_nats_compression).await?; - - let _ = try_join!( - run_subscriptions(&nats, MSGS_LIMIT), - run_watch_kv_blocks(&nats, MSGS_LIMIT), - run_blocks_consumer(&nats, MSGS_LIMIT), - ); - - Ok(()) -} diff --git a/benches/bench-consumers/src/runners/runner_consumer.rs b/benches/bench-consumers/src/runners/runner_consumer.rs deleted file mode 100644 index bbf02f23..00000000 --- a/benches/bench-consumers/src/runners/runner_consumer.rs +++ /dev/null @@ -1,55 +0,0 @@ -use anyhow::Result; -use async_nats::jetstream::consumer::AckPolicy; -pub use async_nats::jetstream::consumer::{ - pull::Config as PullConsumerConfig, - DeliverPolicy, -}; -use fuel_core_types::blockchain::block::Block; -use fuel_streams_core::prelude::StreamData; -use futures::StreamExt; -use nats_publisher::utils::nats::NatsHelper; - -use super::benchmark_results::BenchmarkResult; - -pub async fn run_blocks_consumer( - nats: &NatsHelper, - limit: usize, -) -> Result<()> { - let mut result = BenchmarkResult::new( - "Blocks Consumer (Ephemeral + AckNone)".into(), - limit, - ); - - let consumer = nats - .stream_blocks - .create_consumer(PullConsumerConfig { - deliver_policy: DeliverPolicy::New, - ack_policy: AckPolicy::None, - ..Default::default() - }) - .await?; - - let mut messages = consumer.messages().await?; - while let Some(message) = messages.next().await { - let msg = message?; - match nats - .data_parser() - .decode::<StreamData<Block>>(&msg.payload) - .await - { - Err(_) => result.increment_error_count(), - Ok(decoded) => { - result - .add_publish_time(decoded.ts_as_millis()) - .increment_message_count(); - if result.is_complete() { - result.finalize(); - println!("{}", result); - break; - } - } - } - } - - Ok(()) -} diff --git a/benches/bench-consumers/src/runners/runner_kv_watcher.rs b/benches/bench-consumers/src/runners/runner_kv_watcher.rs deleted file mode 100644 index a4db8d73..00000000 --- a/benches/bench-consumers/src/runners/runner_kv_watcher.rs +++ /dev/null @@ -1,40 +0,0 @@ -use anyhow::Result; -use fuel_core_types::blockchain::block::Block; -use fuel_streams_core::prelude::StreamData; -use futures::StreamExt; -use nats_publisher::utils::nats::NatsHelper; - -use super::benchmark_results::BenchmarkResult; - -#[allow(dead_code)] -pub async fn run_watch_kv_blocks( - nats: &NatsHelper, - limit: usize, -) -> Result<()> { - let mut result = - BenchmarkResult::new("KV Blocks Watcher".to_string(), limit); - let mut watch = nats.kv_blocks.watch_all().await?; - - while let Some(message) = watch.next().await { - let item = message?; - match nats - .data_parser() - .decode::<StreamData<Block>>(&item.value) - .await - { - Err(_) => result.increment_error_count(), - Ok(decoded) => { - result - .add_publish_time(decoded.ts_as_millis()) - .increment_message_count(); - if result.is_complete() { - result.finalize(); - println!("{}", result); - break; - } - } - } - } - - Ok(()) -} diff --git a/benches/bench-consumers/src/runners/runner_subscription.rs b/benches/bench-consumers/src/runners/runner_subscription.rs deleted file mode 100644 index 1ec3a7f6..00000000 --- a/benches/bench-consumers/src/runners/runner_subscription.rs +++ /dev/null @@ -1,35 +0,0 @@ -use anyhow::Result; -use fuel_core_types::blockchain::block::Block; -use fuel_streams_core::prelude::StreamData; -use futures::StreamExt; -use nats_publisher::utils::nats::NatsHelper; - -use super::benchmark_results::BenchmarkResult; - -#[allow(dead_code)] -pub async fn run_subscriptions(nats: &NatsHelper, limit: usize) -> Result<()> { - let mut result = BenchmarkResult::new("Pub/Sub".to_string(), limit); - let mut subscriber = nats.client.subscribe("blocks.sub.*").await?; - while let Some(message) = subscriber.next().await { - let payload = message.payload; - match nats - .data_parser() - .decode::<StreamData<Block>>(&payload) - .await - { - Err(_) => result.increment_error_count(), - Ok(decoded) => { - result - .add_publish_time(decoded.ts_as_millis()) - .increment_message_count(); - if result.is_complete() { - result.finalize(); - println!("{}", result); - break; - } - } - } - } - - Ok(()) -} diff --git a/benches/load-tester/Cargo.toml b/benches/load-tester/Cargo.toml deleted file mode 100644 index 32f93f06..00000000 --- a/benches/load-tester/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "load-tester" -authors = { workspace = true } -keywords = { workspace = true } -edition = { workspace = true } -homepage = { workspace = true } -license = { workspace = true } -repository = { workspace = true } -version = { workspace = true } -rust-version = { workspace = true } -publish = false - -[dependencies] -anyhow = { workspace = true } -async-nats = { workspace = true } -chrono = 
{ workspace = true } -clap = { workspace = true } -fuel-streams = { workspace = true } -fuel-streams-core = { workspace = true, features = ["bench-helpers"] } -futures = { workspace = true } -statrs = "0.18.0" -tokio = { workspace = true } diff --git a/benches/load-tester/README.md b/benches/load-tester/README.md deleted file mode 100644 index 984a28a6..00000000 --- a/benches/load-tester/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Running - -To run the load-test suite: - - ```sh - cargo run -- --network testnet --max-subscriptions 10 --step-size 1 - ``` - -Adjust the `--network`, `--max-subscriptions`, and `--step-size` values to suit your test scenario. diff --git a/benches/load-tester/src/lib.rs b/benches/load-tester/src/lib.rs deleted file mode 100644 index 0edfafa9..00000000 --- a/benches/load-tester/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod runners; diff --git a/benches/load-tester/src/main.rs b/benches/load-tester/src/main.rs deleted file mode 100644 index b13a223c..00000000 --- a/benches/load-tester/src/main.rs +++ /dev/null @@ -1,16 +0,0 @@ -use clap::Parser; -use load_tester::runners::{cli::Cli, runner_all::LoadTesterEngine}; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - let cli = Cli::parse(); - println!("Running load test ..."); - let load_tester = LoadTesterEngine::new( - cli.network, - cli.max_subscriptions, - cli.step_size, - ); - load_tester.run().await?; - println!("Finished load testing!"); - Ok(()) -} diff --git a/benches/load-tester/src/runners/cli.rs b/benches/load-tester/src/runners/cli.rs deleted file mode 100644 index 46cb07ad..00000000 --- a/benches/load-tester/src/runners/cli.rs +++ /dev/null @@ -1,33 +0,0 @@ -use clap::Parser; -use fuel_streams::types::FuelNetwork; - -#[derive(Clone, Parser)] -pub struct Cli { - /// Fuel Network to connect to. - #[arg( - long, - value_name = "NETWORK", - env = "NETWORK", - default_value = "Local", - value_parser = clap::value_parser!(FuelNetwork) - )] - pub network: FuelNetwork, - /// Maximum subscriptions for load testing - #[arg( - long, - value_name = "MAXS", - env = "MAX_SUBS", - default_value = "10", - help = "Maximum subscriptions for load testing." - )] - pub max_subscriptions: u16, - /// Maximum step size for load testing - #[arg( - long, - value_name = "SSIZE", - env = "STEP_SIZE", - default_value = "1", - help = "Maximum step size for load testing." 
- )] - pub step_size: u16, -} diff --git a/benches/load-tester/src/runners/mod.rs b/benches/load-tester/src/runners/mod.rs deleted file mode 100644 index cc84e6f4..00000000 --- a/benches/load-tester/src/runners/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod cli; -pub mod results; -pub mod runner_all; -pub mod runner_streamable; diff --git a/benches/load-tester/src/runners/results.rs b/benches/load-tester/src/runners/results.rs deleted file mode 100644 index fe1b56b8..00000000 --- a/benches/load-tester/src/runners/results.rs +++ /dev/null @@ -1,118 +0,0 @@ -use core::fmt; -use std::{ - sync::{ - atomic::{AtomicUsize, Ordering}, - RwLock, - }, - time::{Duration, Instant}, -}; - -use chrono::{DateTime, Utc}; -use statrs::statistics::{Data, Distribution}; - -#[derive(Debug)] -pub struct LoadTestTracker { - pub name: String, - pub message_count: AtomicUsize, - pub error_count: AtomicUsize, - start_time: Instant, - pub elapsed_time: RwLock<Option<Duration>>, - pub messages_per_second: RwLock<Option<f64>>, - pub publish_times: RwLock<Vec<Duration>>, - pub mean_publish_time: RwLock<Option<Duration>>, -} - -impl fmt::Display for LoadTestTracker { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "\n{}\nLoadTest Results: {}\n{}\nTotal Messages: {}\nTotal Errors: {}\nElapsed Time: {:?}\nMessages per Second: {:.2}\nMean Publish Time: {:?}\n{}", - "=".repeat(50), - self.name, - "=".repeat(50), - self.message_count.load(Ordering::Relaxed), - self.error_count.load(Ordering::Relaxed), - self.elapsed_time.read().unwrap().unwrap_or_default(), - self.messages_per_second.read().unwrap().unwrap_or_default(), - self.mean_publish_time.read().unwrap().unwrap_or_default(), - "=".repeat(50) - ) - } -} - -impl LoadTestTracker { - pub fn new(name: String) -> Self { - Self { - name, - message_count: AtomicUsize::new(0), - error_count: AtomicUsize::new(0), - start_time: Instant::now(), - elapsed_time: RwLock::new(None), - messages_per_second: RwLock::new(None), - publish_times: RwLock::new(vec![]), - mean_publish_time: RwLock::new(None), - } - } - - pub fn increment_message_count(&self) { - self.message_count.fetch_add(1, Ordering::Relaxed); - } - - pub fn increment_error_count(&self) { - self.error_count.fetch_add(1, Ordering::Relaxed); - } - - pub fn refresh(&self) -> &Self { - self.calculate_mean_publish_time(); - - let elapsed = self.start_time.elapsed(); - let message_count = self.message_count.load(Ordering::Relaxed); - - if let Ok(mut elapsed_time) = self.elapsed_time.write() { - *elapsed_time = Some(elapsed); - } - - if let Ok(mut messages_per_second) = self.messages_per_second.write() { - *messages_per_second = - Some(message_count as f64 / elapsed.as_secs_f64()); - } - - self - } - - pub fn add_publish_time(&self, timestamp: u128) -> &Self { - let current_time = Utc::now(); - let publish_time = - DateTime::<Utc>::from_timestamp_millis(timestamp as i64) - .expect("Invalid timestamp"); - let duration = current_time - .signed_duration_since(publish_time) - .to_std() - .expect("Duration calculation failed"); - - if let Ok(mut times) = self.publish_times.write() { - times.push(duration); - } - self - } - - pub fn calculate_mean_publish_time(&self) { - // Lock the mutex to access publish_times - let times = self.publish_times.read().unwrap(); - - if times.is_empty() { - return; - } - - let times_ns: Vec<f64> = - times.iter().map(|d| d.as_nanos() as f64).collect(); - drop(times); - - let data = Data::new(times_ns); - let mean_ns = data.mean().unwrap(); - - if let Ok(mut mean_publish_time) = self.mean_publish_time.write() { - *mean_publish_time = Some(Duration::from_nanos(mean_ns as u64)); - } - } -} diff --git a/benches/load-tester/src/runners/runner_all.rs b/benches/load-tester/src/runners/runner_all.rs deleted file mode 100644 index 21ee7815..00000000 --- a/benches/load-tester/src/runners/runner_all.rs +++ /dev/null @@ -1,247 +0,0 @@ -use std::{sync::Arc, time::Duration}; - -use anyhow::Result; -use fuel_streams::client::Client; -use fuel_streams_core::prelude::*; -use tokio::task::JoinHandle; - -use super::{ - results::LoadTestTracker, - runner_streamable::run_streamable_consumer, -}; - -pub struct LoadTesterEngine { - max_subscriptions: u16, - step_size: u16, - fuel_network: FuelNetwork, -} - -impl LoadTesterEngine { - pub fn new( - fuel_network: FuelNetwork, - max_subscriptions: u16, - step_size: u16, - ) -> Self { - Self { - fuel_network, - max_subscriptions, - step_size, - } - } -} - -impl LoadTesterEngine { - pub async fn run(&self) -> Result<(), anyhow::Error> { - let client = Client::connect(self.fuel_network).await?; - let mut handles: Vec<JoinHandle<()>> = vec![]; - // blocks - let blocks_test_tracker = - Arc::new(LoadTestTracker::new("Blocks Consumer".into())); - let blocks_test_tracker_printer = Arc::clone(&blocks_test_tracker); - - // inputs - let inputs_test_tracker = - Arc::new(LoadTestTracker::new("Inputs Consumer".into())); - let inputs_test_tracker_printer = Arc::clone(&inputs_test_tracker); - - // txs - let txs_test_tracker = - Arc::new(LoadTestTracker::new("Txs Consumer".into())); - let txs_test_tracker_printer = Arc::clone(&txs_test_tracker); - - // receipts - let receipts_test_tracker = - Arc::new(LoadTestTracker::new("Receipts Consumer".into())); - let receipts_test_tracker_printer = Arc::clone(&receipts_test_tracker); - - // utxos - let utxos_test_tracker = - Arc::new(LoadTestTracker::new("Utxos Consumer".into())); - let utxos_test_tracker_printer = Arc::clone(&utxos_test_tracker); - - // logs - let logs_test_tracker = - Arc::new(LoadTestTracker::new("Logs Consumer".into())); - let logs_test_tracker_printer = Arc::clone(&logs_test_tracker); - - // outputs - let outputs_test_tracker = - Arc::new(LoadTestTracker::new("Outputs Consumer".into())); - let outputs_test_tracker_printer = Arc::clone(&outputs_test_tracker); - - // print regularly the tracked metrics - handles.push(tokio::spawn(async move { - loop { - // blocks - blocks_test_tracker_printer.refresh(); - println!("{}", blocks_test_tracker_printer); - - // inputs - inputs_test_tracker_printer.refresh(); - println!("{}", inputs_test_tracker_printer); - - // txs - txs_test_tracker_printer.refresh(); - println!("{}", txs_test_tracker_printer); - - // utxos - utxos_test_tracker_printer.refresh(); - println!("{}", utxos_test_tracker_printer); - - // receipts - receipts_test_tracker_printer.refresh(); - println!("{}", receipts_test_tracker_printer); - - // outputs - outputs_test_tracker_printer.refresh(); - println!("{}", outputs_test_tracker_printer); - - // logs - logs_test_tracker_printer.refresh(); - println!("{}", logs_test_tracker_printer); - - // do a short pause - tokio::time::sleep(Duration::from_secs(5)).await; - } - })); - - // Incrementally increase subscriptions - for current_subs in - (1..=self.max_subscriptions).step_by(self.step_size as usize) - { - let client = client.clone(); - let blocks_test_tracker = Arc::clone(&blocks_test_tracker); - for _ in 0..current_subs { - // blocks - { - let client = client.clone(); - let blocks_test_tracker = Arc::clone(&blocks_test_tracker); - handles.push(tokio::spawn(async move { - if let Err(e) = 
run_streamable_consumer::<Block>( - &client, - blocks_test_tracker, - ) - .await - { - eprintln!( - "Error in blocks subscriptions - {:?}", - e - ); - } - })); - } - // logs - { - let client = client.clone(); - let logs_test_tracker = Arc::clone(&logs_test_tracker); - handles.push(tokio::spawn(async move { - if let Err(e) = run_streamable_consumer::<Log>( - &client, - logs_test_tracker, - ) - .await - { - eprintln!("Error in logs subscriptions - {:?}", e); - } - })); - } - // inputs - { - let client = client.clone(); - let inputs_test_tracker = Arc::clone(&inputs_test_tracker); - handles.push(tokio::spawn(async move { - if let Err(e) = run_streamable_consumer::<Input>( - &client, - inputs_test_tracker, - ) - .await - { - eprintln!( - "Error in inputs subscriptions - {:?}", - e - ); - } - })); - } - // txs - { - let client = client.clone(); - let txs_test_tracker = Arc::clone(&txs_test_tracker); - handles.push(tokio::spawn(async move { - if let Err(e) = run_streamable_consumer::<Transaction>( - &client, - txs_test_tracker, - ) - .await - { - eprintln!("Error in txs subscriptions - {:?}", e); - } - })); - } - // outputs - { - let client = client.clone(); - let outputs_test_tracker = - Arc::clone(&outputs_test_tracker); - handles.push(tokio::spawn(async move { - if let Err(e) = run_streamable_consumer::<Output>( - &client, - outputs_test_tracker, - ) - .await - { - eprintln!( - "Error in outputs subscriptions - {:?}", - e - ); - } - })); - } - // utxos - { - let client = client.clone(); - let utxos_test_tracker = Arc::clone(&utxos_test_tracker); - handles.push(tokio::spawn(async move { - if let Err(e) = run_streamable_consumer::<Utxo>( - &client, - utxos_test_tracker, - ) - .await - { - eprintln!("Error in utxos subscriptions - {:?}", e); - } - })); - } - // receipts - { - let let client = client.clone(); - let receipts_test_tracker = - Arc::clone(&receipts_test_tracker); - handles.push(tokio::spawn(async move { - if let Err(e) = run_streamable_consumer::<Receipt>( - &client, - receipts_test_tracker, - ) - .await - { - eprintln!( - "Error in receipts subscriptions - {:?}", - e - ); - } - })); - } - } - - // Small pause between test iterations - tokio::time::sleep(Duration::from_secs(5)).await; - } - - // cleanup - for handle in handles.iter() { - handle.abort(); - } - - Ok(()) - } -} diff --git a/benches/load-tester/src/runners/runner_streamable.rs b/benches/load-tester/src/runners/runner_streamable.rs deleted file mode 100644 index bda6f2db..00000000 --- a/benches/load-tester/src/runners/runner_streamable.rs +++ /dev/null @@ -1,38 +0,0 @@ -use std::sync::Arc; - -use anyhow::Result; -pub use async_nats::jetstream::consumer::DeliverPolicy; -use fuel_streams::{client::Client, StreamConfig}; -use fuel_streams_core::prelude::*; -use futures::StreamExt; - -use super::results::LoadTestTracker; - -pub async fn run_streamable_consumer<S: Streamable>( - client: &Client, - load_test_tracker: Arc<LoadTestTracker>, -) -> Result<()> { - // Create a new stream for blocks - let stream = fuel_streams::Stream::<S>::new(client).await; - - // Configure the stream to start from the last published block - let config = StreamConfig { - deliver_policy: DeliverPolicy::Last, - }; - - // Subscribe to the block stream with the specified configuration - let mut sub = stream.subscribe_raw_with_config(config).await?; - - // Process incoming blocks - while let Some(bytes) = sub.next().await { - load_test_tracker.increment_message_count(); - let decoded_msg = S::decode_raw(bytes).unwrap(); - - let ts_millis = decoded_msg.ts_as_millis(); - load_test_tracker - .add_publish_time(ts_millis) - .increment_message_count(); 
- } - - Ok(()) -} diff --git a/benches/nats-publisher/README.md b/benches/nats-publisher/README.md deleted file mode 100644 index 1dbe8ad5..00000000 --- a/benches/nats-publisher/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# Running - -1. First make sure you have your `.env` configured properly: - - ```sh - make create-env - ``` - -2. Make sure you have a NATS server running within the workspace root: - - ```sh - make start-nats - ``` - -3. Then, you can start the local node and begin publishing on NATS: - ```sh - make run-publisher - ``` diff --git a/benches/nats-publisher/config/nats.conf b/benches/nats-publisher/config/nats.conf deleted file mode 100644 index e2b7d425..00000000 --- a/benches/nats-publisher/config/nats.conf +++ /dev/null @@ -1,5 +0,0 @@ -jetstream: { - max_mem_store: 64MiB, - max_file_store: 10GiB -} -max_payload = 8388608 diff --git a/benches/nats-publisher/src/lib.rs b/benches/nats-publisher/src/lib.rs deleted file mode 100644 index 3bed9398..00000000 --- a/benches/nats-publisher/src/lib.rs +++ /dev/null @@ -1,2 +0,0 @@ -#[allow(unused)] -pub mod utils; diff --git a/benches/nats-publisher/src/main.rs b/benches/nats-publisher/src/main.rs deleted file mode 100644 index 62050a44..00000000 --- a/benches/nats-publisher/src/main.rs +++ /dev/null @@ -1,78 +0,0 @@ -mod utils; - -use clap::Parser; -use fuel_core_importer::ports::ImporterDatabase; -use fuel_streams_core::prelude::*; -use utils::{blocks::BlockHelper, nats::NatsHelper, tx::TxHelper}; - -#[derive(Parser)] -pub struct Cli { - #[command(flatten)] - fuel_core_config: fuel_core_bin::cli::run::Command, -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - fuel_core_bin::cli::init_logging(); - - let cli = Cli::parse(); - let service = - fuel_core_bin::cli::run::get_service(cli.fuel_core_config).await?; - let chain_config = service.shared.config.snapshot_reader.chain_config(); - let chain_id = chain_config.consensus_parameters.chain_id(); - let block_importer = service.shared.block_importer.block_importer.clone(); - let database = service.shared.database.clone(); - - service - .start_and_await() - .await - .expect("Fuel core service startup failed"); - - // ------------------------------------------------------------------------ - // NATS - // ------------------------------------------------------------------------ - let nats = NatsHelper::connect(true).await?; - let block_helper = BlockHelper::new(nats.to_owned(), &database); - let tx_helper = TxHelper::new(nats.to_owned(), &chain_id, &database); - - // ------------------------------------------------------------------------ - // OLD BLOCKS - // ------------------------------------------------------------------------ - tokio::task::spawn({ - let database = database.clone(); - let block_helper = block_helper.clone(); - let _tx_helper = tx_helper.clone(); - let last_height = database.on_chain().latest_block_height()?.unwrap(); - async move { - for height in 0..*last_height { - let height = height.into(); - let block = block_helper.find_by_height(height); - let block = - Block::new(&block, Consensus::default(), Vec::new()); - - block_helper.publish(&block).await?; - // for (index, tx) in block.transactions().iter().enumerate() { - // tx_helper.publish(&block, tx, index).await?; - // } - } - Ok::<(), async_nats::Error>(()) - } - }); - - // ------------------------------------------------------------------------ - // NEW BLOCKS - // ------------------------------------------------------------------------ - let mut subscription = block_importer.subscribe(); - while let 
Ok(result) = subscription.recv().await { - let result = &**result; - let block = &result.sealed_block.entity; - let block = Block::new(block, Consensus::default(), Vec::new()); - - block_helper.publish(&block).await?; - // for (index, tx) in block.transactions().iter().enumerate() { - // tx_helper.publish(block, tx, index).await?; - // } - } - - Ok(()) -} diff --git a/benches/nats-publisher/src/utils/blocks.rs b/benches/nats-publisher/src/utils/blocks.rs deleted file mode 100644 index 3f192f7c..00000000 --- a/benches/nats-publisher/src/utils/blocks.rs +++ /dev/null @@ -1,93 +0,0 @@ -use async_nats::jetstream::context::Publish; -use fuel_core::combined_database::CombinedDatabase; -use fuel_core_storage::transactional::AtomicView; -use fuel_streams_core::prelude::*; -use tokio::try_join; -use tracing::info; - -use super::nats::NatsHelper; - -#[derive(Clone)] -pub struct BlockHelper { - nats: NatsHelper, - database: CombinedDatabase, -} - -impl BlockHelper { - pub fn new(nats: NatsHelper, database: &CombinedDatabase) -> Self { - Self { - nats, - database: database.to_owned(), - } - } - - pub fn find_by_height(&self, height: FuelCoreBlockHeight) -> FuelCoreBlock { - self.database - .on_chain() - .latest_view() - .unwrap() - .get_sealed_block_by_height(&height) - .unwrap() - .unwrap_or_else(|| { - panic!("NATS Publisher: no block at height {height}") - }) - .entity - } - - pub async fn publish(&self, block: &Block) -> anyhow::Result<()> { - try_join!( - self.publish_core(block), - self.publish_encoded(block), - self.publish_to_kv(block) - )?; - Ok(()) - } -} - -/// Publisher -impl BlockHelper { - async fn publish_core(&self, block: &Block) -> anyhow::Result<()> { - let subject: BlocksSubject = block.into(); - let payload = self.nats.data_parser().encode(block).await?; - self.nats - .context - .publish(subject.parse(), payload.into()) - .await?; - - Ok(()) - } - async fn publish_encoded(&self, block: &Block) -> anyhow::Result<()> { - let height = block.height; - let subject: BlocksSubject = block.into(); - let payload = self.nats.data_parser().encode(block).await?; - let nats_payload = Publish::build() - .message_id(subject.parse()) - .payload(payload.into()); - - self.nats - .context - .send_publish(subject.parse(), nats_payload) - .await? 
- .await?; - - info!( - "NATS: publishing block {} encoded to stream \"blocks_encoded\"", - height - ); - Ok(()) - } - - async fn publish_to_kv(&self, block: &Block) -> anyhow::Result<()> { - let height = block.height; - let subject: BlocksSubject = block.into(); - - let payload = self.nats.data_parser().encode(block).await?; - self.nats - .kv_blocks - .put(subject.parse(), payload.into()) - .await?; - - info!("NATS: publishing block {} to kv store \"blocks\"", height); - Ok(()) - } -} diff --git a/benches/nats-publisher/src/utils/mod.rs b/benches/nats-publisher/src/utils/mod.rs deleted file mode 100644 index 642417eb..00000000 --- a/benches/nats-publisher/src/utils/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod blocks; -pub mod nats; -pub mod tx; diff --git a/benches/nats-publisher/src/utils/nats.rs b/benches/nats-publisher/src/utils/nats.rs deleted file mode 100644 index 2a6e3ad6..00000000 --- a/benches/nats-publisher/src/utils/nats.rs +++ /dev/null @@ -1,130 +0,0 @@ -use async_nats::{ - jetstream::{ - kv::{self, Store}, - stream::{self, Compression, Stream}, - Context, - }, - ConnectOptions, -}; -use fuel_data_parser::DataParser; -use fuel_streams_core::prelude::FuelNetwork; - -#[allow(dead_code)] -#[derive(Clone)] -pub struct NatsHelper { - pub client: async_nats::Client, - pub kv_blocks: Store, - pub kv_transactions: Store, - pub context: Context, - pub stream_blocks: Stream, - pub stream_transactions: Stream, - pub use_nats_compression: bool, - pub data_parser: DataParser, -} - -impl NatsHelper { - pub async fn connect(use_nats_compression: bool) -> anyhow::Result<Self> { - let client = connect().await?; - let ( - context, - kv_blocks, - kv_transactions, - stream_blocks, - stream_transactions, - ) = create_resources(&client, use_nats_compression).await?; - // adjust as needed - let data_parser = DataParser::default(); - - Ok(Self { - client, - context, - kv_blocks, - kv_transactions, - stream_blocks, - stream_transactions, - use_nats_compression, - data_parser, - }) - } - - #[allow(dead_code)] - pub fn data_parser(&self) -> &DataParser { - &self.data_parser - } - - #[allow(dead_code)] - pub fn data_parser_mut(&mut self) -> &mut DataParser { - &mut self.data_parser - } -} - -pub async fn connect() -> anyhow::Result<async_nats::Client> { - Ok(ConnectOptions::new() - .user_and_password("admin".into(), "secret".into()) - .connect(FuelNetwork::Local.to_nats_url()) - .await?) 
-} - -async fn create_resources( - client: &async_nats::Client, - use_nats_compression: bool, -) -> anyhow::Result<(Context, Store, Store, Stream, Stream)> { - let jetstream = async_nats::jetstream::new(client.clone()); - - // ------------------------------------------------------------------------ - // BLOCKS - // ------------------------------------------------------------------------ - let stream_blocks = jetstream - .get_or_create_stream(stream::Config { - name: "blocks_encoded".into(), - subjects: vec!["blocks.>".into()], - compression: if use_nats_compression { - Some(Compression::S2) - } else { - None - }, - ..Default::default() - }) - .await?; - - // TRANSACTIONS - // ------------------------------------------------------------------------ - let stream_transactions = jetstream - .get_or_create_stream(stream::Config { - name: "transactions_encoded".into(), - subjects: vec!["transactions.>".into()], - compression: if use_nats_compression { - Some(Compression::S2) - } else { - None - }, - ..Default::default() - }) - .await?; - - // KV STORE - // ------------------------------------------------------------------------ - let kv_blocks = jetstream - .create_key_value(kv::Config { - compression: use_nats_compression, - bucket: "blocks".into(), - ..Default::default() - }) - .await?; - - let kv_transactions = jetstream - .create_key_value(kv::Config { - compression: use_nats_compression, - bucket: "transactions".into(), - ..Default::default() - }) - .await?; - - Ok(( - jetstream, - kv_blocks, - kv_transactions, - stream_blocks, - stream_transactions, - )) -} diff --git a/benches/nats-publisher/src/utils/tx.rs b/benches/nats-publisher/src/utils/tx.rs deleted file mode 100644 index 49da5f26..00000000 --- a/benches/nats-publisher/src/utils/tx.rs +++ /dev/null @@ -1,129 +0,0 @@ -use async_nats::jetstream::context::Publish; -use fuel_core::combined_database::CombinedDatabase; -use fuel_core_types::fuel_types::ChainId; -use fuel_streams_core::prelude::*; -use tokio::try_join; -use tracing::info; - -use super::nats::NatsHelper; - -#[allow(unused)] -#[derive(Clone)] -pub struct TxHelper { - nats: NatsHelper, - chain_id: ChainId, - database: CombinedDatabase, -} - -#[allow(unused)] -/// Public -impl TxHelper { - pub fn new( - nats: NatsHelper, - chain_id: &ChainId, - database: &CombinedDatabase, - ) -> Self { - Self { - nats, - chain_id: chain_id.to_owned(), - database: database.to_owned(), - } - } - - pub async fn publish( - &self, - block: &Block, - tx: &Transaction, - index: usize, - ) -> anyhow::Result<()> { - try_join!( - self.publish_core(block, tx, index), - self.publish_encoded(block, tx, index), - self.publish_to_kv(block, tx, index) - )?; - Ok(()) - } -} - -/// Publishers -impl TxHelper { - async fn publish_core( - &self, - block: &Block, - tx: &Transaction, - index: usize, - ) -> anyhow::Result<()> { - let subject = &self.get_subject(tx, block, index); - let payload = self.nats.data_parser().encode(block).await?; - self.nats - .context - .publish(subject.parse(), payload.into()) - .await?; - Ok(()) - } - - async fn publish_encoded( - &self, - block: &Block, - tx: &Transaction, - index: usize, - ) -> anyhow::Result<()> { - let tx_id = &tx.id; - let subject = self.get_subject(tx, block, index); - let payload = self.nats.data_parser().encode(block).await?; - let nats_payload = Publish::build() - .message_id(subject.parse()) - .payload(payload.into()); - - self.nats - .context - .send_publish(subject.parse(), nats_payload) - .await? 
- .await?; - - info!( - "NATS: publishing transaction {} json to stream \"transactions_encoded\"", - tx_id - ); - Ok(()) - } - - async fn publish_to_kv( - &self, - block: &Block, - tx: &Transaction, - index: usize, - ) -> anyhow::Result<()> { - let tx_id = &tx.id; - let subject = self.get_subject(tx, block, index); - let payload = self.nats.data_parser().encode(block).await?; - self.nats - .kv_transactions - .put(subject.parse(), payload.into()) - .await?; - - info!( - "NATS: publishing transaction {} to kv store \"transactions\"", - tx_id - ); - Ok(()) - } -} - -/// Getters -impl TxHelper { - fn get_subject( - &self, - tx: &Transaction, - block: &Block, - index: usize, - ) -> TransactionsSubject { - // construct tx subject - let mut subject: TransactionsSubject = tx.into(); - subject = subject - .with_index(Some(index)) - .with_block_height(Some(BlockHeight::from(block.height))) - .with_status(Some(tx.status.clone())); - subject - } -} diff --git a/cluster/README.md b/cluster/README.md index 389ef86f..835541bd 100755 --- a/cluster/README.md +++ b/cluster/README.md @@ -10,15 +10,15 @@ The latter is intended for local development, but it also allows us to deploy th The following are prerequisites for spinning up the fuel-data-systems cluster locally: -- kubectl - `https://www.howtoforge.com/how-to-install-kubernetes-with-minikube-ubuntu-20-04/` +- kubectl + `https://www.howtoforge.com/how-to-install-kubernetes-with-minikube-ubuntu-20-04/` -- Tilt: - `https://docs.tilt.dev/install.html` +- Tilt: + `https://docs.tilt.dev/install.html` -- minikube based on the following description: - `https://phoenixnap.com/kb/install-minikube-on-ubuntu` - `https://minikube.sigs.k8s.io/docs/start/` +- minikube based on the following description: + `https://phoenixnap.com/kb/install-minikube-on-ubuntu` + `https://minikube.sigs.k8s.io/docs/start/` ...or alternatively use this tool which will automatically set up your cluster: `https://github.com/tilt-dev/ctlptl##minikube-with-a-built-in-registry` @@ -26,32 +26,36 @@ The following are prerequisites for spinning up the fuel-data-systems cluster lo ## Setup 1. To setup and start the local environment, run: - ```bash - make cluster-setup # Sets up both minikube and kubernetes configuration - ``` - Alternatively, you can run the setup steps individually: - ```bash - make minikube-setup # Sets up minikube with required addons - make k8s-setup # Configures kubernetes with proper namespace and context - ``` + ```bash + make cluster-setup # Sets up both minikube and kubernetes configuration + ``` - You can also start the minikube cluster without running the setup script: - ```bash - make minikube-start # Start minikube cluster - ``` + Alternatively, you can run the setup steps individually: + + ```bash + make minikube-setup # Sets up minikube with required addons + make k8s-setup # Configures kubernetes with proper namespace and context + ``` + + You can also start the minikube cluster without running the setup script: + + ```bash + make minikube-start # Start minikube cluster + ``` 2. 
Start the Tilt services: - ```bash - make cluster-up # Starts Tiltfile services - ``` + ```bash + make cluster-up # Starts Tiltfile services + ``` You can use the following commands to manage the services: ```bash -make cluster-up # Start services -make cluster-down # Stop services -make cluster-reset # Reset services -make minikube-start # Start minikube (if you've already run setup before) +make cluster-up # Start services +make cluster-down # Stop services +make cluster-reset # Reset services +make minikube-start # Start minikube (if you've already run setup before) ``` ## Using `k9s` for an interactive terminal UI @@ -62,9 +66,9 @@ Run it with `k9s --context=<context> --namespace=<namespace>`. -- Tilt [tutorial](https://docs.tilt.dev/tutorial.html) +- How [kubernetes works](https://www.youtube.com/watch?v=ZuIQurh_kDk) +- Kubernetes [concepts](https://kubernetes.io/docs/concepts/) +- Kubectl [overview](https://kubernetes.io/docs/reference/kubectl/overview/) +- Kubectl [cheat sheet](https://kubernetes.io/docs/reference/kubectl/cheatsheet/) +- Helm [chart tutorial](https://docs.bitnami.com/kubernetes/how-to/create-your-first-helm-chart/), then examine the helm charts in this repository, and the values yaml files that are used to template them. The default values are in the charts themselves as `values.yaml`, and the values for specific configurations are at `values/<name>.yaml`. +- Tilt [tutorial](https://docs.tilt.dev/tutorial.html) diff --git a/cluster/charts/fuel-streams-publisher/.helmignore b/cluster/charts/fuel-streams-publisher/.helmignore deleted file mode 100644 index 0e8a0eb3..00000000 --- a/cluster/charts/fuel-streams-publisher/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*.orig -*~ -# Various IDEs -.project -.idea/ -*.tmproj -.vscode/ diff --git a/cluster/charts/fuel-streams-publisher/Chart.yaml b/cluster/charts/fuel-streams-publisher/Chart.yaml deleted file mode 100644 index 1628fc80..00000000 --- a/cluster/charts/fuel-streams-publisher/Chart.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v2 -name: fuel-streams-publisher -description: A Helm chart for Kubernetes deployment of Fuel streams publisher service -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.4.7 -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. 
-appVersion: 0.2.0 diff --git a/cluster/charts/fuel-streams-publisher/templates/_helpers.tpl b/cluster/charts/fuel-streams-publisher/templates/_helpers.tpl deleted file mode 100644 index 0bc5f455..00000000 --- a/cluster/charts/fuel-streams-publisher/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "fuel-streams-publisher.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "fuel-streams-publisher.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "fuel-streams-publisher.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "fuel-streams-publisher.labels" -}} -helm.sh/chart: {{ include "fuel-streams-publisher.chart" . }} -{{ include "fuel-streams-publisher.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "fuel-streams-publisher.selectorLabels" -}} -app.kubernetes.io/name: {{ include "fuel-streams-publisher.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "fuel-streams-publisher.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "fuel-streams-publisher.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/cluster/charts/fuel-streams-publisher/templates/hpa.yaml b/cluster/charts/fuel-streams-publisher/templates/hpa.yaml deleted file mode 100644 index b2759de2..00000000 --- a/cluster/charts/fuel-streams-publisher/templates/hpa.yaml +++ /dev/null @@ -1,32 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "fuel-streams-publisher.fullname" . }} - labels: - {{- include "fuel-streams-publisher.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "fuel-streams-publisher.fullname" . 
}} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} \ No newline at end of file diff --git a/cluster/charts/fuel-streams-publisher/templates/service.yaml b/cluster/charts/fuel-streams-publisher/templates/service.yaml deleted file mode 100644 index 49c7f6fe..00000000 --- a/cluster/charts/fuel-streams-publisher/templates/service.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: Service - -metadata: - name: {{ include "fuel-streams-publisher.fullname" . }} - labels: - {{- include "fuel-streams-publisher.labels" . | nindent 4 }} - -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include "fuel-streams-publisher.selectorLabels" . | nindent 4 }} diff --git a/cluster/charts/fuel-streams-publisher/templates/serviceaccount.yaml b/cluster/charts/fuel-streams-publisher/templates/serviceaccount.yaml deleted file mode 100644 index b3c456bb..00000000 --- a/cluster/charts/fuel-streams-publisher/templates/serviceaccount.yaml +++ /dev/null @@ -1,13 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "fuel-streams-publisher.serviceAccountName" . }} - labels: - {{- include "fuel-streams-publisher.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -automountServiceAccountToken: {{ .Values.serviceAccount.automount }} -{{- end }} diff --git a/cluster/charts/fuel-streams-publisher/templates/statefulset.yaml b/cluster/charts/fuel-streams-publisher/templates/statefulset.yaml deleted file mode 100644 index b0d6b039..00000000 --- a/cluster/charts/fuel-streams-publisher/templates/statefulset.yaml +++ /dev/null @@ -1,147 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ include "fuel-streams-publisher.fullname" . }} - labels: - {{- include "fuel-streams-publisher.labels" . | nindent 4 }} -spec: - # Define the headless service that governs this StatefulSet - serviceName: {{ include "fuel-streams-publisher.fullname" . | quote }} - # Handle replica count unless autoscaling is enabled - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.config.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "fuel-streams-publisher.selectorLabels" . | nindent 6 }} - template: - metadata: - annotations: - # Prometheus scraping configuration - {{- if .Values.prometheus.enabled }} - prometheus.io/scrape: {{ .Values.prometheus.scrape | quote }} - prometheus.io/port: {{ .Values.service.port | quote }} - prometheus.io/path: {{ .Values.prometheus.path | quote }} - {{- end }} - # Add checksums to force pod restart when configs change - {{/* checksum/config: {{ include (print $.Template.BasePath "/env-configmap.yaml") . | sha256sum }} */}} - {{/* checksum/secrets: {{ include (print $.Template.BasePath "/env-secrets.yaml") . | sha256sum }} */}} - {{- with .Values.config.annotations }} - {{- toYaml . 
| nindent 8 }} - {{- end }} - labels: - {{- include "fuel-streams-publisher.labels" . | nindent 8 }} - {{- with .Values.config.labels }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- with .Values.config.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "fuel-streams-publisher.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.config.podSecurityContext | nindent 8 }} - # Initialize persistent volumes with correct permissions - {{- if .Values.persistence.enabled }} - initContainers: - - name: {{ .Values.persistence.data.containerName }} - image: alpine:latest - imagePullPolicy: IfNotPresent - command: ["/bin/chown"] - args: ["-R", "1000:1000", "{{ .Values.persistence.data.mountPath }}"] - volumeMounts: - - name: {{ .Values.persistence.data.name }} - mountPath: {{ .Values.persistence.data.mountPath }} - - name: {{ .Values.persistence.temp.containerName }} - image: alpine:latest - imagePullPolicy: IfNotPresent - command: ["/bin/chown"] - args: ["-R", "1000:1000", "{{ .Values.persistence.temp.mountPath }}"] - volumeMounts: - - name: {{ .Values.persistence.temp.name }} - mountPath: {{ .Values.persistence.temp.mountPath }} - {{- end }} - # Main application container - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - # Define container ports for application and metrics - ports: - - name: http - containerPort: {{ int .Values.service.port }} - protocol: TCP - # Health check probes - livenessProbe: - {{- toYaml .Values.livenessProbe | nindent 12 }} - readinessProbe: - {{- toYaml .Values.readinessProbe | nindent 12 }} - resources: - {{- toYaml .Values.config.resources | nindent 12 }} - env: - {{- range $key, $value := .Values.env }} - - name: {{ $key }} - value: {{ $value | quote }} - {{- end }} - {{- if .Values.extraEnv }} - {{- toYaml .Values.extraEnv | nindent 12 }} - {{- end }} - envFrom: - - configMapRef: - name: {{ include "fuel-streams-publisher.fullname" . }} - optional: true - - secretRef: - name: {{ include "fuel-streams-publisher.fullname" . }} - optional: true - {{- if .Values.envFrom }} - {{- toYaml .Values.envFrom | nindent 12 }} - {{- end }} - - # Mount persistent volumes if enabled - {{- if .Values.persistence.enabled }} - volumeMounts: - - name: {{ .Values.persistence.data.name }} - mountPath: {{ .Values.persistence.data.mountPath }} - readOnly: false - - name: {{ .Values.persistence.temp.name }} - mountPath: {{ .Values.persistence.temp.mountPath }} - readOnly: false - {{- end }} - # Node assignment rules - {{- with .Values.config.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.config.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.config.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} - # Persistent volume claims configuration - {{- if .Values.persistence.enabled }} - volumeClaimTemplates: - - metadata: - name: {{ .Values.persistence.data.name }} - spec: - accessModes: - - {{ .Values.persistence.data.accessMode }} - storageClassName: {{ .Values.persistence.data.storageClass }} - resources: - requests: - storage: {{ .Values.persistence.data.size }} - - metadata: - name: {{ .Values.persistence.temp.name }} - spec: - accessModes: - - {{ .Values.persistence.temp.accessMode }} - storageClassName: {{ .Values.persistence.temp.storageClass }} - resources: - requests: - storage: {{ .Values.persistence.temp.size }} - {{- end }} diff --git a/cluster/charts/fuel-streams-publisher/values.yaml b/cluster/charts/fuel-streams-publisher/values.yaml deleted file mode 100644 index 91ca6eef..00000000 --- a/cluster/charts/fuel-streams-publisher/values.yaml +++ /dev/null @@ -1,124 +0,0 @@ -# Default values for fuel-streams-publisher -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# These are custom resource definitions that can be overridden by the user -# nameOverride: "" -# fullnameOverride: "" - -# general configurations -config: - replicaCount: 1 - imagePullSecrets: [] - annotations: {} - labels: {} - nodeSelector: {} - tolerations: [] - affinity: {} - resources: {} - podSecurityContext: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # resources: - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - -image: - repository: ghcr.io/fuellabs/fuel-streams-publisher - pullPolicy: Always - tag: "latest" - -serviceAccount: - create: true - automount: true - # annotations: {} - # The name of the service account to use. 
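One pattern worth calling out from the publisher statefulset above, which the new consumer, publisher, and webserver templates later in this diff reuse: a flat `env:` map from values is ranged into name/value pairs, and ConfigMap/Secret references are attached through `envFrom` with `optional: true`. A sketch of the rendered container fragment for a two-entry map (entries taken from the defaults below; the resource name is illustrative):

```yaml
# For values such as `env: { CHAIN_CONFIG: "mainnet", P2P_PORT: "30333" }`,
# the range loop emits one name/value pair per key:
env:
  - name: CHAIN_CONFIG
    value: "mainnet"
  - name: P2P_PORT
    value: "30333"
envFrom:
  - configMapRef:
      name: fuel-streams-publisher  # chart fullname (illustrative)
      optional: true                # a missing ConfigMap does not block pod startup
  - secretRef:
      name: fuel-streams-publisher
      optional: true
```

Because both references are `optional: true`, the pod can be scheduled before the config and secret objects exist, so rendering and startup do not fail when those objects are created separately.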
- # If not set and create is true, a name is generated using the fullname template - # name: "" - -service: - type: ClusterIP - port: 8080 - -prometheus: - enabled: true - scrape: true - path: /metrics - -securityContext: - capabilities: - drop: [ALL] - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1000 - -livenessProbe: {} -readinessProbe: {} - -autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 3 - targetCPUUtilizationPercentage: 80 - targetMemoryUtilizationPercentage: 80 - -persistence: - enabled: true - data: - name: rocks-db-vol - containerName: update-rocks-db-vol - mountPath: /mnt/db - size: 500Gi - storageClass: gp3-generic - accessMode: ReadWriteOnce - temp: - name: tmp-vol - containerName: update-tmp-vol - mountPath: /tmp - size: 5Gi - storageClass: gp3-generic - accessMode: ReadWriteOnce - -# Additional environment variables with complex structures -# extraEnv: -# - name: RELAYER -# valueFrom: -# secretKeyRef: -# name: fuel-streams-publisher -# key: RELAYER -# - name: KEYPAIR -# valueFrom: -# secretKeyRef: -# name: fuel-streams-publisher -# key: KEYPAIR -# - name: NATS_ADMIN_PASS -# valueFrom: -# secretKeyRef: -# name: fuel-streams-publisher -# key: NATS_ADMIN_PASS -# Optional: Bulk environment references -# envFrom: -# - configMapRef: -# name: additional-config -# - secretRef: -# name: additional-secrets - -env: - RELAYER_V2_LISTENING_CONTRACTS: "0xAEB0c00D0125A8a788956ade4f4F12Ead9f65DDf" - RELAYER_DA_DEPLOY_HEIGHT: "20620434" - RELAYER_LOG_PAGE_SIZE: "2000" - SYNC_HEADER_BATCH_SIZE: "100" - P2P_PORT: "30333" - RESERVED_NODES: "/dnsaddr/mainnet.fuel.network" - CHAIN_CONFIG: "mainnet" - PUBLISHER_MAX_THREADS: "32" - DB_PATH: "/mnt/db/" - POA_INSTANT: "false" - SERVICE_NAME: "NATS Publisher Node" - NATS_URL: "nats:4222" diff --git a/cluster/charts/fuel-streams/Chart.lock b/cluster/charts/fuel-streams/Chart.lock index 44606f01..3b1e4e2c 100644 --- a/cluster/charts/fuel-streams/Chart.lock +++ b/cluster/charts/fuel-streams/Chart.lock @@ -1,12 +1,12 @@ dependencies: - name: nats repository: https://nats-io.github.io/k8s/helm/charts/ - version: 1.2.6 + version: 1.2.8 - name: nats repository: https://nats-io.github.io/k8s/helm/charts/ - version: 1.2.6 + version: 1.2.8 - name: nats repository: https://nats-io.github.io/k8s/helm/charts/ - version: 1.2.6 -digest: sha256:ad7948ef2413ea2f9af239829570612f04abd624844413ad73600fd67ce2e9b6 -generated: "2024-12-08T20:11:41.144678-03:00" + version: 1.2.8 +digest: sha256:a5f3dd64e1a20f7c9d58894359f6f909f33d14772355ee70033fd411219bcc7e +generated: "2024-12-18T16:59:13.903435-03:00" diff --git a/cluster/charts/fuel-streams/Chart.yaml b/cluster/charts/fuel-streams/Chart.yaml index 398418cc..228de229 100755 --- a/cluster/charts/fuel-streams/Chart.yaml +++ b/cluster/charts/fuel-streams/Chart.yaml @@ -2,20 +2,15 @@ apiVersion: v2 appVersion: "1.0" description: A Helm chart for Kubernetes name: fuel-streams -version: 0.1.3 +version: 0.7.2 dependencies: - name: nats - version: 1.2.6 + version: 1.2.8 repository: https://nats-io.github.io/k8s/helm/charts/ alias: nats-core condition: nats-core.enabled - name: nats - version: 1.2.6 + version: 1.2.8 repository: https://nats-io.github.io/k8s/helm/charts/ alias: nats-publisher condition: nats-publisher.enabled - - name: nats - version: 1.2.6 - repository: https://nats-io.github.io/k8s/helm/charts/ - alias: nats-client - condition: nats-client.enabled diff --git a/cluster/charts/fuel-streams/templates/_blocks.tpl b/cluster/charts/fuel-streams/templates/_blocks.tpl index 
fe70c729..580c16e6 100644 --- a/cluster/charts/fuel-streams/templates/_blocks.tpl +++ b/cluster/charts/fuel-streams/templates/_blocks.tpl @@ -57,4 +57,56 @@ readinessProbe: startupProbe: {{- include "merge" (dict "context" .context "service" .service "defaultKey" "startupProbe" "path" "config.startupProbe") | nindent 2 }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} + +{{/* +Configure nats accounts +*/}} +{{- define "nats-accounts" -}} +data: + auth.conf: | + accounts { + SYS: { + users: [ + {user: $NATS_SYSTEM_USER, password: $NATS_SYSTEM_PASS} + ] + } + ADMIN: { + jetstream: enabled + users: [ + {user: $NATS_ADMIN_USER, password: $NATS_ADMIN_PASS} + ] + } + PUBLIC: { + jetstream: enabled + users: [ + { + user: $NATS_PUBLIC_USER + password: $NATS_PUBLIC_PASS + permissions: { + subscribe: ">" + publish: { + deny: [ + "*.by_id.>" + "*.blocks.>" + "*.transactions.>" + "*.inputs.>" + "*.outputs.>" + "*.receipts.>" + "*.logs.>" + "*.utxos.>" + "$JS.API.STREAM.CREATE.>" + "$JS.API.STREAM.UPDATE.>" + "$JS.API.STREAM.DELETE.>" + "$JS.API.STREAM.PURGE.>" + "$JS.API.STREAM.RESTORE.>" + "$JS.API.STREAM.MSG.DELETE.>" + "$JS.API.CONSUMER.DURABLE.CREATE.>" + ] + } + } + } + ] + } + } +{{- end }} diff --git a/cluster/charts/fuel-streams/templates/_helpers.tpl b/cluster/charts/fuel-streams/templates/_helpers.tpl index 4344d0aa..e3de60d9 100644 --- a/cluster/charts/fuel-streams/templates/_helpers.tpl +++ b/cluster/charts/fuel-streams/templates/_helpers.tpl @@ -3,6 +3,16 @@ Expand the name of the chart. If nameOverride is provided in Values.config, use that instead of .Chart.Name. The result is truncated to 63 chars and has any trailing "-" removed to comply with Kubernetes naming rules. Returns: String - The chart name, truncated and cleaned +Example: + Given: + .Chart.Name = "fuel-streams" + .Values.config.nameOverride = "custom-name" + Result: "custom-name" + + Given: + .Chart.Name = "fuel-streams" + .Values.config.nameOverride = null + Result: "fuel-streams" */}} {{- define "fuel-streams.name" -}} {{- default .Chart.Name .Values.config.nameOverride | trunc 63 | trimSuffix "-" }} @@ -17,6 +27,24 @@ This template follows these rules: - If not, concatenate release name and chart name with a hyphen The result is truncated to 63 chars and has any trailing "-" removed to comply with Kubernetes naming rules. Returns: String - The fully qualified app name, truncated and cleaned +Example: + Given: + .Values.config.fullnameOverride = "override-name" + Result: "override-name" + + Given: + .Release.Name = "my-release" + .Chart.Name = "fuel-streams" + .Values.config.nameOverride = null + .Values.config.fullnameOverride = null + Result: "my-release-fuel-streams" + + Given: + .Release.Name = "fuel-streams-prod" + .Chart.Name = "fuel-streams" + .Values.config.nameOverride = null + .Values.config.fullnameOverride = null + Result: "fuel-streams-prod" */}} {{- define "fuel-streams.fullname" -}} {{- if .Values.config.fullnameOverride }} @@ -49,26 +77,43 @@ Includes: - Selector labels (app name and instance) - App version (if defined) - Managed-by label indicating Helm management +Parameters: + - name: Optional custom name to use instead of the default name + - .: Full context (passed automatically or as "context") Returns: Map - A set of key-value pairs representing Kubernetes labels +Example: + {{- include "fuel-streams.labels" . 
}} + # Or with custom name: + {{- include "fuel-streams.labels" (dict "name" "custom-name" "context" $) }} */}} {{- define "fuel-streams.labels" -}} -helm.sh/chart: {{ include "fuel-streams.chart" . }} -{{ include "fuel-streams.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- $context := default . .context -}} +helm.sh/chart: {{ include "fuel-streams.chart" $context }} +{{ include "fuel-streams.selectorLabels" (dict "name" .name "context" $context) }} +{{- if $context.Chart.AppVersion }} +app.kubernetes.io/version: {{ $context.Chart.AppVersion | quote }} {{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/managed-by: {{ $context.Release.Service }} {{- end }} {{/* Selector labels Core identifying labels used for object selection and service discovery. These labels should be used consistently across all related resources. +Parameters: + - name: Optional custom name to use instead of the default name + - .: Full context (passed automatically or as "context") Returns: Map - A set of key-value pairs for Kubernetes selector labels +Example: + {{- include "fuel-streams.selectorLabels" . }} + # Or with custom name: + {{- include "fuel-streams.selectorLabels" (dict "name" "custom-name" "context" $) }} */}} {{- define "fuel-streams.selectorLabels" -}} -app.kubernetes.io/name: {{ include "fuel-streams.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} +{{- $context := default . .context -}} +{{- $name := default (include "fuel-streams.name" $context) .name -}} +app.kubernetes.io/name: {{ $name }} +app.kubernetes.io/instance: {{ $context.Release.Name }} {{- end }} {{/* @@ -172,4 +217,4 @@ Returns: Value if it exists and is not empty {{- if and $value (not (empty $value)) (not (eq (kindOf $value) "invalid")) }} {{- toYaml $value | nindent 0 }} {{- end }} -{{- end }} +{{- end }} \ No newline at end of file diff --git a/cluster/charts/fuel-streams/templates/_hpa.yaml b/cluster/charts/fuel-streams/templates/_hpa.yaml new file mode 100644 index 00000000..2f539b35 --- /dev/null +++ b/cluster/charts/fuel-streams/templates/_hpa.yaml @@ -0,0 +1,55 @@ +{{- define "k8s.hpa" -}} +{{- $service := .service -}} +{{- $context := .context -}} +{{- $autoscaling := $service.autoscaling -}} +{{- if $autoscaling.enabled }} +--- +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + {{- include "k8s.metadata" (dict "context" $context "suffix" (printf "-%s" $service.name)) | nindent 2 }} + labels: + {{- include "fuel-streams.labels" (dict "name" $service.name "context" $context) | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "fuel-streams.fullname" $context }}-{{ $service.name }} + minReplicas: {{ $autoscaling.minReplicas }} + maxReplicas: {{ $autoscaling.maxReplicas }} + behavior: + scaleDown: + stabilizationWindowSeconds: {{ $autoscaling.behavior.scaleDown.stabilizationWindowSeconds | default 300 }} + policies: + - type: Percent + value: {{ $autoscaling.behavior.scaleDown.percentValue | default 100 }} + periodSeconds: {{ $autoscaling.behavior.scaleDown.periodSeconds | default 15 }} + scaleUp: + stabilizationWindowSeconds: {{ $autoscaling.behavior.scaleUp.stabilizationWindowSeconds | default 0 }} + policies: + - type: Percent + value: {{ $autoscaling.behavior.scaleUp.percentValue | default 100 }} + periodSeconds: {{ $autoscaling.behavior.scaleUp.periodSeconds | default 15 }} + - type: Pods + value: {{ $autoscaling.behavior.scaleUp.podValue | default 
4 }} + periodSeconds: {{ $autoscaling.behavior.scaleUp.periodSeconds | default 15 }} + selectPolicy: Max + metrics: + {{- if $autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ $autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if $autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ $autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/cluster/charts/fuel-streams/templates/certificate.yaml b/cluster/charts/fuel-streams/templates/certificate.yaml deleted file mode 100644 index 971e1070..00000000 --- a/cluster/charts/fuel-streams/templates/certificate.yaml +++ /dev/null @@ -1,52 +0,0 @@ -{{- $tls := .Values.tls }} -{{- $externalService := .Values.externalService }} -{{- if and $tls.enabled $externalService.dns }} -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - {{- include "k8s.metadata" (dict "context" . "suffix" "-cert-validator") | nindent 2 }} - labels: - {{- include "fuel-streams.labels" . | nindent 4 }} - {{- include "set-value" (dict "context" $tls "path" "labels") | nindent 4 }} - app.kubernetes.io/service: external-service - annotations: - cert-manager.io/cluster-issuer: {{ $tls.issuer }} - kubernetes.io/ingress.class: nginx - acme.cert-manager.io/http01-ingress-class: nginx - nginx.ingress.kubernetes.io/ssl-redirect: "false" - nginx.ingress.kubernetes.io/force-ssl-redirect: "false" - {{- include "set-value" (dict "context" $tls "path" "annotations") | nindent 4 }} -spec: - ingressClassName: nginx - rules: - - host: {{ $externalService.dns }} - http: - paths: - - path: /.well-known/acme-challenge/ - pathType: Prefix - backend: - service: - name: cm-acme-http-solver - port: - number: 8089 ---- -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - {{- include "k8s.metadata" (dict "context" . "suffix" "-ws-cert") | nindent 2 }} - labels: - {{- include "fuel-streams.labels" . | nindent 4 }} - {{- include "set-value" (dict "context" $tls "path" "labels") | nindent 4 }} - app.kubernetes.io/service: external-service - annotations: - {{- include "set-value" (dict "context" $tls "path" "annotations") | nindent 4 }} -spec: - secretName: {{ include "fuel-streams.fullname" . 
}}-ws-tls - duration: {{ $tls.duration }} - renewBefore: {{ $tls.renewBefore }} - dnsNames: - - {{ $externalService.dns }} - issuerRef: - name: {{ $tls.issuer }} - kind: ClusterIssuer -{{- end }} diff --git a/cluster/charts/fuel-streams/templates/common-config.yaml b/cluster/charts/fuel-streams/templates/common-config.yaml new file mode 100644 index 00000000..0ffe75dd --- /dev/null +++ b/cluster/charts/fuel-streams/templates/common-config.yaml @@ -0,0 +1,21 @@ +{{- if .Values.commonConfigMap.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: fuel-streams-config + labels: + app.kubernetes.io/instance: fuel-streams +data: + {{ .Values.commonConfigMap.data | toYaml | nindent 2 }} +{{- end }} +{{- if .Values.localSecrets.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: fuel-streams-keys + labels: + app.kubernetes.io/instance: fuel-streams +stringData: + {{ .Values.localSecrets.data | toYaml | nindent 2 }} +{{- end }} diff --git a/cluster/charts/fuel-streams/templates/consumer/statefulset.yaml b/cluster/charts/fuel-streams/templates/consumer/statefulset.yaml new file mode 100644 index 00000000..a1f73522 --- /dev/null +++ b/cluster/charts/fuel-streams/templates/consumer/statefulset.yaml @@ -0,0 +1,84 @@ +{{- $consumer := .Values.consumer -}} +{{- if $consumer.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + {{- include "k8s.metadata" (dict "context" . "suffix" "-consumer") | nindent 2 }} + annotations: + {{- include "set-value" (dict "context" $consumer "path" "config.annotations") | nindent 4 }} + labels: + {{- include "fuel-streams.labels" (dict "name" "consumer" "context" .) | nindent 4 }} + {{- include "set-value" (dict "context" $consumer "path" "config.labels") | nindent 4 }} +spec: + serviceName: {{ include "fuel-streams.fullname" . }}-consumer + {{- if not $consumer.autoscaling.enabled }} + replicas: {{ $consumer.config.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "fuel-streams.selectorLabels" (dict "name" "consumer" "context" .) | nindent 6 }} + + template: + metadata: + annotations: + {{- include "set-value" (dict "context" $consumer "path" "config.podAnnotations") | nindent 8 }} + labels: + {{- include "fuel-streams.labels" (dict "name" "consumer" "context" .) | nindent 8 }} + {{- include "set-value" (dict "context" $consumer "path" "config.labels") | nindent 8 }} + + spec: + {{- if .Values.serviceAccount.create }} + serviceAccountName: {{ include "fuel-streams.serviceAccountName" . }} + {{- end }} + {{- include "set-field-and-value" (dict "context" $consumer "field" "imagePullSecrets" "path" "config.imagePullSecrets") | nindent 6 }} + {{- include "set-field-and-value" (dict "context" $consumer "field" "nodeSelector" "path" "config.nodeSelector") | nindent 6 }} + {{- include "set-field-and-value" (dict "context" $consumer "field" "affinity" "path" "config.affinity") | nindent 6 }} + {{- include "set-field-and-value" (dict "context" $consumer "field" "tolerations" "path" "config.tolerations") | nindent 6 }} + {{- include "k8s.security-context" (dict "context" . "service" "consumer") | nindent 6 }} + + containers: + - name: consumer + image: "{{ $consumer.image.repository }}:{{ $consumer.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ $consumer.image.pullPolicy }} + command: ["/usr/src/sv-consumer"] + args: + - "--nats-url" + - "$(NATS_URL)" + - "--nats-publisher-url" + - "$(NATS_PUBLISHER_URL)" + {{- with $consumer.image.args }} + {{- toYaml . 
| nindent 10 }} + {{- end }} + + ports: + - name: consumer + containerPort: {{ $consumer.port }} + protocol: TCP + {{- with $consumer.config.ports }} + {{- toYaml . | nindent 12 }} + {{- end }} + + {{- include "set-field-and-value" (dict "context" $consumer "field" "resources" "path" "config.resources") | nindent 10 }} + {{- include "k8s.probes" (dict "context" . "service" "consumer") | nindent 10 }} + {{- include "k8s.container-security-context" (dict "context" . "service" "consumer") | nindent 10 }} + + env: + - name: PORT + value: {{ $consumer.port | quote }} + {{- with $consumer.env }} + {{- toYaml . | nindent 12 }} + {{- end }} + + envFrom: + - configMapRef: + name: {{ include "fuel-streams.fullname" $ }}-config + optional: true + - secretRef: + name: {{ include "fuel-streams.fullname" $ }}-keys + optional: true + {{- with $consumer.envFrom }} + {{- toYaml . | nindent 12 }} + {{- end }} + +{{- include "k8s.hpa" (dict "context" . "service" (dict "name" "consumer" "autoscaling" $consumer.autoscaling)) }} +{{- end }} diff --git a/cluster/charts/fuel-streams/templates/external-service.yaml b/cluster/charts/fuel-streams/templates/external-service.yaml deleted file mode 100644 index 4b2c4602..00000000 --- a/cluster/charts/fuel-streams/templates/external-service.yaml +++ /dev/null @@ -1,31 +0,0 @@ -{{- $externalService := .Values.externalService }} -{{- if and $externalService.enabled $externalService.dns }} -apiVersion: v1 -kind: Service -metadata: - {{- include "k8s.metadata" (dict "context" . "suffix" "-external") | nindent 2 }} - labels: - {{- include "fuel-streams.labels" . | nindent 4 }} - {{- include "set-value" (dict "context" $externalService "path" "labels") | nindent 4 }} - app.kubernetes.io/service: external-service - annotations: - external-dns.alpha.kubernetes.io/hostname: {{ $externalService.dns | quote }} - external-dns.alpha.kubernetes.io/cloudflare-proxied: "false" - service.beta.kubernetes.io/aws-load-balancer-attributes: load_balancing.cross_zone.enabled=true - service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp - service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip - service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing - service.beta.kubernetes.io/aws-load-balancer-target-group-attributes: preserve_client_ip.enabled=true,stickiness.enabled=true,stickiness.type=source_ip,load_balancing.cross_zone.enabled=true - service.beta.kubernetes.io/aws-load-balancer-type: external - service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: "WebSocket=true" - {{- include "set-value" (dict "context" $externalService "path" "annotations") | nindent 2 }} -spec: - type: LoadBalancer - loadBalancerClass: service.k8s.aws/nlb - externalTrafficPolicy: Local - ports: - {{- toYaml $externalService.ports | nindent 4 }} - selector: - {{- include "fuel-streams.selectorLabels" . | nindent 4 }} - app.kubernetes.io/service: external-service -{{- end }} diff --git a/cluster/charts/fuel-streams/templates/publisher/network-configmap.yaml b/cluster/charts/fuel-streams/templates/publisher/network-configmap.yaml deleted file mode 100644 index 18a644ef..00000000 --- a/cluster/charts/fuel-streams/templates/publisher/network-configmap.yaml +++ /dev/null @@ -1,31 +0,0 @@ -{{- $publisher := .Values.publisher }} -apiVersion: v1 -kind: ConfigMap -metadata: - {{- include "k8s.metadata" (dict "context" . 
"suffix" "-network-config") | nindent 2 }} - annotations: - {{- include "set-value" (dict "context" $publisher "path" "config.annotations") | nindent 4 }} - labels: - {{- include "fuel-streams.labels" . | nindent 4 }} - {{- include "set-value" (dict "context" $publisher "path" "config.labels") | nindent 4 }} - app.kubernetes.io/component: publisher -data: - P2P_PORT: "30333" - DB_PATH: {{ .Values.publisher.storage.mountPath | quote }} - POA_INSTANT: "false" - SERVICE_NAME: "Publisher Node ({{ $publisher.network }})" - {{- if eq $publisher.network "mainnet" }} - RELAYER_V2_LISTENING_CONTRACTS: "0xAEB0c00D0125A8a788956ade4f4F12Ead9f65DDf" - RELAYER_DA_DEPLOY_HEIGHT: "20620434" - RELAYER_LOG_PAGE_SIZE: "2000" - SYNC_HEADER_BATCH_SIZE: "100" - RESERVED_NODES: "/dnsaddr/mainnet.fuel.network" - CHAIN_CONFIG: "mainnet" - {{- else if eq $publisher.network "testnet" }} - RELAYER_V2_LISTENING_CONTRACTS: "0x01855B78C1f8868DE70e84507ec735983bf262dA" - RELAYER_DA_DEPLOY_HEIGHT: "5827607" - RELAYER_LOG_PAGE_SIZE: "2000" - SYNC_HEADER_BATCH_SIZE: "100" - RESERVED_NODES: "/dns4/p2p-testnet.fuel.network/tcp/30333/p2p/16Uiu2HAmDxoChB7AheKNvCVpD4PHJwuDGn8rifMBEHmEynGHvHrf,/dns4/p2p-testnet.fuel.network/tcp/30333/p2p/16Uiu2HAmHnANNk4HjAxQV66BNCRxd2MBUU89ijboZkE69aLuSn1g,/dns4/p2p-testnet.fuel.network/tcp/30333/p2p/16Uiu2HAmVE468rpkh2X1kzz8qQXmqNFiPxU5Lrya28nZdbRUdVJX" - CHAIN_CONFIG: "testnet" - {{- end }} diff --git a/cluster/charts/fuel-streams/templates/publisher/statefulset.yaml b/cluster/charts/fuel-streams/templates/publisher/statefulset.yaml index 0ad2c022..9220c741 100644 --- a/cluster/charts/fuel-streams/templates/publisher/statefulset.yaml +++ b/cluster/charts/fuel-streams/templates/publisher/statefulset.yaml @@ -7,9 +7,8 @@ metadata: annotations: {{- include "set-value" (dict "context" $publisher "path" "config.annotations") | nindent 4 }} labels: - {{- include "fuel-streams.labels" . | nindent 4 }} + {{- include "fuel-streams.labels" (dict "name" "publisher" "context" .) | nindent 4 }} {{- include "set-value" (dict "context" $publisher "path" "config.labels") | nindent 4 }} - app.kubernetes.io/component: publisher spec: serviceName: {{ include "fuel-streams.fullname" . }}-publisher {{- if not $publisher.autoscaling.enabled }} @@ -17,18 +16,15 @@ spec: {{- end }} selector: matchLabels: - {{- include "fuel-streams.selectorLabels" . | nindent 6 }} - {{- include "set-value" (dict "context" $publisher "path" "config.selectorLabels") | nindent 6 }} - app.kubernetes.io/component: publisher + {{- include "fuel-streams.selectorLabels" (dict "name" "publisher" "context" .) | nindent 6 }} template: metadata: annotations: {{- include "set-value" (dict "context" $publisher "path" "config.podAnnotations") | nindent 8 }} labels: - {{- include "fuel-streams.selectorLabels" . | nindent 8 }} + {{- include "fuel-streams.labels" (dict "name" "publisher" "context" .) 
| nindent 8 }} {{- include "set-value" (dict "context" $publisher "path" "config.labels") | nindent 8 }} - app.kubernetes.io/component: publisher spec: {{- if .Values.serviceAccount.create }} @@ -45,7 +41,7 @@ spec: emptyDir: {} - name: var-dir emptyDir: {} - + initContainers: - name: update-{{ $publisher.storage.name }} image: alpine:latest @@ -81,12 +77,74 @@ spec: - name: publisher image: "{{ $publisher.image.repository }}:{{ $publisher.image.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ $publisher.image.pullPolicy }} + command: ["/usr/src/sv-publisher"] + args: + # Common arguments + - "--enable-relayer" + - "--enable-p2p" + - "--keypair" + - "$(KEYPAIR)" + - "--relayer" + - "$(RELAYER)" + - "--ip" + - "0.0.0.0" + - "--port" + - "$(PORT)" + - "--peering-port" + - "30333" + - "--utxo-validation" + - "--poa-instant" + - "false" + - "--db-path" + - "$(DB_PATH)" + - "--nats-url" + - "$(NATS_URL)" + - "--sync-header-batch-size" + - "100" + - "--relayer-log-page-size" + - "2000" + - "--sync-block-stream-buffer-size" + - "50" + - "--max-database-cache-size" + - "17179869184" + - "--state-rewind-duration" + - "136y" + - "--request-timeout" + - "60" + - "--graphql-max-complexity" + - "1000000000" + {{- if eq $publisher.network "mainnet" }} + # Mainnet specific args + - "--service-name" + - "Publisher Node (Mainnet)" + - "--snapshot" + - "./chain-config/mainnet" + - "--reserved-nodes" + - "/dnsaddr/mainnet.fuel.network" + - "--relayer-v2-listening-contracts" + - "0xAEB0c00D0125A8a788956ade4f4F12Ead9f65DDf" + - "--relayer-da-deploy-height" + - "20620434" + {{- else if eq $publisher.network "testnet" }} + # Testnet specific args + - "--service-name" + - "Publisher Node (Testnet)" + - "--snapshot" + - "./chain-config/testnet" + - "--reserved-nodes" + - "/dns4/p2p-testnet.fuel.network/tcp/30333/p2p/16Uiu2HAmDxoChB7AheKNvCVpD4PHJwuDGn8rifMBEHmEynGHvHrf,/dns4/p2p-testnet.fuel.network/tcp/30333/p2p/16Uiu2HAmHnANNk4HjAxQV66BNCRxd2MBUU89ijboZkE69aLuSn1g,/dns4/p2p-testnet.fuel.network/tcp/30333/p2p/16Uiu2HAmVE468rpkh2X1kzz8qQXmqNFiPxU5Lrya28nZdbRUdVJX" + - "--relayer-v2-listening-contracts" + - "0x01855B78C1f8868DE70e84507ec735983bf262dA" + - "--relayer-da-deploy-height" + - "5827607" + {{- end }} + ports: - name: http - containerPort: {{ int $publisher.service.port }} + containerPort: {{ int $publisher.port }} protocol: TCP - {{- if $publisher.ports }} - {{- toYaml $publisher.ports | nindent 12 }} + {{- with $publisher.config.ports }} + {{- toYaml . | nindent 12 }} {{- end }} {{- include "set-field-and-value" (dict "context" $publisher "field" "resources" "path" "config.resources") | nindent 10 }} @@ -96,22 +154,20 @@ spec: env: - name: TMPDIR value: "/var/fuel-streams/tmp" - {{- range $key, $value := $publisher.env }} - - name: {{ $key }} - value: {{ $value | quote }} - {{- end }} - {{- with $publisher.extraEnv }} + - name: DB_PATH + value: {{ $publisher.storage.mountPath | default "/mnt/db" | quote }} + - name: PORT + value: {{ $publisher.port | quote }} + {{- with $publisher.env }} {{- toYaml . | nindent 12 }} {{- end }} envFrom: - configMapRef: - name: {{ include "fuel-streams.fullname" $ }}-network-config - - configMapRef: - name: {{ include "fuel-streams.fullname" $ }}-publisher + name: {{ include "fuel-streams.fullname" $ }}-config optional: true - secretRef: - name: {{ include "fuel-streams.fullname" $ }}-publisher + name: {{ include "fuel-streams.fullname" $ }}-keys optional: true {{- with $publisher.envFrom }} {{- toYaml . 
| nindent 12 }} diff --git a/cluster/charts/fuel-streams/templates/secret-creator.yaml b/cluster/charts/fuel-streams/templates/secret-creator.yaml index 04719e93..a64043ec 100755 --- a/cluster/charts/fuel-streams/templates/secret-creator.yaml +++ b/cluster/charts/fuel-streams/templates/secret-creator.yaml @@ -4,7 +4,7 @@ kind: Role metadata: {{- include "k8s.metadata" (dict "context" . "suffix" "-secret-creator") | nindent 2 }} labels: - {{- include "fuel-streams.labels" . | nindent 2 }} + {{- include "fuel-streams.labels" (dict "name" "secret-creator" "context" .) | nindent 2 }} rules: - apiGroups: [""] # "" indicates the core API group resources: ["pods"] @@ -20,6 +20,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: {{- include "k8s.metadata" (dict "context" . "suffix" "-secret-creator") | nindent 2 }} + labels: + {{- include "fuel-streams.labels" (dict "name" "secret-creator" "context" .) | nindent 2 }} subjects: - kind: ServiceAccount name: {{ include "fuel-streams.serviceAccountName" . }} diff --git a/cluster/charts/fuel-streams/templates/service-account.yaml b/cluster/charts/fuel-streams/templates/service-account.yaml index 8201ea7c..eac6647b 100755 --- a/cluster/charts/fuel-streams/templates/service-account.yaml +++ b/cluster/charts/fuel-streams/templates/service-account.yaml @@ -4,6 +4,6 @@ kind: ServiceAccount metadata: {{- include "k8s.metadata" (dict "context" . "suffix" "-service-account") | nindent 2 }} labels: - {{- include "fuel-streams.labels" . | nindent 4 }} + {{- include "fuel-streams.labels" (dict "name" "service-account" "context" .) | nindent 4 }} automountServiceAccountToken: {{ .Values.serviceAccount.automount }} {{- end -}} diff --git a/cluster/charts/fuel-streams/templates/webserver/deployment.yaml b/cluster/charts/fuel-streams/templates/webserver/deployment.yaml index 73935fac..b669f5c5 100644 --- a/cluster/charts/fuel-streams/templates/webserver/deployment.yaml +++ b/cluster/charts/fuel-streams/templates/webserver/deployment.yaml @@ -1,4 +1,5 @@ {{- $webserver := .Values.webserver -}} +{{- $service := $webserver.service -}} {{- if $webserver.enabled -}} apiVersion: apps/v1 kind: Deployment @@ -7,31 +8,25 @@ metadata: annotations: {{- include "set-value" (dict "context" $webserver "path" "config.annotations") | nindent 4 }} labels: - {{- include "fuel-streams.labels" . | nindent 4 }} + {{- include "fuel-streams.labels" (dict "name" "webserver" "context" .) | nindent 4 }} {{- include "set-value" (dict "context" $webserver "path" "config.labels") | nindent 4 }} app.kubernetes.io/component: webserver - app.kubernetes.io/service: external-service - spec: {{- if not $webserver.autoscaling.enabled }} replicas: {{ $webserver.config.replicaCount }} {{- end }} selector: matchLabels: - {{- include "fuel-streams.selectorLabels" . | nindent 6 }} - {{- include "set-value" (dict "context" $webserver "path" "config.selectorLabels") | nindent 6 }} - app.kubernetes.io/component: webserver - app.kubernetes.io/service: external-service + {{- include "fuel-streams.selectorLabels" (dict "name" "webserver" "context" .) | nindent 6 }} template: metadata: annotations: {{- include "set-value" (dict "context" $webserver "path" "config.podAnnotations") | nindent 8 }} labels: - {{- include "fuel-streams.selectorLabels" . | nindent 8 }} + {{- include "fuel-streams.labels" (dict "name" "webserver" "context" .) 
| nindent 8 }} {{- include "set-value" (dict "context" $webserver "path" "config.labels") | nindent 8 }} app.kubernetes.io/component: webserver - app.kubernetes.io/service: external-service spec: {{- if .Values.serviceAccount.create }} @@ -51,48 +46,32 @@ spec: ports: - name: webserver - containerPort: {{ $webserver.port }} + containerPort: {{ $service.port }} protocol: TCP - {{- if $webserver.ports }} - {{- toYaml $webserver.ports | nindent 12 }} - {{- end }} {{- include "set-field-and-value" (dict "context" $webserver "field" "resources" "path" "config.resources") | nindent 10 }} {{- include "k8s.probes" (dict "context" . "service" "webserver") | nindent 10 }} {{- include "k8s.container-security-context" (dict "context" . "service" "webserver") | nindent 10 }} - env: - {{- range $key, $value := $webserver.env }} - - name: {{ $key }} - value: {{ $value | quote }} + envFrom: + - configMapRef: + name: {{ include "fuel-streams.fullname" $ }}-config + optional: true + - secretRef: + name: {{ include "fuel-streams.fullname" $ }}-keys + optional: true + {{- with $webserver.envFrom }} + {{- toYaml . | nindent 12 }} {{- end }} - {{- with $webserver.extraEnv }} + + env: + - name: NETWORK + value: {{ $webserver.network | quote }} + - name: PORT + value: {{ $service.port | quote }} + {{- with $webserver.env }} {{- toYaml . | nindent 12 }} {{- end }} -{{- end }} ---- -apiVersion: v1 -kind: Service -metadata: - {{- include "k8s.metadata" (dict "context" . "suffix" "-webserver") | nindent 2 }} - annotations: - service.beta.kubernetes.io/aws-load-balancer-type: "nlb" - {{- include "set-value" (dict "context" $webserver "path" "config.annotations") | nindent 4 }} - labels: - {{- include "fuel-streams.labels" . | nindent 4 }} - {{- include "set-value" (dict "context" $webserver "path" "config.labels") | nindent 4 }} - app.kubernetes.io/component: webserver - app.kubernetes.io/service: external-service -spec: - type: LoadBalancer - ports: - - name: webserver - port: {{ $webserver.port }} - targetPort: {{ $webserver.port }} - protocol: TCP - selector: - {{- include "fuel-streams.selectorLabels" . | nindent 4 }} - {{- include "set-value" (dict "context" $webserver "path" "config.selectorLabels") | nindent 4 }} - app.kubernetes.io/component: webserver - app.kubernetes.io/service: external-service +{{- include "k8s.hpa" (dict "context" . "service" (dict "name" "webserver" "autoscaling" $webserver.autoscaling)) }} +{{- end }} \ No newline at end of file diff --git a/cluster/charts/fuel-streams/templates/webserver/service.yaml b/cluster/charts/fuel-streams/templates/webserver/service.yaml new file mode 100644 index 00000000..345c03a4 --- /dev/null +++ b/cluster/charts/fuel-streams/templates/webserver/service.yaml @@ -0,0 +1,37 @@ +{{- $service := .Values.webserver.service }} +{{- if and .Values.webserver.enabled $service.enabled }} +apiVersion: v1 +kind: Service +metadata: + {{- include "k8s.metadata" (dict "context" . 
"suffix" "-webserver-nlb") | nindent 2 }} + annotations: + {{- if $service.dns }} + external-dns.alpha.kubernetes.io/hostname: {{ $service.dns }} + external-dns.alpha.kubernetes.io/cloudflare-proxied: "false" + {{- end }} + service.beta.kubernetes.io/aws-load-balancer-attributes: load_balancing.cross_zone.enabled=true + service.beta.kubernetes.io/aws-load-balancer-backend-protocol: tcp + service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip + service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing + service.beta.kubernetes.io/aws-load-balancer-target-group-attributes: preserve_client_ip.enabled=true,stickiness.enabled=true,stickiness.type=source_ip,load_balancing.cross_zone.enabled=true + service.beta.kubernetes.io/aws-load-balancer-type: external + service.beta.kubernetes.io/aws-load-balancer-additional-resource-tags: "WebSocket=true" + {{- include "set-value" (dict "context" $service "path" "annotations") | nindent 2 }} + labels: + {{- include "fuel-streams.labels" (dict "name" "webserver" "context" .) | nindent 4 }} + {{- include "set-value" (dict "context" $service "path" "labels") | nindent 4 }} + app.kubernetes.io/component: webserver +spec: + type: {{ $service.type }} + loadBalancerClass: service.k8s.aws/nlb + externalTrafficPolicy: Local + ports: + - appProtocol: tcp + name: websocket + port: {{ $service.port }} + protocol: TCP + targetPort: {{ $service.port }} + selector: + {{- include "fuel-streams.selectorLabels" (dict "name" "webserver" "context" .) | nindent 4 }} + app.kubernetes.io/component: webserver +{{- end }} diff --git a/cluster/charts/fuel-streams/tests/certificate_test.yaml b/cluster/charts/fuel-streams/tests/certificate_test.yaml index 179f8f4c..0c93d1ad 100644 --- a/cluster/charts/fuel-streams/tests/certificate_test.yaml +++ b/cluster/charts/fuel-streams/tests/certificate_test.yaml @@ -53,7 +53,7 @@ tests: documentIndex: 1 - equal: path: metadata.name - value: RELEASE-NAME-fuel-streams-ws-cert + value: RELEASE-NAME-sv-webserver-cert documentIndex: 1 - equal: path: spec.dnsNames[0] diff --git a/cluster/charts/fuel-streams/tests/consumer/deployment_test.yaml b/cluster/charts/fuel-streams/tests/consumer/deployment_test.yaml new file mode 100644 index 00000000..534914e2 --- /dev/null +++ b/cluster/charts/fuel-streams/tests/consumer/deployment_test.yaml @@ -0,0 +1,305 @@ +suite: Testing Consumer deployment +templates: + - templates/consumer/deployment.yaml +tests: + - it: should not create deployment when consumer is disabled + set: + consumer.enabled: false + asserts: + - hasDocuments: + count: 0 + + - it: should create deployment with correct kind and metadata + set: + consumer.enabled: true + asserts: + - isKind: + of: Deployment + documentIndex: 0 + - isAPIVersion: + of: apps/v1 + documentIndex: 0 + - equal: + path: metadata.name + value: RELEASE-NAME-fuel-streams-consumer + - equal: + path: metadata.labels["app.kubernetes.io/component"] + value: consumer + - equal: + path: metadata.labels["app.kubernetes.io/service"] + value: external-service + documentIndex: 0 + + - it: should set correct selector labels + set: + consumer.enabled: true + asserts: + - equal: + path: spec.selector.matchLabels["app.kubernetes.io/component"] + value: consumer + documentIndex: 0 + - isSubset: + path: spec.selector.matchLabels + content: + app.kubernetes.io/name: fuel-streams + app.kubernetes.io/instance: RELEASE-NAME + documentIndex: 0 + + - it: should set image configuration correctly + set: + consumer.enabled: true + consumer.image.repository: 
ghcr.io/fuellabs/sv-consumer
+      consumer.image.tag: latest
+      consumer.image.pullPolicy: Always
+    asserts:
+      - equal:
+          path: spec.template.spec.containers[0].image
+          value: ghcr.io/fuellabs/sv-consumer:latest
+        documentIndex: 0
+      - equal:
+          path: spec.template.spec.containers[0].imagePullPolicy
+          value: Always
+        documentIndex: 0
+
+  - it: should use chart version when tag is not specified
+    set:
+      consumer.enabled: true
+      consumer.image.repository: ghcr.io/fuellabs/sv-consumer
+      consumer.image.tag: null
+      Chart:
+        Version: "1.0"
+    asserts:
+      - equal:
+          path: spec.template.spec.containers[0].image
+          value: ghcr.io/fuellabs/sv-consumer:1.0
+        documentIndex: 0
+
+  - it: should configure ports correctly
+    set:
+      consumer.enabled: true
+      consumer.port: 8082
+      consumer.ports:
+        - name: metrics
+          containerPort: 9090
+          protocol: TCP
+    asserts:
+      - lengthEqual:
+          path: spec.template.spec.containers[0].ports
+          count: 2
+        documentIndex: 0
+      - contains:
+          path: spec.template.spec.containers[0].ports
+          content:
+            name: consumer
+            containerPort: 8082
+            protocol: TCP
+        documentIndex: 0
+      - contains:
+          path: spec.template.spec.containers[0].ports
+          content:
+            name: metrics
+            containerPort: 9090
+            protocol: TCP
+        documentIndex: 0
+
+  - it: should set replicas when autoscaling is disabled
+    set:
+      consumer.enabled: true
+      consumer.autoscaling.enabled: false
+      consumer.config.replicaCount: 3
+    asserts:
+      - equal:
+          path: spec.replicas
+          value: 3
+        documentIndex: 0
+
+  - it: should not set replicas when autoscaling is enabled
+    set:
+      consumer.enabled: true
+      consumer.autoscaling.enabled: true
+      consumer.config.replicaCount: 3
+    asserts:
+      - isNull:
+          path: spec.replicas
+        documentIndex: 0
+
+  - it: should merge environment variables correctly
+    set:
+      consumer.enabled: true
+      consumer.env:
+        RUST_LOG: info
+        APP_PORT: "8080"
+      consumer.extraEnv:
+        - name: EXTRA_VAR
+          value: "extra-value"
+        - name: SECRET_VAR
+          valueFrom:
+            secretKeyRef:
+              name: my-secret
+              key: my-key
+    asserts:
+      - contains:
+          path: spec.template.spec.containers[0].env
+          content:
+            name: RUST_LOG
+            value: "info"
+        documentIndex: 0
+      - contains:
+          path: spec.template.spec.containers[0].env
+          content:
+            name: APP_PORT
+            value: "8080"
+        documentIndex: 0
+      - contains:
+          path: spec.template.spec.containers[0].env
+          content:
+            name: EXTRA_VAR
+            value: "extra-value"
+        documentIndex: 0
+      - contains:
+          path: spec.template.spec.containers[0].env
+          content:
+            name: SECRET_VAR
+            valueFrom:
+              secretKeyRef:
+                name: my-secret
+                key: my-key
+        documentIndex: 0
+
+  - it: should set security context when specified
+    set:
+      consumer.enabled: true
+      consumer.config.securityContext:
+        runAsUser: 1000
+        runAsGroup: 3000
+        fsGroup: 2000
+    asserts:
+      - equal:
+          path: spec.template.spec.securityContext.runAsUser
+          value: 1000
+        documentIndex: 0
+      - equal:
+          path: spec.template.spec.securityContext.runAsGroup
+          value: 3000
+        documentIndex: 0
+      - equal:
+          path: spec.template.spec.securityContext.fsGroup
+          value: 2000
+        documentIndex: 0
+
+  - it: should set resource limits and requests
+    set:
+      consumer.enabled: true
+      consumer.config.resources:
+        limits:
+          cpu: 100m
+          memory: 128Mi
+        requests:
+          cpu: 50m
+          memory: 64Mi
+    asserts:
+      - equal:
+          path: spec.template.spec.containers[0].resources.limits.cpu
+          value: 100m
+        documentIndex: 0
+      - equal:
+          path: spec.template.spec.containers[0].resources.limits.memory
+          value: 128Mi
+        documentIndex: 0
+      - equal:
+          path: spec.template.spec.containers[0].resources.requests.cpu
+          value: 50m
+        documentIndex: 0
+      - equal:
+          path:
spec.template.spec.containers[0].resources.requests.memory + value: 64Mi + documentIndex: 0 + + - it: should create HPA with correct configuration when autoscaling is enabled + set: + consumer.enabled: true + consumer.autoscaling.enabled: true + consumer.autoscaling.minReplicas: 2 + consumer.autoscaling.maxReplicas: 5 + consumer.autoscaling.targetCPUUtilizationPercentage: 75 + consumer.autoscaling.targetMemoryUtilizationPercentage: 85 + asserts: + - hasDocuments: + count: 2 + - isKind: + of: HorizontalPodAutoscaler + documentIndex: 1 + - equal: + path: spec.minReplicas + value: 2 + documentIndex: 1 + - equal: + path: spec.maxReplicas + value: 5 + documentIndex: 1 + - equal: + path: spec.metrics[0].resource.target.averageUtilization + value: 75 + documentIndex: 1 + - equal: + path: spec.metrics[1].resource.target.averageUtilization + value: 85 + documentIndex: 1 + + - it: should configure HPA scaling behavior correctly + set: + consumer.enabled: true + consumer.autoscaling.enabled: true + consumer.autoscaling.behavior.scaleDown.stabilizationWindowSeconds: 400 + consumer.autoscaling.behavior.scaleDown.percentValue: 50 + consumer.autoscaling.behavior.scaleUp.stabilizationWindowSeconds: 60 + consumer.autoscaling.behavior.scaleUp.percentValue: 200 + consumer.autoscaling.behavior.scaleUp.podValue: 6 + asserts: + - equal: + path: spec.behavior.scaleDown.stabilizationWindowSeconds + value: 400 + documentIndex: 1 + - equal: + path: spec.behavior.scaleDown.policies[0].type + value: Percent + documentIndex: 1 + - equal: + path: spec.behavior.scaleDown.policies[0].value + value: 50 + documentIndex: 1 + - equal: + path: spec.behavior.scaleUp.stabilizationWindowSeconds + value: 60 + documentIndex: 1 + - equal: + path: spec.behavior.scaleUp.policies[0].type + value: Percent + documentIndex: 1 + - equal: + path: spec.behavior.scaleUp.policies[0].value + value: 200 + documentIndex: 1 + - equal: + path: spec.behavior.scaleUp.policies[1].type + value: Pods + documentIndex: 1 + - equal: + path: spec.behavior.scaleUp.policies[1].value + value: 6 + documentIndex: 1 + - equal: + path: spec.behavior.scaleUp.selectPolicy + value: Max + documentIndex: 1 + + - it: should not create HPA when autoscaling is disabled + set: + consumer.enabled: true + consumer.autoscaling.enabled: false + asserts: + - hasDocuments: + count: 1 + - isKind: + of: Deployment + documentIndex: 0 diff --git a/cluster/charts/fuel-streams/tests/external_service_test.yaml b/cluster/charts/fuel-streams/tests/external_service_test.yaml index 11a70a5e..611cb452 100644 --- a/cluster/charts/fuel-streams/tests/external_service_test.yaml +++ b/cluster/charts/fuel-streams/tests/external_service_test.yaml @@ -73,7 +73,7 @@ tests: path: spec.selector content: app.kubernetes.io/name: fuel-streams - app.kubernetes.io/service: external-service + app.kubernetes.io/service: external-ws - it: should set correct annotations set: diff --git a/cluster/charts/fuel-streams/tests/publisher/network-configmap.yaml b/cluster/charts/fuel-streams/tests/publisher/network-configmap.yaml deleted file mode 100644 index dfb5a7b3..00000000 --- a/cluster/charts/fuel-streams/tests/publisher/network-configmap.yaml +++ /dev/null @@ -1,87 +0,0 @@ -suite: Testing Publisher network configmap -templates: - - templates/publisher/network-configmap.yaml -tests: - - it: should configure mainnet correctly - set: - publisher.enabled: true - publisher.network: mainnet - publisher.storage.mountPath: /mnt/db - asserts: - - isKind: - of: ConfigMap - - equal: - path: metadata.name - value: 
RELEASE-NAME-fuel-streams-network-config - - equal: - path: data.P2P_PORT - value: "30333" - - equal: - path: data.DB_PATH - value: /mnt/db - - equal: - path: data.POA_INSTANT - value: "false" - - equal: - path: data.SERVICE_NAME - value: "Publisher Node (mainnet)" - - equal: - path: data.RELAYER_V2_LISTENING_CONTRACTS - value: "0xAEB0c00D0125A8a788956ade4f4F12Ead9f65DDf" - - equal: - path: data.RELAYER_DA_DEPLOY_HEIGHT - value: "20620434" - - equal: - path: data.RELAYER_LOG_PAGE_SIZE - value: "2000" - - equal: - path: data.SYNC_HEADER_BATCH_SIZE - value: "100" - - equal: - path: data.RESERVED_NODES - value: "/dnsaddr/mainnet.fuel.network" - - equal: - path: data.CHAIN_CONFIG - value: "mainnet" - - - it: should configure testnet correctly - set: - publisher.enabled: true - publisher.network: testnet - publisher.storage.mountPath: /mnt/db - asserts: - - isKind: - of: ConfigMap - - equal: - path: metadata.name - value: RELEASE-NAME-fuel-streams-network-config - - equal: - path: data.P2P_PORT - value: "30333" - - equal: - path: data.DB_PATH - value: /mnt/db - - equal: - path: data.POA_INSTANT - value: "false" - - equal: - path: data.SERVICE_NAME - value: "Publisher Node (testnet)" - - equal: - path: data.RELAYER_V2_LISTENING_CONTRACTS - value: "0x01855B78C1f8868DE70e84507ec735983bf262dA" - - equal: - path: data.RELAYER_DA_DEPLOY_HEIGHT - value: "5827607" - - equal: - path: data.RELAYER_LOG_PAGE_SIZE - value: "2000" - - equal: - path: data.SYNC_HEADER_BATCH_SIZE - value: "100" - - equal: - path: data.RESERVED_NODES - value: "/dns4/p2p-testnet.fuel.network/tcp/30333/p2p/16Uiu2HAmDxoChB7AheKNvCVpD4PHJwuDGn8rifMBEHmEynGHvHrf,/dns4/p2p-testnet.fuel.network/tcp/30333/p2p/16Uiu2HAmHnANNk4HjAxQV66BNCRxd2MBUU89ijboZkE69aLuSn1g,/dns4/p2p-testnet.fuel.network/tcp/30333/p2p/16Uiu2HAmVE468rpkh2X1kzz8qQXmqNFiPxU5Lrya28nZdbRUdVJX" - - equal: - path: data.CHAIN_CONFIG - value: "testnet" diff --git a/cluster/charts/fuel-streams/tests/publisher/statefulset.yaml b/cluster/charts/fuel-streams/tests/publisher/statefulset.yaml index fd3c3f6b..eaa6af79 100644 --- a/cluster/charts/fuel-streams/tests/publisher/statefulset.yaml +++ b/cluster/charts/fuel-streams/tests/publisher/statefulset.yaml @@ -10,36 +10,36 @@ tests: of: StatefulSet - equal: path: metadata.name - value: RELEASE-NAME-fuel-streams-publisher + value: RELEASE-NAME-sv-publisher - it: should set correct image and tag set: publisher.enabled: true - publisher.image.repository: ghcr.io/fuellabs/fuel-streams-publisher + publisher.image.repository: ghcr.io/fuellabs/sv-publisher publisher.image.tag: latest asserts: - equal: path: spec.template.spec.containers[0].image - value: ghcr.io/fuellabs/fuel-streams-publisher:latest + value: ghcr.io/fuellabs/sv-publisher:latest - it: should use chart version when tag is not specified set: publisher.enabled: true - publisher.image.repository: ghcr.io/fuellabs/fuel-streams-publisher + publisher.image.repository: ghcr.io/fuellabs/sv-publisher publisher.image.tag: null Chart: Version: "1.0" asserts: - equal: path: spec.template.spec.containers[0].image - value: ghcr.io/fuellabs/fuel-streams-publisher:1.0 + value: ghcr.io/fuellabs/sv-publisher:1.0 - it: should merge environment variables correctly set: publisher.enabled: true publisher.env: - CHAIN_CONFIG: "testnet" # Override default - NEW_VAR: "new-value" # Add new var + CHAIN_CONFIG: "testnet" # Override default + NEW_VAR: "new-value" # Add new var publisher.extraEnv: - name: SIMPLE_VAR value: "simple-value" diff --git 
a/cluster/charts/fuel-streams/tests/webserver/deployment_test.yaml b/cluster/charts/fuel-streams/tests/webserver/deployment_test.yaml index 84833c59..9d5a148d 100644 --- a/cluster/charts/fuel-streams/tests/webserver/deployment_test.yaml +++ b/cluster/charts/fuel-streams/tests/webserver/deployment_test.yaml @@ -19,7 +19,7 @@ tests: of: apps/v1 - equal: path: metadata.name - value: RELEASE-NAME-fuel-streams-webserver + value: RELEASE-NAME-sv-webserver - equal: path: metadata.labels["app.kubernetes.io/component"] value: webserver @@ -40,13 +40,13 @@ tests: - it: should set image configuration correctly set: webserver.enabled: true - webserver.image.repository: ghcr.io/fuellabs/fuel-streams-webserver + webserver.image.repository: ghcr.io/fuellabs/sv-webserver webserver.image.tag: latest webserver.image.pullPolicy: Always asserts: - equal: path: spec.template.spec.containers[0].image - value: ghcr.io/fuellabs/fuel-streams-webserver:latest + value: ghcr.io/fuellabs/sv-webserver:latest - equal: path: spec.template.spec.containers[0].imagePullPolicy value: Always @@ -54,14 +54,14 @@ tests: - it: should use chart version when tag is not specified set: webserver.enabled: true - webserver.image.repository: ghcr.io/fuellabs/fuel-streams-webserver + webserver.image.repository: ghcr.io/fuellabs/sv-webserver webserver.image.tag: null Chart: Version: "1.0" asserts: - equal: path: spec.template.spec.containers[0].image - value: ghcr.io/fuellabs/fuel-streams-webserver:1.0 + value: ghcr.io/fuellabs/sv-webserver:1.0 - it: should configure ports correctly set: @@ -186,4 +186,4 @@ tests: value: 50m - equal: path: spec.template.spec.containers[0].resources.requests.memory - value: 64Mi \ No newline at end of file + value: 64Mi diff --git a/cluster/charts/fuel-streams/values-local.yaml b/cluster/charts/fuel-streams/values-local.yaml new file mode 100644 index 00000000..46254d83 --- /dev/null +++ b/cluster/charts/fuel-streams/values-local.yaml @@ -0,0 +1,135 @@ +config: + createRoles: true + healthChecks: true + +commonConfigMap: + enabled: true + data: + AWS_S3_BUCKET_NAME: "fuel-streams-staging" + AWS_ENDPOINT_URL: "https://localhost:9000" + AWS_REGION: "us-east-1" + AWS_S3_ENABLED: "false" + USE_METRICS: "false" + NATS_URL: "fuel-streams-nats-core:4222" + NATS_PUBLISHER_URL: "fuel-streams-nats-publisher:4222" + NATS_SYSTEM_USER: "sys" + NATS_SYSTEM_PASS: "sys" + NATS_ADMIN_USER: "admin" + NATS_ADMIN_PASS: "admin" + NATS_PUBLIC_USER: "default_user" + NATS_PUBLIC_PASS: "" + +# Reduce storage requirements for local development +publisher: + image: + repository: sv-publisher + pullPolicy: IfNotPresent + tag: latest + + storage: + size: 10Gi + storageClass: "standard" # Use default storage class + + config: + replicaCount: 1 + resources: + requests: + cpu: 100m + memory: 256Mi + limits: + cpu: 500m + memory: 512Mi + +consumer: + enabled: true + image: + repository: sv-consumer + pullPolicy: IfNotPresent + tag: latest + + config: + replicaCount: 1 + resources: + requests: + cpu: 100m + memory: 256Mi + limits: + cpu: 500m + memory: 512Mi + +webserver: + enabled: true + image: + repository: sv-webserver + pullPolicy: IfNotPresent + tag: latest + + service: + enabled: true + port: 9003 + + tls: + enabled: false + +# NATS Core configuration for local development +nats-core: + enabled: true + container: + env: + GOMEMLIMIT: 1GiB + merge: + envFrom: + - configMapRef: + name: fuel-streams-config + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 500m + memory: 1Gi + + config: + cluster: + replicas: 3 + + 
jetstream: + fileStore: + pvc: + size: 10Gi + storageClassName: "standard" + + merge: + jetstream: + max_file_store: << 10GiB >> + max_memory_store: << 1GiB >> + +# NATS Publisher configuration for local development +nats-publisher: + enabled: true + container: + env: + GOMEMLIMIT: 1GiB + merge: + envFrom: + - configMapRef: + name: fuel-streams-config + resources: + requests: + cpu: 100m + memory: 512Mi + limits: + cpu: 500m + memory: 1Gi + + config: + jetstream: + fileStore: + pvc: + size: 10Gi + storageClassName: "standard" + + merge: + jetstream: + max_file_store: << 10GiB >> + max_memory_store: << 1GiB >> diff --git a/cluster/charts/fuel-streams/values.yaml b/cluster/charts/fuel-streams/values.yaml index 912e3f0e..be933eb5 100755 --- a/cluster/charts/fuel-streams/values.yaml +++ b/cluster/charts/fuel-streams/values.yaml @@ -1,6 +1,3 @@ -docker: - registry: registry.dev.svc.cluster.local:5000 - config: # Override the name and fullname of the chart nameOverride: "" @@ -72,28 +69,32 @@ startupProbe: failureThreshold: 6 successThreshold: 1 -tls: - enabled: false - issuer: "letsencrypt-prod" - duration: "2160h" - renewBefore: "360h" - annotations: {} - labels: {} +# ------------------------------------------------------------------------------------------------- +# Global ConfigMap +# ------------------------------------------------------------------------------------------------- -externalService: +commonConfigMap: + enabled: true + data: + AWS_S3_BUCKET_NAME: "fuel-streams-staging" + AWS_ENDPOINT_URL: "https://s3.us-east-1.amazonaws.com" + AWS_REGION: "us-east-1" + AWS_S3_ENABLED: "true" + USE_METRICS: "false" + NATS_URL: "fuel-streams-nats-core:4222" + NATS_PUBLISHER_URL: "fuel-streams-nats-publisher:4222" + NATS_SYSTEM_USER: "sys" + NATS_SYSTEM_PASS: "sys" + NATS_ADMIN_USER: "admin" + NATS_ADMIN_PASS: "admin" + NATS_PUBLIC_USER: "default_user" + NATS_PUBLIC_PASS: "" + +# This is a secret that is used for local development +# It is not used in production +localSecrets: enabled: false - dns: "streams.svc.cluster.local" - labels: {} - annotations: {} - ports: - - name: websocket - port: 8443 - targetPort: websocket - protocol: TCP - - name: webserver - port: 8082 - targetPort: http - protocol: TCP + data: {} # ------------------------------------------------------------------------------------------------- # Monitoring @@ -109,15 +110,18 @@ monitoring: publisher: enabled: true network: mainnet + port: 8080 image: - repository: fuel-streams-publisher - pullPolicy: Never - tag: "latest" + repository: ghcr.io/fuellabs/sv-publisher + pullPolicy: Always + tag: latest + args: [] - service: - type: ClusterIP - port: 8080 + # You can override the env variables for the container here + # using a map or an array of key-value pairs + env: [] + envFrom: [] prometheus: enabled: false @@ -126,8 +130,8 @@ publisher: storage: name: rocks-db - size: 10Gi - storageClass: standard + size: 500Gi + storageClass: "gp3-generic" accessMode: ReadWriteOnce mountPath: /mnt/db @@ -154,48 +158,102 @@ publisher: maxReplicas: 3 targetCPUUtilizationPercentage: 80 targetMemoryUtilizationPercentage: 80 + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + percentValue: 100 + periodSeconds: 15 + scaleUp: + stabilizationWindowSeconds: 0 + percentValue: 100 + podValue: 4 + periodSeconds: 15 + +# ------------------------------------------------------------------------------------------------- +# Consumer configuration +# ------------------------------------------------------------------------------------------------- + 
+consumer: + enabled: true + port: 8080 + image: + repository: ghcr.io/fuellabs/sv-consumer + pullPolicy: Always + tag: latest + args: [] + + # You can override the env variables for the container here + # using a map or an array of key-value pairs + env: [] + envFrom: [] + + config: + replicaCount: 3 + labels: {} + annotations: {} + podAnnotations: {} + nodeSelector: {} + tolerations: [] + affinity: {} + imagePullSecrets: [] + ports: [] + livenessProbe: {} + readinessProbe: {} + startupProbe: {} + securityContext: {} + containerSecurityContext: {} + resources: {} - env: - PUBLISHER_MAX_THREADS: "32" - NATS_URL: "fuel-streams-nats-publisher:4222" - HISTORICAL: "true" - - # Additional environment variables with complex structures - # extraEnv: [] - # - name: RELAYER - # valueFrom: - # secretKeyRef: - # name: fuel-streams-publisher - # key: RELAYER - # - name: KEYPAIR - # valueFrom: - # secretKeyRef: - # name: fuel-streams-publisher - # key: KEYPAIR - # - name: NATS_ADMIN_PASS - # valueFrom: - # secretKeyRef: - # name: fuel-streams-publisher - # key: NATS_ADMIN_PASS - # Optional: Bulk environment references - # envFrom: {} - # - configMapRef: - # name: additional-config - # - secretRef: - # name: additional-secrets + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + percentValue: 100 + periodSeconds: 15 + scaleUp: + stabilizationWindowSeconds: 0 + percentValue: 100 + podValue: 4 + periodSeconds: 15 # ------------------------------------------------------------------------------------------------- # WebServer configuration # ------------------------------------------------------------------------------------------------- webserver: - enabled: true - port: 9003 + enabled: false + network: mainnet image: - repository: fuel-streams-ws - pullPolicy: Never - tag: "latest" + repository: ghcr.io/fuellabs/sv-webserver + pullPolicy: Always + tag: latest + + service: + enabled: true + type: LoadBalancer + port: 9003 + dns: "stream-staging.fuel.network" + annotations: {} + labels: {} + + tls: + enabled: true + issuer: "letsencrypt-prod" + duration: "2160h" + renewBefore: "360h" + annotations: {} + labels: {} + + # You can override the env variables for the container here + # using a map or an array of key-value pairs + env: [] + envFrom: [] config: replicaCount: 1 @@ -212,64 +270,25 @@ webserver: startupProbe: {} securityContext: {} containerSecurityContext: {} - resources: - requests: - cpu: 100m - memory: 128Mi - limits: - cpu: 500m - memory: 512Mi + resources: {} autoscaling: enabled: false minReplicas: 1 - maxReplicas: 3 + maxReplicas: 5 targetCPUUtilizationPercentage: 80 targetMemoryUtilizationPercentage: 80 + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + percentValue: 100 + periodSeconds: 15 + scaleUp: + stabilizationWindowSeconds: 0 + percentValue: 100 + podValue: 4 + periodSeconds: 15 - env: - STREAMER_MAX_WORKERS: "10" - API_PORT: 9003 - JWT_AUTH_SECRET: "secret" - USE_ELASTIC_LOGGING: false - USE_METRICS: true - AWS_S3_ENABLED: true - NATS_URL: "fuel-streams-nats-publisher:4222" - NETWORK: testnet - - # Additional environment variables with complex structures - # extraEnv: [] - # - name: AWS_ACCESS_KEY_ID - # valueFrom: - # secretKeyRef: - # name: fuel-streams-webserver - # key: AWS_ACCESS_KEY_ID - # - name: AWS_SECRET_ACCESS_KEY - # valueFrom: - # secretKeyRef: - # name: fuel-streams-webserver - # key: AWS_SECRET_ACCESS_KEY - # - 
name: AWS_REGION - # valueFrom: - # secretKeyRef: - # name: fuel-streams-webserver - # key: AWS_REGION - # - name: AWS_S3_BUCKET_NAME - # valueFrom: - # secretKeyRef: - # name: fuel-streams-webserver - # key: AWS_S3_BUCKET_NAME - # - name: AWS_ENDPOINT_URL - # valueFrom: - # secretKeyRef: - # name: fuel-streams-webserver - # key: AWS_ENDPOINT_URL - # Optional: Bulk environment references - # envFrom: {} - # - configMapRef: - # name: additional-config - # - secretRef: - # name: additional-secrets # ------------------------------------------------------------------------------------------------- # NATS Core configuration # ------------------------------------------------------------------------------------------------- @@ -295,17 +314,14 @@ nats-core: container: image: repository: nats - tag: 2.10.22-alpine + tag: 2.10.24-alpine env: - GOMEMLIMIT: 8GiB - startupProbe: - initialDelaySeconds: 60 - periodSeconds: 10 - failureThreshold: 1080 - resources: - requests: - cpu: 8 - memory: 8Gi + GOMEMLIMIT: 7GiB + merge: + resources: + requests: + cpu: 2 + memory: 8Gi service: enabled: true @@ -314,6 +330,8 @@ nats-core: enabled: true cluster: enabled: true + websocket: + enabled: true leafnodes: enabled: true monitor: @@ -323,145 +341,46 @@ nats-core: cluster: enabled: true port: 6222 - replicas: 3 + replicas: 5 routeURLs: useFQDN: true - jetstream: - enabled: true - fileStore: - dir: /data - pvc: - enabled: true - size: 500Gi - storageClassName: standard - - leafnodes: + websocket: enabled: true - port: 7422 - - monitor: - enabled: false - port: 8222 - - merge: - jetstream: - max_file_store: << 500GiB >> - max_memory_store: << 7168MiB >> - domain: "central" - max_payload: << 8MiB >> - -# ------------------------------------------------------------------------------------------------- -# NATS Client configuration -# ------------------------------------------------------------------------------------------------- - -nats-client: - enabled: true - - global: - labels: - app.kubernetes.io/service: external-service - - natsBox: - enabled: false - - promExporter: - enabled: false - - container: - image: - repository: nats - tag: 2.10.22-alpine - env: - GOMEMLIMIT: 2GiB - startupProbe: - initialDelaySeconds: 60 - periodSeconds: 10 - failureThreshold: 1080 - resources: - requests: - cpu: 2 - memory: 2Gi - - service: - enabled: true - ports: - nats: - enabled: true - websocket: - enabled: true - monitor: - enabled: false - - statefulSet: - merge: - spec: - replicas: 2 - - podTemplate: - topologySpreadConstraints: - kubernetes.io/hostname: - maxSkew: 1 - whenUnsatisfiable: DoNotSchedule + port: 8443 - config: jetstream: enabled: true fileStore: dir: /data pvc: enabled: true - size: 20Gi - storageClassName: standard - merge: - domain: "client" + size: 2000Gi + storageClassName: "gp3-generic" leafnodes: enabled: true port: 7422 - remotes: - - url: "nats://nats-core:7422" - - websocket: - enabled: true - port: 8443 - merge: - no_tls: true - no_auth_user: default_user monitor: enabled: false port: 8222 merge: + max_payload: << 32MiB >> jetstream: - max_file_store: << 20GiB >> - max_memory_store: << 2048MiB >> - max_payload: << 8MiB >> - accounts: - USERS: - jetstream: enabled - users: - - user: default_user - permissions: - subscribe: ">" - publish: - deny: - - "*.by_id.>" - - "*.blocks.>" - - "*.transactions.>" - - "*.inputs.>" - - "*.outputs.>" - - "*.receipts.>" - - "*.logs.>" - - "*.utxos.>" - - "$JS.API.STREAM.CREATE.>" - - "$JS.API.STREAM.UPDATE.>" - - "$JS.API.STREAM.DELETE.>" - - "$JS.API.STREAM.PURGE.>" 
- - "$JS.API.STREAM.RESTORE.>" - - "$JS.API.STREAM.MSG.DELETE.>" - - "$JS.API.CONSUMER.DURABLE.CREATE.>" + domain: CORE + sync_interval: << 30s >> + max_outstanding_catchup: << 512MiB >> + max_file_store: << 2000GiB >> + max_memory_store: << 7GiB >> + system_account: SYS + $include: auth.conf + + configMap: + merge: + $tplYaml: | + {{- include "nats-accounts" . | nindent 8 }} # ------------------------------------------------------------------------------------------------- # NATS Publisher configuration @@ -476,40 +395,33 @@ nats-publisher: promExporter: enabled: false + statefulSet: + merge: + spec: + replicas: 5 + container: image: repository: nats - tag: 2.10.22-alpine + tag: 2.10.24-alpine env: - GOMEMLIMIT: 3GiB - startupProbe: - initialDelaySeconds: 60 - periodSeconds: 10 - failureThreshold: 1080 - resources: - requests: - cpu: 4 - memory: 4Gi + GOMEMLIMIT: 7GiB + merge: + resources: + requests: + cpu: 2 + memory: 8Gi service: enabled: true ports: nats: enabled: true + leafnodes: + enabled: true monitor: enabled: false - statefulSet: - merge: - spec: - replicas: 3 - - podTemplate: - topologySpreadConstraints: - kubernetes.io/hostname: - maxSkew: 1 - whenUnsatisfiable: DoNotSchedule - config: jetstream: enabled: true @@ -517,23 +429,33 @@ nats-publisher: dir: /data pvc: enabled: true - size: 50Gi - storageClassName: standard - merge: - domain: "publisher" + size: 100Gi + storageClassName: "gp3-generic" leafnodes: enabled: true port: 7422 - remotes: - - url: "nats://nats-core:7422" + merge: + remotes: + - urls: ["nats-leaf://admin:admin@fuel-streams-nats-core:7422"] + account: ADMIN monitor: enabled: false port: 8222 merge: + max_payload: << 32MiB >> jetstream: - max_file_store: << 50GiB >> - max_memory_store: << 3072MiB >> - max_payload: << 8MiB >> + domain: PUBLISHER + sync_interval: << 30s >> + max_outstanding_catchup: << 512MiB >> + max_file_store: << 100GiB >> + max_memory_store: << 7GiB >> + system_account: SYS + $include: auth.conf + + configMap: + merge: + $tplYaml: | + {{- include "nats-accounts" . 
| nindent 8 }} diff --git a/cluster/docker/docker-compose.yml b/cluster/docker/docker-compose.yml index 1966f3c3..34b76756 100644 --- a/cluster/docker/docker-compose.yml +++ b/cluster/docker/docker-compose.yml @@ -1,38 +1,66 @@ services: - nats: + nats-core: + profiles: + - all + - nats image: nats:latest - container_name: nats + container_name: nats-core restart: always ports: - 4222:4222 - - 8222:8222 - - 8443:8443 volumes: - - ./nats.conf:/etc/nats/nats.conf + - ./nats-config/core.conf:/etc/nats/nats.conf + - ./nats-config/accounts.conf:/etc/nats/accounts.conf command: - - -m - - "8222" - - --name=fuel-streams-publisher-server + - --name=fuel-streams-nats-core - --js - --config=/etc/nats/nats.conf - -D env_file: - ./../../.env + + nats-publisher: + profiles: + - all + - nats + image: nats:latest + container_name: nats-publisher + restart: always + ports: + - 4333:4222 + volumes: + - ./nats-config/publisher.conf:/etc/nats/nats.conf + - ./nats-config/accounts.conf:/etc/nats/accounts.conf + command: + - --name=fuel-streams-nats-publisher + - --js + - --config=/etc/nats/nats.conf + - -D + env_file: + - ./../../.env + depends_on: + - nats-core + localstack: + profiles: + - all + - localstack image: localstack/localstack:latest container_name: localstack restart: always ports: - - "4566:4566" # LocalStack main gateway port - - "4572:4572" # S3 service port (optional) + - "4566:4566" # LocalStack main gateway port + - "4572:4572" # S3 service port (optional) environment: - - SERVICES=s3 # Enable just S3 service + - SERVICES=s3 # Enable just S3 service - DEBUG=1 - - AWS_ACCESS_KEY_ID=test - - AWS_SECRET_ACCESS_KEY=test - - DEFAULT_REGION=us-east-1 - - DEFAULT_BUCKETS=fuel-streams-local + - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} + - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} + - DEFAULT_REGION=${AWS_REGION} + - DEFAULT_BUCKETS=${AWS_S3_BUCKET_NAME} volumes: - ./localstack-data:/var/lib/localstack - /var/run/docker.sock:/var/run/docker.sock - ./init-localstack.sh:/etc/localstack/init/ready.d/init-localstack.sh + env_file: + - ./../../.env diff --git a/cluster/docker/fuel-streams-publisher.Dockerfile b/cluster/docker/fuel-streams-publisher.Dockerfile deleted file mode 100644 index a1b7f00f..00000000 --- a/cluster/docker/fuel-streams-publisher.Dockerfile +++ /dev/null @@ -1,130 +0,0 @@ -# Stage 1: Build -FROM --platform=$BUILDPLATFORM tonistiigi/xx AS xx -FROM --platform=$BUILDPLATFORM rust:1.81.0 AS chef - -ARG TARGETPLATFORM -RUN cargo install cargo-chef && rustup target add wasm32-unknown-unknown -WORKDIR /build/ - -COPY --from=xx / / - -# hadolint ignore=DL3008 -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - lld \ - clang \ - libclang-dev \ - && xx-apt-get update \ - && xx-apt-get install -y libc6-dev g++ binutils \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - - -FROM chef AS planner -ENV CARGO_NET_GIT_FETCH_WITH_CLI=true -COPY . . -RUN cargo chef prepare --recipe-path recipe.json - - -FROM chef AS builder -ARG DEBUG_SYMBOLS=false -ENV CARGO_NET_GIT_FETCH_WITH_CLI=true -ENV CARGO_PROFILE_RELEASE_DEBUG=$DEBUG_SYMBOLS -COPY --from=planner /build/recipe.json recipe.json -RUN echo $CARGO_PROFILE_RELEASE_DEBUG -# Build our project dependencies, not our application! 
-RUN \ - --mount=type=cache,target=/usr/local/cargo/registry/index \ - --mount=type=cache,target=/usr/local/cargo/registry/cache \ - --mount=type=cache,target=/usr/local/cargo/git/db \ - --mount=type=cache,target=/build/target \ - xx-cargo chef cook --release --no-default-features -p fuel-streams-publisher --recipe-path recipe.json -# Up to this point, if our dependency tree stays the same, -# all layers should be cached. -COPY . . -# build application -RUN \ - --mount=type=cache,target=/usr/local/cargo/registry/index \ - --mount=type=cache,target=/usr/local/cargo/registry/cache \ - --mount=type=cache,target=/usr/local/cargo/git/db \ - --mount=type=cache,target=/build/target \ - xx-cargo build --release --no-default-features -p fuel-streams-publisher \ - && xx-verify ./target/$(xx-cargo --print-target-triple)/release/fuel-streams-publisher \ - && cp ./target/$(xx-cargo --print-target-triple)/release/fuel-streams-publisher /root/fuel-streams-publisher \ - && cp ./target/$(xx-cargo --print-target-triple)/release/fuel-streams-publisher.d /root/fuel-streams-publisher.d - -# Stage 2: Run -FROM ubuntu:22.04 AS run - -ARG IP=0.0.0.0 -ARG PORT=4000 -ARG TELEMETRY_PORT=8080 -ARG P2P_PORT=30333 -ARG DB_PATH=/mnt/db/ -ARG POA_INSTANT=false -ARG RELAYER_LOG_PAGE_SIZE=2000 -ARG SERVICE_NAME="NATS Publisher Node" -ARG SYNC_HEADER_BATCH_SIZE=100 -ARG RESERVED_NODES=/dns4/p2p-testnet.fuel.network/tcp/30333/p2p/16Uiu2HAmDxoChB7AheKNvCVpD4PHJwuDGn8rifMBEHmEynGHvHrf - -ENV IP=$IP -ENV PORT=$PORT -ENV TELEMETRY_PORT=$TELEMETRY_PORT -ENV DB_PATH=$DB_PATH -ENV POA_INSTANT=false -ENV RELAYER_LOG_PAGE_SIZE=$RELAYER_LOG_PAGE_SIZE -ENV SERVICE_NAME=$SERVICE_NAME -ENV SYNC_HEADER_BATCH_SIZE=$SYNC_HEADER_BATCH_SIZE -ENV RESERVED_NODES=$RESERVED_NODES -ENV HISTORICAL=false - -ENV KEYPAIR= -ENV RELAYER= -ENV RELAYER_V2_LISTENING_CONTRACTS= -ENV RELAYER_DA_DEPLOY_HEIGHT= -ENV CHAIN_CONFIG= -ENV NATS_URL= -ENV USE_METRICS= -ENV USE_ELASTIC_LOGGING= - -WORKDIR /usr/src - -RUN apt-get update -y \ - && apt-get install -y --no-install-recommends ca-certificates curl \ - # Clean up - && apt-get autoremove -y \ - && apt-get clean -y \ - && rm -rf /var/lib/apt/lists/* - -COPY --from=builder /root/fuel-streams-publisher . -COPY --from=builder /root/fuel-streams-publisher.d . - -COPY /cluster/chain-config ./chain-config -EXPOSE ${PORT} -EXPOSE ${P2P_PORT} -EXPOSE ${TELEMETRY_PORT} - -# https://stackoverflow.com/a/44671685 -# https://stackoverflow.com/a/40454758 -# hadolint ignore=DL3025 -CMD exec ./fuel-streams-publisher \ - --service-name "${SERVICE_NAME}" \ - --keypair $KEYPAIR \ - --relayer $RELAYER \ - --ip $IP \ - --port $PORT \ - --telemetry-port $TELEMETRY_PORT \ - --peering-port $P2P_PORT \ - --db-path "${DB_PATH}" \ - --utxo-validation \ - --poa-instant $POA_INSTANT \ - --snapshot ./chain-config/${CHAIN_CONFIG} \ - --enable-p2p \ - --reserved-nodes $RESERVED_NODES \ - --sync-header-batch-size $SYNC_HEADER_BATCH_SIZE \ - --enable-relayer \ - --relayer-v2-listening-contracts $RELAYER_V2_LISTENING_CONTRACTS \ - --relayer-da-deploy-height $RELAYER_DA_DEPLOY_HEIGHT \ - --relayer-log-page-size $RELAYER_LOG_PAGE_SIZE \ - --sync-block-stream-buffer-size 30 \ - $([ "$HISTORICAL" = "true" ] && echo "--historical") diff --git a/cluster/docker/init-localstack.sh b/cluster/docker/init-localstack.sh index 9ad8d0d7..befa0901 100755 --- a/cluster/docker/init-localstack.sh +++ b/cluster/docker/init-localstack.sh @@ -3,5 +3,6 @@ set -e echo "Creating S3 bucket in LocalStack..." 
-awslocal s3 mb s3://fuel-streams-test -echo "Bucket created: fuel-streams-test" +BUCKET_NAME=${AWS_S3_BUCKET_NAME:-fuel-streams-test} +awslocal s3 mb "s3://${BUCKET_NAME}" +echo "Bucket created: ${BUCKET_NAME}" diff --git a/cluster/docker/nats-config/accounts.conf b/cluster/docker/nats-config/accounts.conf new file mode 100644 index 00000000..59a035c8 --- /dev/null +++ b/cluster/docker/nats-config/accounts.conf @@ -0,0 +1,15 @@ +accounts { + SYS: { + users: [{user: $NATS_SYSTEM_USER, password: $NATS_SYSTEM_PASS}] + }, + ADMIN: { + users: [{user: $NATS_ADMIN_USER, password: $NATS_ADMIN_PASS}] + jetstream: enabled + }, + PUBLIC: { + users: [{user: $NATS_PUBLIC_USER, password: $NATS_PUBLIC_PASS}] + jetstream: enabled + } +} + +system_account: SYS diff --git a/cluster/docker/nats-config/client.conf b/cluster/docker/nats-config/client.conf new file mode 100644 index 00000000..08d9e2b3 --- /dev/null +++ b/cluster/docker/nats-config/client.conf @@ -0,0 +1,18 @@ +port: 4222 +server_name: client-server + +jetstream { + store_dir: "./data/store_client" + domain: CLIENT +} + +leafnodes { + remotes: [ + { + urls: ["nats://admin:admin@nats-core:7422"] + account: "ADMIN" + } + ] +} + +include ./accounts.conf diff --git a/cluster/docker/nats-config/core.conf b/cluster/docker/nats-config/core.conf new file mode 100644 index 00000000..2e8f11f4 --- /dev/null +++ b/cluster/docker/nats-config/core.conf @@ -0,0 +1,13 @@ +port: 4222 +server_name: core-server + +jetstream { + store_dir: "./data/core" + domain: CORE +} + +leafnodes { + port: 7422 +} + +include ./accounts.conf diff --git a/cluster/docker/nats-config/publisher.conf b/cluster/docker/nats-config/publisher.conf new file mode 100644 index 00000000..52de2113 --- /dev/null +++ b/cluster/docker/nats-config/publisher.conf @@ -0,0 +1,18 @@ +port: 4222 +server_name: leaf-server + +jetstream { + store_dir: "./data/store_leaf" + domain: LEAF +} + +leafnodes { + remotes: [ + { + urls: ["nats://admin:admin@nats-core:7422"] + account: "ADMIN" + } + ] +} + +include ./accounts.conf diff --git a/cluster/docker/nats.conf b/cluster/docker/nats.conf deleted file mode 100644 index 783a0450..00000000 --- a/cluster/docker/nats.conf +++ /dev/null @@ -1,52 +0,0 @@ -port = 4222 -http_port = 8222 -server_name = "fuel-nats-server" - -authorization = { - timeout = 5 - ADMIN = { - publish = ">" - subscribe = ">" - } - default_permissions = { - subscribe = ">" - publish = { - deny = [ - "*.blocks.>", - "*.transactions.>", - "*.inputs.>", - "*.outputs.>", - "*.receipts.>", - "*.logs.>", - "*.utxos.>", - "$JS.API.STREAM.CREATE.>", - "$JS.API.STREAM.UPDATE.>", - "$JS.API.STREAM.DELETE.>", - "$JS.API.STREAM.PURGE.>", - "$JS.API.STREAM.RESTORE.>", - "$JS.API.STREAM.MSG.DELETE.>", - "$JS.API.CONSUMER.DURABLE.CREATE.>", - ] - } - } - users = [ - { user = admin, password = $NATS_ADMIN_PASS, permissions = $ADMIN }, - { user = default_user } - ] -} - -jetstream = { - max_file_store = 21474836480 -} - -max_payload = 8388608 - -websocket = { - port = 8443 - no_tls = true - same_origin = false - allowed_origins = [] - compression = false - handshake_timeout = "10s" - no_auth_user = default_user -} diff --git a/cluster/docker/fuel-streams-ws.Dockerfile b/cluster/docker/sv-consumer.Dockerfile similarity index 67% rename from cluster/docker/fuel-streams-ws.Dockerfile rename to cluster/docker/sv-consumer.Dockerfile index 5e4a7a8a..5b20d1cd 100644 --- a/cluster/docker/fuel-streams-ws.Dockerfile +++ b/cluster/docker/sv-consumer.Dockerfile @@ -38,7 +38,7 @@ RUN \ 
--mount=type=cache,target=/usr/local/cargo/registry/cache \ --mount=type=cache,target=/usr/local/cargo/git/db \ --mount=type=cache,target=/build/target \ - xx-cargo chef cook --release --no-default-features -p fuel-streams-ws --recipe-path recipe.json + xx-cargo chef cook --release --no-default-features -p sv-consumer --recipe-path recipe.json # Up to this point, if our dependency tree stays the same, # all layers should be cached. COPY . . @@ -48,29 +48,16 @@ RUN \ --mount=type=cache,target=/usr/local/cargo/registry/cache \ --mount=type=cache,target=/usr/local/cargo/git/db \ --mount=type=cache,target=/build/target \ - xx-cargo build --release --no-default-features -p fuel-streams-ws \ - && xx-verify ./target/$(xx-cargo --print-target-triple)/release/fuel-streams-ws \ - && cp ./target/$(xx-cargo --print-target-triple)/release/fuel-streams-ws /root/fuel-streams-ws \ - && cp ./target/$(xx-cargo --print-target-triple)/release/fuel-streams-ws.d /root/fuel-streams-ws.d + xx-cargo build --release --no-default-features -p sv-consumer \ + && xx-verify ./target/$(xx-cargo --print-target-triple)/release/sv-consumer \ + && cp ./target/$(xx-cargo --print-target-triple)/release/sv-consumer /root/sv-consumer \ + && cp ./target/$(xx-cargo --print-target-triple)/release/sv-consumer.d /root/sv-consumer.d # Stage 2: Run FROM ubuntu:22.04 AS run -ARG API_PORT=9003 - -ENV API_PORT=$API_PORT -ENV NATS_URL= -ENV NETWORK= -ENV USE_METRICS= -ENV USE_ELASTIC_LOGGING= -ENV AWS_S3_ENABLED= -ENV AWS_ACCESS_KEY_ID= -ENV AWS_SECRET_ACCESS_KEY= -ENV AWS_REGION= -ENV AWS_ENDPOINT_URL= -ENV AWS_S3_BUCKET_NAME= -ENV JWT_AUTH_SECRET= - +ARG PORT=8080 +ENV PORT=$PORT WORKDIR /usr/src RUN apt-get update -y \ @@ -80,12 +67,10 @@ RUN apt-get update -y \ && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /root/fuel-streams-ws . -COPY --from=builder /root/fuel-streams-ws.d . +COPY --from=builder /root/sv-consumer . +COPY --from=builder /root/sv-consumer.d . -EXPOSE ${API_PORT} +EXPOSE ${PORT} -# https://stackoverflow.com/a/44671685 -# https://stackoverflow.com/a/40454758 -# hadolint ignore=DL3025 -CMD exec ./fuel-streams-ws +WORKDIR /usr/src +CMD ["./sv-consumer"] diff --git a/cluster/docker/sv-publisher.Dockerfile b/cluster/docker/sv-publisher.Dockerfile new file mode 100644 index 00000000..de9b042f --- /dev/null +++ b/cluster/docker/sv-publisher.Dockerfile @@ -0,0 +1,81 @@ +# Stage 1: Build +FROM --platform=$BUILDPLATFORM tonistiigi/xx AS xx +FROM --platform=$BUILDPLATFORM rust:1.81.0 AS chef + +# Add package name as build argument +ARG TARGETPLATFORM + +RUN cargo install cargo-chef && rustup target add wasm32-unknown-unknown +WORKDIR /build/ + +COPY --from=xx / / + +# hadolint ignore=DL3008 +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + lld \ + clang \ + libclang-dev \ + && xx-apt-get update \ + && xx-apt-get install -y libc6-dev g++ binutils \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +FROM chef AS planner +ENV CARGO_NET_GIT_FETCH_WITH_CLI=true +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + +FROM chef AS builder +ARG PACKAGE_NAME +ARG DEBUG_SYMBOLS=false +ENV CARGO_NET_GIT_FETCH_WITH_CLI=true +ENV CARGO_PROFILE_RELEASE_DEBUG=$DEBUG_SYMBOLS +COPY --from=planner /build/recipe.json recipe.json +RUN echo $CARGO_PROFILE_RELEASE_DEBUG +# Build our project dependencies, not our application! 
+RUN \
+    --mount=type=cache,target=/usr/local/cargo/registry/index \
+    --mount=type=cache,target=/usr/local/cargo/registry/cache \
+    --mount=type=cache,target=/usr/local/cargo/git/db \
+    --mount=type=cache,target=/build/target \
+    xx-cargo chef cook --release --no-default-features -p sv-publisher --recipe-path recipe.json
+# Up to this point, if our dependency tree stays the same,
+# all layers should be cached.
+COPY . .
+# build application
+RUN \
+    --mount=type=cache,target=/usr/local/cargo/registry/index \
+    --mount=type=cache,target=/usr/local/cargo/registry/cache \
+    --mount=type=cache,target=/usr/local/cargo/git/db \
+    --mount=type=cache,target=/build/target \
+    xx-cargo build --release --no-default-features -p sv-publisher \
+    && xx-verify ./target/$(xx-cargo --print-target-triple)/release/sv-publisher \
+    && cp ./target/$(xx-cargo --print-target-triple)/release/sv-publisher /root/sv-publisher \
+    && cp ./target/$(xx-cargo --print-target-triple)/release/sv-publisher.d /root/sv-publisher.d
+
+# Stage 2: Run
+FROM ubuntu:22.04 AS run
+
+ARG PORT=4000
+ARG P2P_PORT=30333
+ARG DB_PATH=/mnt/db
+# Re-export the build args as ENV so they are still defined when the
+# container starts (ARG values are not available at runtime).
+ENV PORT="${PORT}"
+ENV P2P_PORT="${P2P_PORT}"
+ENV DB_PATH="${DB_PATH}"
+
+WORKDIR /usr/src
+
+RUN apt-get update -y \
+    && apt-get install -y --no-install-recommends ca-certificates curl \
+    && apt-get autoremove -y \
+    && apt-get clean -y \
+    && rm -rf /var/lib/apt/lists/*
+
+COPY --from=builder /root/sv-publisher .
+COPY --from=builder /root/sv-publisher.d .
+
+COPY /cluster/chain-config ./chain-config
+EXPOSE ${PORT}
+EXPOSE ${P2P_PORT}
+
+# Shell form so the ${...} variables are expanded at container start;
+# exec-form CMD performs no variable substitution.
+CMD ./sv-publisher --port "${PORT}" --peering-port "${P2P_PORT}" --db-path "${DB_PATH}"
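# Not part of the diff: a minimal local-build sketch for the new publisher
# image; the "sv-publisher:local" tag and the published port are illustrative.
docker build -t sv-publisher:local -f cluster/docker/sv-publisher.Dockerfile .
docker run --rm -p 4000:4000 -e PORT=4000 sv-publisher:local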
diff --git a/cluster/docker/sv-webserver.Dockerfile b/cluster/docker/sv-webserver.Dockerfile
new file mode 100644
index 00000000..75140bd4
--- /dev/null
+++ b/cluster/docker/sv-webserver.Dockerfile
@@ -0,0 +1,75 @@
+# Stage 1: Build
+FROM --platform=$BUILDPLATFORM tonistiigi/xx AS xx
+FROM --platform=$BUILDPLATFORM rust:1.81.0 AS chef
+
+ARG TARGETPLATFORM
+RUN cargo install cargo-chef && rustup target add wasm32-unknown-unknown
+WORKDIR /build/
+
+COPY --from=xx / /
+
+# hadolint ignore=DL3008
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    lld \
+    clang \
+    libclang-dev \
+    && xx-apt-get update \
+    && xx-apt-get install -y libc6-dev g++ binutils \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+
+FROM chef AS planner
+ENV CARGO_NET_GIT_FETCH_WITH_CLI=true
+COPY . .
+RUN cargo chef prepare --recipe-path recipe.json
+
+
+FROM chef AS builder
+ARG DEBUG_SYMBOLS=false
+ENV CARGO_NET_GIT_FETCH_WITH_CLI=true
+ENV CARGO_PROFILE_RELEASE_DEBUG=$DEBUG_SYMBOLS
+COPY --from=planner /build/recipe.json recipe.json
+RUN echo $CARGO_PROFILE_RELEASE_DEBUG
+# Build our project dependencies, not our application!
+RUN \
+    --mount=type=cache,target=/usr/local/cargo/registry/index \
+    --mount=type=cache,target=/usr/local/cargo/registry/cache \
+    --mount=type=cache,target=/usr/local/cargo/git/db \
+    --mount=type=cache,target=/build/target \
+    xx-cargo chef cook --release --no-default-features -p sv-webserver --recipe-path recipe.json
+# Up to this point, if our dependency tree stays the same,
+# all layers should be cached.
+COPY . .
+# build application
+RUN \
+    --mount=type=cache,target=/usr/local/cargo/registry/index \
+    --mount=type=cache,target=/usr/local/cargo/registry/cache \
+    --mount=type=cache,target=/usr/local/cargo/git/db \
+    --mount=type=cache,target=/build/target \
+    xx-cargo build --release --no-default-features -p sv-webserver \
+    && xx-verify ./target/$(xx-cargo --print-target-triple)/release/sv-webserver \
+    && cp ./target/$(xx-cargo --print-target-triple)/release/sv-webserver /root/sv-webserver \
+    && cp ./target/$(xx-cargo --print-target-triple)/release/sv-webserver.d /root/sv-webserver.d
+
+# Stage 2: Run
+FROM ubuntu:22.04 AS run
+
+ARG PORT=9003
+ENV PORT=$PORT
+
+WORKDIR /usr/src
+
+RUN apt-get update -y \
+    && apt-get install -y --no-install-recommends ca-certificates curl \
+    # Clean up
+    && apt-get autoremove -y \
+    && apt-get clean -y \
+    && rm -rf /var/lib/apt/lists/*
+
+COPY --from=builder /root/sv-webserver .
+COPY --from=builder /root/sv-webserver.d .
+
+EXPOSE ${PORT}
+CMD ["./sv-webserver"]
diff --git a/cluster/scripts/build_docker.sh b/cluster/scripts/build_docker.sh
new file mode 100755
index 00000000..0a5eebb7
--- /dev/null
+++ b/cluster/scripts/build_docker.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+
+# Fail fast: exit on errors, unset variables, and pipeline failures
+set -euo pipefail
+
+# Help/Usage function
+usage() {
+    cat << EOF
+Usage: $(basename "$0") [OPTIONS]
+
+Build a Docker image using specified parameters.
+
+Options:
+    --dockerfile    Path to Dockerfile (default: cluster/docker/sv-publisher.Dockerfile)
+    --build-args    Additional Docker build arguments (optional)
+    -h, --help      Show this help message
+
+Environment variables:
+    EXPECTED_IMAGE  Docker image name (default: sv-publisher)
+    EXPECTED_TAG    Docker image tag (default: latest)
+    DOCKER_HOST     Docker daemon socket (optional)
+
+Examples:
+    $(basename "$0") --dockerfile ./Dockerfile
+    $(basename "$0") --dockerfile ./Dockerfile --build-args "--build-arg KEY=VALUE"
+EOF
+    exit 1
+}
+
+# Show help if no arguments or help flag
+if [[ $# -eq 0 ]] || [[ "$1" == "-h" ]] || [[ "$1" == "--help" ]]; then
+    usage
+fi
+
+# Default values
+DOCKERFILE="cluster/docker/sv-publisher.Dockerfile"
+IMAGE_NAME=${EXPECTED_IMAGE:-"sv-publisher"}
+TAG=${EXPECTED_TAG:-"latest"}
+BUILD_ARGS=""
+
+# Parse named arguments
+while [[ $# -gt 0 ]]; do
+    case $1 in
+        --dockerfile)
+            DOCKERFILE="$2"
+            shift 2
+            ;;
+        --build-args)
+            BUILD_ARGS="$2"
+            shift 2
+            ;;
+        *)
+            echo "Error: Unknown argument '$1'"
+            usage
+            ;;
+    esac
+done
+
+# Validate required files exist
+if [[ ! -f "$DOCKERFILE" ]]; then
+    echo "Error: Dockerfile not found at $DOCKERFILE"
+    exit 1
+fi
+
+# Ensure we're using minikube's docker daemon
+if [[ -n "${DOCKER_HOST:-}" ]]; then
+    echo "Using provided DOCKER_HOST: $DOCKER_HOST"
+else
+    eval "$(minikube docker-env)"
+fi
+
+echo "Building image ${IMAGE_NAME}:${TAG} using ${DOCKERFILE}"
+echo "Build args: ${BUILD_ARGS}"
+
+# Build the docker image with build args if provided
+if [[ -n "${BUILD_ARGS}" ]]; then
+    docker build ${BUILD_ARGS} -t "${IMAGE_NAME}:${TAG}" -f "${DOCKERFILE}" .
+else
+    docker build -t "${IMAGE_NAME}:${TAG}" -f "${DOCKERFILE}" .
+fi
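# Not part of the diff: example invocations of the consolidated build script,
# using the --dockerfile flag and EXPECTED_* variables it defines.
./cluster/scripts/build_docker.sh --dockerfile cluster/docker/sv-webserver.Dockerfile
EXPECTED_IMAGE=sv-consumer EXPECTED_TAG=latest \
    ./cluster/scripts/build_docker.sh --dockerfile cluster/docker/sv-consumer.Dockerfile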
diff --git a/cluster/scripts/build_publisher.sh b/cluster/scripts/build_publisher.sh
deleted file mode 100755
index 0fe43d4b..00000000
--- a/cluster/scripts/build_publisher.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env bash
-
-set -euo pipefail
-
-# Use environment variables provided by Tilt if available
-IMAGE_NAME=${EXPECTED_IMAGE:-"fuel-streams-publisher"}
-TAG=${EXPECTED_TAG:-"latest"}
-DOCKERFILE="cluster/docker/fuel-streams-publisher.Dockerfile"
-
-# Ensure we're using minikube's docker daemon if not already set
-if [ -z "${DOCKER_HOST:-}" ]; then
-    eval $(minikube docker-env)
-fi
-
-# Build the docker image
-docker build -t ${IMAGE_NAME}:${TAG} -f ${DOCKERFILE} .
diff --git a/cluster/scripts/build_streamer.sh b/cluster/scripts/build_streamer.sh
index 48dd5375..90d90edd 100755
--- a/cluster/scripts/build_streamer.sh
+++ b/cluster/scripts/build_streamer.sh
@@ -3,9 +3,9 @@
 set -euo pipefail

 # Use environment variables provided by Tilt if available
-IMAGE_NAME=${EXPECTED_IMAGE:-"fuel-streams-ws"}
+IMAGE_NAME=${EXPECTED_IMAGE:-"sv-webserver"}
 TAG=${EXPECTED_TAG:-"latest"}
-DOCKERFILE="docker/fuel-streams-ws.Dockerfile"
+DOCKERFILE="docker/sv-webserver.Dockerfile"

 # Ensure we're using minikube's docker daemon if not already set
 if [ -z "${DOCKER_HOST:-}" ]; then
diff --git a/cluster/scripts/gen_env_secret.sh b/cluster/scripts/gen_env_secret.sh
index df0680d3..46a589f8 100755
--- a/cluster/scripts/gen_env_secret.sh
+++ b/cluster/scripts/gen_env_secret.sh
@@ -4,15 +4,12 @@
 source .env

 # Generate the YAML configuration
-cat <<EOF >cluster/charts/fuel-streams/values-publisher-secrets.yaml
-publisher:
-  extraEnv:
-    - name: RELAYER
-      value: "${RELAYER:-}"
-    - name: KEYPAIR
-      value: "${KEYPAIR:-}"
-    - name: NATS_ADMIN_PASS
-      value: "${NATS_ADMIN_PASS:-}"
+cat << EOF > cluster/charts/fuel-streams/values-secrets.yaml
+localSecrets:
+  enabled: true
+  data:
+    RELAYER: "${RELAYER:-}"
+    KEYPAIR: "${KEYPAIR:-}"
 EOF

-echo "Generated values-publisher-secrets.yaml with environment variables"
+echo "Generated values-secrets.yaml with environment variables"
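# Not part of the diff: with a populated .env at the repo root, the rewritten
# script now emits cluster/charts/fuel-streams/values-secrets.yaml shaped like
# the sketch below (values illustrative):
#
#   localSecrets:
#     enabled: true
#     data:
#       RELAYER: "https://mainnet.infura.io/v3/<key>"
#       KEYPAIR: "<generated-p2p-secret>"
./cluster/scripts/gen_env_secret.sh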
diff --git a/cluster/scripts/setup_k8s.sh b/cluster/scripts/setup_k8s.sh
deleted file mode 100755
index bb7537e8..00000000
--- a/cluster/scripts/setup_k8s.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env bash
-
-[[ $DEBUG = true ]] && set -x
-set -euo pipefail
-
-# Parse command line arguments
-NAMESPACE="${1:-fuel-streams}" # Use first argument, default to "fuel-streams" if not provided
-
-# Configure namespace and context
-echo -e "\n\033[1;33mConfiguring ${NAMESPACE} namespace and context:\033[0m"
-
-# Check if namespace exists
-if kubectl get namespace ${NAMESPACE} &>/dev/null; then
-    echo "Namespace ${NAMESPACE} already exists"
-else
-    echo "Creating namespace ${NAMESPACE}..."
-    kubectl create namespace ${NAMESPACE}
-fi
-
-# Switch to minikube context
-if ! kubectl config current-context | grep -q "minikube"; then
-    echo "Switching to minikube context..."
-    kubectl config use-context minikube
-else
-    echo "Already in minikube context"
-fi
-
-# Set namespace for current context
-CURRENT_NAMESPACE=$(kubectl config view --minify --output 'jsonpath={..namespace}')
-if [ "$CURRENT_NAMESPACE" != "${NAMESPACE}" ]; then
-    echo "Setting current namespace to ${NAMESPACE}..."
-    kubectl config set-context --current --cluster=minikube --namespace=${NAMESPACE}
-else
-    echo "Context namespace is already set to ${NAMESPACE}"
-fi
-
-# Verify context configuration
-echo -e "\n\033[1;33mVerifying cluster context:\033[0m"
-kubectl config get-contexts
diff --git a/cluster/scripts/setup_minikube.sh b/cluster/scripts/setup_minikube.sh
index 0c01374a..cf4009cd 100755
--- a/cluster/scripts/setup_minikube.sh
+++ b/cluster/scripts/setup_minikube.sh
@@ -4,12 +4,12 @@
 set -euo pipefail

 # Check if minikube is installed
-if ! command -v minikube &>/dev/null; then
+if ! command -v minikube &> /dev/null; then
     echo "Installing minikube..."
-    sudo curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 &&
-        sudo chmod +x minikube &&
-        sudo cp minikube /usr/local/bin/ &&
-        sudo rm minikube
+    sudo curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 \
+        && sudo chmod +x minikube \
+        && sudo cp minikube /usr/local/bin/ \
+        && sudo rm minikube
 else
     echo "minikube is already installed"
 fi
@@ -17,13 +17,9 @@ fi
 # Delete any existing minikube cluster
 minikube delete

-# Default values for resources
-DEFAULT_DISK_SIZE='50000mb'
-DEFAULT_MEMORY='12000mb'
-
-# Get parameters with defaults
-DISK_SIZE=${1:-$DEFAULT_DISK_SIZE}
-MEMORY=${2:-$DEFAULT_MEMORY}
+# Set disk and memory size, using defaults if not provided
+DISK_SIZE=${1:-'50000mb'}
+MEMORY=${2:-'12000mb'}

 # Start minikube with specified resources
 minikube start \
@@ -32,6 +28,56 @@ minikube start \
     --memory="$MEMORY" \
     --cpus 8

+minikube addons enable metrics-server
+minikube addons enable registry
+
+# Remove existing registry proxy container if running
+if docker ps -a | grep -q "minikube-registry-proxy"; then
+    echo "Removing existing registry proxy container..."
+    docker rm -f minikube-registry-proxy
+fi
+
+# Forward minikube registry to localhost
+docker run --rm -d \
+    --network=host \
+    --name minikube-registry-proxy \
+    alpine ash -c "apk add socat && socat TCP-LISTEN:5000,reuseaddr,fork TCP:$(minikube ip):5000"
+
 # Display minikube status
 echo -e "\n\033[1;33mMinikube Status:\033[0m"
 minikube status
+
+# Parse command line arguments
+NAMESPACE="${3:-fuel-streams}" # Use third argument ($1 and $2 are disk size and memory), default to "fuel-streams"
+
+# Configure namespace and context
+echo -e "\n\033[1;33mConfiguring ${NAMESPACE} namespace and context:\033[0m"
+
+# Check if namespace exists
+if kubectl get namespace ${NAMESPACE} &> /dev/null; then
+    echo "Namespace ${NAMESPACE} already exists"
+else
+    echo "Creating namespace ${NAMESPACE}..."
+    kubectl create namespace ${NAMESPACE}
+fi
+
+# Switch to minikube context
+if ! kubectl config current-context | grep -q "minikube"; then
+    echo "Switching to minikube context..."
+    kubectl config use-context minikube
+else
+    echo "Already in minikube context"
+fi
+
+# Set namespace for current context
+CURRENT_NAMESPACE=$(kubectl config view --minify --output 'jsonpath={..namespace}')
+if [ "$CURRENT_NAMESPACE" != "${NAMESPACE}" ]; then
+    echo "Setting current namespace to ${NAMESPACE}..."
+ kubectl config set-context --current --cluster=minikube --namespace=${NAMESPACE} +else + echo "Context namespace is already set to ${NAMESPACE}" +fi + +# Verify context configuration +echo -e "\n\033[1;33mVerifying cluster context:\033[0m" +kubectl config get-contexts diff --git a/crates/fuel-streams-core/Cargo.toml b/crates/fuel-streams-core/Cargo.toml index a2f5acb2..9e6178fc 100644 --- a/crates/fuel-streams-core/Cargo.toml +++ b/crates/fuel-streams-core/Cargo.toml @@ -11,24 +11,40 @@ version = { workspace = true } rust-version = { workspace = true } [dependencies] +anyhow = { workspace = true } async-nats = { workspace = true } async-trait = { workspace = true } chrono = { workspace = true } displaydoc = { workspace = true } -fuel-core-client = { workspace = true } +fuel-core = { workspace = true, default-features = false, features = [ + "p2p", + "relayer", + "rocksdb", + "test-helpers", +] } +fuel-core-bin = { workspace = true, default-features = false, features = [ + "p2p", + "relayer", + "rocksdb", +] } +fuel-core-client = { workspace = true, default-features = false, features = ["std"] } fuel-core-importer = { workspace = true } -fuel-core-types = { workspace = true } +fuel-core-services = { workspace = true, default-features = false, features = ["test-helpers"] } +fuel-core-storage = { workspace = true } +fuel-core-types = { workspace = true, default-features = false, features = ["std", "serde"] } fuel-data-parser = { workspace = true } fuel-networks = { workspace = true } fuel-streams-macros = { workspace = true } +fuel-streams-nats = { workspace = true } fuel-streams-storage = { workspace = true } futures = { workspace = true } hex = { workspace = true } pretty_assertions = { workspace = true, optional = true } serde = { workspace = true } -sha2 = { workspace = true } +serde_json = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } +tracing = { workspace = true } [dev-dependencies] pretty_assertions = { workspace = true } diff --git a/crates/fuel-streams-core/README.md b/crates/fuel-streams-core/README.md index 221aca64..69684062 100644 --- a/crates/fuel-streams-core/README.md +++ b/crates/fuel-streams-core/README.md @@ -61,10 +61,10 @@ use futures::StreamExt; #[tokio::main] async fn main() -> BoxedResult<()> { // Connect to NATS server - let nats_opts = NatsClientOpts::new(FuelNetwork::Local); + let nats_opts = NatsClientOpts::admin_opts(); let nats_client = NatsClient::connect(&nats_opts).await?; - let s3_opts = S3ClientOpts::new(FuelNetwork::Local); + let s3_opts = S3ClientOpts::new(S3Env::Local, S3Role::Admin); let s3_client = Arc::new(S3Client::new(&s3_opts).await?); // Create a stream for blocks diff --git a/crates/fuel-streams-publisher/src/publisher/fuel_core_like.rs b/crates/fuel-streams-core/src/fuel_core_like.rs similarity index 91% rename from crates/fuel-streams-publisher/src/publisher/fuel_core_like.rs rename to crates/fuel-streams-core/src/fuel_core_like.rs index 921cf743..85d2458f 100644 --- a/crates/fuel-streams-publisher/src/publisher/fuel_core_like.rs +++ b/crates/fuel-streams-core/src/fuel_core_like.rs @@ -2,15 +2,8 @@ use std::{sync::Arc, time::Duration}; use fuel_core::{ combined_database::CombinedDatabase, - database::{ - database_description::{on_chain::OnChain, DatabaseHeight}, - Database, - }, + database::{database_description::on_chain::OnChain, Database}, fuel_core_graphql_api::ports::DatabaseBlocks, - state::{ - generic_database::GenericDatabase, - iterable_key_value_view::IterableKeyValueViewWrapper, - }, }; use 
fuel_core_bin::FuelService;
use fuel_core_importer::ports::ImporterDatabase;
@@ -19,17 +12,12 @@ use fuel_core_types::{
     blockchain::consensus::{Consensus, Sealed},
     fuel_types::BlockHeight,
 };
-use fuel_streams_core::types::*;
 use tokio::{sync::broadcast::Receiver, time::sleep};

-pub type OffchainDatabase = GenericDatabase<
-    IterableKeyValueViewWrapper<
-        fuel_core::fuel_core_graphql_api::storage::Column,
-    >,
->;
+use crate::types::*;

 /// Interface for `fuel-core` related logic.
-/// This was introduced to simplify mocking and testing the `fuel-streams-publisher` crate.
+/// This was introduced to simplify mocking and testing the `sv-publisher` crate.
 #[async_trait::async_trait]
 pub trait FuelCoreLike: Sync + Send {
     async fn start(&self) -> anyhow::Result<()>;
@@ -42,6 +30,7 @@ pub trait FuelCoreLike: Sync + Send {
     fn base_asset_id(&self) -> &FuelCoreAssetId;
     fn chain_id(&self) -> &FuelCoreChainId;
+    fn fuel_service(&self) -> &FuelService;
     fn database(&self) -> &CombinedDatabase;

     fn onchain_database(&self) -> &Database<OnChain> {
@@ -60,14 +49,19 @@ pub trait FuelCoreLike: Sync + Send {
         &self,
     ) -> Receiver<FuelCoreImporterResult>;

-    fn get_latest_block_height(&self) -> anyhow::Result<u64> {
+    fn get_latest_block_height(&self) -> anyhow::Result<BlockHeight> {
         Ok(self
             .onchain_database()
             .latest_block_height()?
-            .map(|h| h.as_u64())
+            .map(|h| *h)
             .unwrap_or_default())
     }

+    fn get_tx_status(
+        &self,
+        tx_id: &FuelCoreBytes32,
+    ) -> anyhow::Result<Option<FuelCoreTransactionStatus>>;
+
     fn get_receipts(
         &self,
         tx_id: &FuelCoreBytes32,
     ) -> anyhow::Result<Option<Vec<FuelCoreReceipt>>> {
@@ -198,6 +192,10 @@ impl FuelCoreLike for FuelCore {
         Ok(())
     }

+    fn fuel_service(&self) -> &FuelService {
+        &self.fuel_service
+    }
+
     async fn stop(&self) {
         if matches!(
             self.fuel_service.state(),
@@ -262,12 +260,18 @@ impl FuelCoreLike for FuelCore {
             .subscribe()
     }

+    fn get_tx_status(
+        &self,
+        tx_id: &FuelCoreBytes32,
+    ) -> anyhow::Result<Option<FuelCoreTransactionStatus>> {
+        Ok(self.offchain_database()?.get_tx_status(tx_id)?)
+    }
+
     fn get_receipts(
         &self,
         tx_id: &FuelCoreBytes32,
     ) -> anyhow::Result<Option<Vec<FuelCoreReceipt>>> {
         let receipts = self
-            .offchain_database()?
             .get_tx_status(tx_id)?
             .map(|status| match &status {
                 FuelCoreTransactionStatus::Success { receipts, .. }
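// Not part of the diff: a minimal sketch of how the extended trait surface can
// be consumed; `fuel_core` is any `FuelCoreLike` implementor and `tx_id` is a
// placeholder value.
fn print_receipts(
    fuel_core: &dyn FuelCoreLike,
    tx_id: &FuelCoreBytes32,
) -> anyhow::Result<()> {
    // `get_receipts` returns Ok(None) when the transaction status is unknown.
    if let Some(receipts) = fuel_core.get_receipts(tx_id)? {
        for receipt in receipts {
            println!("{receipt:?}");
        }
    }
    Ok(())
}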
diff --git a/crates/fuel-streams-core/src/fuel_core_types.rs b/crates/fuel-streams-core/src/fuel_core_types.rs
index d42f0009..b5461427 100644
--- a/crates/fuel-streams-core/src/fuel_core_types.rs
+++ b/crates/fuel-streams-core/src/fuel_core_types.rs
@@ -1,5 +1,7 @@
-/// FuelCore Types
-/// Allows flexilibity of aggregating and transforming them for different payload types
+use fuel_core::state::{
+    generic_database::GenericDatabase,
+    iterable_key_value_view::IterableKeyValueViewWrapper,
+};
 pub use fuel_core_client::client::schema::Tai64Timestamp as FuelCoreTai64Timestamp;
 pub use fuel_core_importer::ImporterResult as FuelCoreImporterResult;
 pub use fuel_core_types::{
@@ -29,7 +31,9 @@ pub use fuel_core_types::{
     Input as FuelCoreInput,
     MessageId as FuelCoreMessageId,
     Output as FuelCoreOutput,
+    PanicInstruction as FuelCorePanicInstruction,
     Receipt as FuelCoreReceipt,
+    ScriptExecutionResult as FuelCoreScriptExecutionResult,
     StorageSlot as FuelCoreStorageSlot,
     Transaction as FuelCoreTransaction,
     TxId as FuelCoreTxId,
@@ -52,3 +56,9 @@ pub use fuel_core_types::{
     },
     tai64::Tai64 as FuelCoreTai64,
 };
+
+pub type OffchainDatabase = GenericDatabase<
+    IterableKeyValueViewWrapper<
+        fuel_core::fuel_core_graphql_api::storage::Column,
+    >,
+>;
diff --git a/crates/fuel-streams-core/src/inputs/types.rs b/crates/fuel-streams-core/src/inputs/types.rs
index fcd5b01d..91ff8c1b 100644
--- a/crates/fuel-streams-core/src/inputs/types.rs
+++ b/crates/fuel-streams-core/src/inputs/types.rs
@@ -1,3 +1,5 @@
+use fuel_core_types::fuel_crypto;
+
 use crate::types::*;

 // Input enum
@@ -171,3 +173,24 @@ pub struct InputMessage {
     pub sender: Address,
     pub witness_index: u16,
 }
+
+impl InputMessage {
+    pub fn compute_message_id(&self) -> MessageId {
+        let hasher = fuel_crypto::Hasher::default()
+            .chain(self.sender.as_ref())
+            .chain(self.recipient.as_ref())
+            .chain(self.nonce.as_ref())
+            .chain(self.amount.to_be_bytes())
+            .chain(self.data.as_ref());
+
+        (*hasher.finalize()).into()
+    }
+
+    pub fn computed_utxo_id(&self) -> UtxoId {
+        let message_id = self.compute_message_id();
+        UtxoId {
+            tx_id: Bytes32::from(message_id),
+            output_index: 0,
+        }
+    }
+}
diff --git a/crates/fuel-streams-core/src/lib.rs b/crates/fuel-streams-core/src/lib.rs
index 11fe85b5..ba8b30f8 100644
--- a/crates/fuel-streams-core/src/lib.rs
+++ b/crates/fuel-streams-core/src/lib.rs
@@ -9,7 +9,7 @@ pub mod transactions;
 pub mod utxos;

 pub mod nats {
-    pub use fuel_streams_storage::nats::*;
+    pub use fuel_streams_nats::*;
 }

 pub mod s3 {
@@ -20,6 +20,7 @@ pub mod stream;

 pub mod subjects;

+pub mod fuel_core_like;
 mod fuel_core_types;
 mod primitive_types;
 pub mod types;
@@ -28,7 +29,15 @@ pub use stream::*;

 pub mod prelude {
     pub use fuel_networks::*;
+    #[allow(unused_imports)]
     pub use fuel_streams_macros::subject::*;

-    pub use crate::{nats::*, s3::*, stream::*, subjects::*, types::*};
+    pub use crate::{
+        fuel_core_like::*,
+        nats::*,
+        s3::*,
+        stream::*,
+        subjects::*,
+        types::*,
+    };
 }
diff --git a/crates/fuel-streams-core/src/logs/types.rs b/crates/fuel-streams-core/src/logs/types.rs
index 4fdb7473..927216d4 100644
--- a/crates/fuel-streams-core/src/logs/types.rs
+++ b/crates/fuel-streams-core/src/logs/types.rs
@@ -27,94 +27,30 @@ pub enum Log {
     },
 }

-impl From<FuelCoreReceipt> for Log {
-    fn from(value: FuelCoreReceipt) -> Self {
+impl From<Receipt> for Log {
+    fn from(value: Receipt) -> Self {
         match value {
-            FuelCoreReceipt::Log {
-                id,
-                ra,
-                rb,
-                rc,
-                rd,
-                pc,
-                is,
-            } => Log::WithoutData {
-                id: id.into(),
-                ra,
-                rb,
-                rc,
-                rd,
-                pc,
-                is,
+            Receipt::Log(log) => Log::WithoutData {
+                id: log.id,
+                ra: log.ra,
+                rb: log.rb,
+                rc: log.rc,
+                rd: log.rd,
+                pc: log.pc,
+                is: log.is,
             },
-            FuelCoreReceipt::LogData {
-                id,
-                ra,
-                rb,
-                ptr,
-                len,
-                digest,
-                pc,
-                is,
-                data,
-            } => Log::WithData {
-                id: id.into(),
-                ra,
-                rb,
-                ptr,
-                len,
-                digest: digest.into(),
-                pc,
-                is,
-                data,
+            Receipt::LogData(log) => Log::WithData {
+                id: log.id,
+                ra: log.ra,
+                rb: log.rb,
+                ptr: log.ptr,
+                len: log.len,
+                digest: log.digest,
+                pc: log.pc,
+                is: log.is,
+                data: log.data,
             },
             _ => panic!("Invalid receipt type"),
         }
     }
 }
-
-/// Introduced majorly allow delegating serialization and deserialization to `fuel-core`'s Receipt
-impl From<Log> for FuelCoreReceipt {
-    fn from(log: Log) -> FuelCoreReceipt {
-        match log {
-            Log::WithoutData {
-                id,
-                ra,
-                rb,
-                rc,
-                rd,
-                pc,
-                is,
-            } => FuelCoreReceipt::Log {
-                id: id.into(),
-                ra,
-                rb,
-                rc,
-                rd,
-                pc,
-                is,
-            },
-            Log::WithData {
-                id,
-                ra,
-                rb,
-                ptr,
-                len,
-                digest,
-                pc,
-                is,
-                data,
-            } => FuelCoreReceipt::LogData {
-                id: id.into(),
-                ra,
-                rb,
-                ptr,
-                len,
-                digest: digest.into(),
-                pc,
-                is,
-                data,
-            },
-        }
-    }
-}
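// Not part of the diff: the new `From<Receipt> for Log` panics on non-log
// variants, so callers are expected to filter first. A hedged sketch:
fn logs_from_receipts(receipts: Vec<Receipt>) -> Vec<Log> {
    receipts
        .into_iter()
        .filter(|r| matches!(r, Receipt::Log(_) | Receipt::LogData(_)))
        .map(Log::from)
        .collect()
}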
diff --git a/crates/fuel-streams-core/src/outputs/types.rs b/crates/fuel-streams-core/src/outputs/types.rs
index 607bcd72..ddedd9a0 100644
--- a/crates/fuel-streams-core/src/outputs/types.rs
+++ b/crates/fuel-streams-core/src/outputs/types.rs
@@ -4,11 +4,11 @@ use crate::types::*;
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(tag = "type")]
 pub enum Output {
-    Coin(CoinOutput),
+    Coin(OutputCoin),
     Contract(OutputContract),
-    Change(ChangeOutput),
-    Variable(VariableOutput),
-    ContractCreated(ContractCreated),
+    Change(OutputChange),
+    Variable(OutputVariable),
+    ContractCreated(OutputContractCreated),
 }

 impl From<&FuelCoreOutput> for Output {
@@ -18,7 +18,7 @@ impl From<&FuelCoreOutput> for Output {
             amount,
             asset_id,
             to,
-        } => Output::Coin(CoinOutput {
+        } => Output::Coin(OutputCoin {
             amount: *amount,
             asset_id: asset_id.into(),
             to: to.into(),
@@ -30,7 +30,7 @@
             amount,
             asset_id,
             to,
-        } => Output::Change(ChangeOutput {
+        } => Output::Change(OutputChange {
             amount: *amount,
             asset_id: asset_id.into(),
             to: to.into(),
@@ -39,7 +39,7 @@
             amount,
             asset_id,
             to,
-        } => Output::Variable(VariableOutput {
+        } => Output::Variable(OutputVariable {
             amount: *amount,
             asset_id: asset_id.into(),
             to: to.into(),
@@ -47,7 +47,7 @@
         FuelCoreOutput::ContractCreated {
             contract_id,
             state_root,
-        } => Output::ContractCreated(ContractCreated {
+        } => Output::ContractCreated(OutputContractCreated {
             contract_id: contract_id.into(),
             state_root: state_root.into(),
         }),
@@ -57,7 +57,7 @@
 #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
-pub struct CoinOutput {
+pub struct OutputCoin {
     pub amount: u64,
     pub asset_id: AssetId,
     pub to: Address,
@@ -65,7 +65,7 @@
 #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
-pub struct ChangeOutput {
+pub struct OutputChange {
     pub amount: u64,
     pub asset_id: AssetId,
     pub to: Address,
@@ -73,7 +73,7 @@
 #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
-pub struct VariableOutput {
+pub struct OutputVariable {
     pub amount: u64,
     pub asset_id: AssetId,
     pub to: Address,
@@ -99,7 +99,7 @@ impl From<&FuelCoreOutputContract> for OutputContract {

 #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
-pub struct ContractCreated {
-    contract_id: ContractId,
-    state_root: Bytes32,
+pub struct OutputContractCreated {
+    pub contract_id: ContractId,
+    pub state_root: Bytes32,
 }
diff --git a/crates/fuel-streams-core/src/primitive_types.rs b/crates/fuel-streams-core/src/primitive_types.rs
index 997b4221..158f23af 100644
--- a/crates/fuel-streams-core/src/primitive_types.rs
+++ b/crates/fuel-streams-core/src/primitive_types.rs
@@ -1,4 +1,8 @@
-use fuel_core_types::fuel_types;
+use fuel_core_types::{
+    fuel_asm::RawInstruction,
+    fuel_tx::PanicReason,
+    fuel_types,
+};
 pub use serde::{Deserialize, Serialize};

 use crate::fuel_core_types::*;
@@ -158,6 +162,58 @@ generate_byte_type_wrapper!(Salt, fuel_types::Salt, 32);
 generate_byte_type_wrapper!(MessageId, fuel_types::MessageId, 32);
 generate_byte_type_wrapper!(BlockId, fuel_types::Bytes32, 32);
 generate_byte_type_wrapper!(Signature, fuel_types::Bytes64, 64);
+generate_byte_type_wrapper!(TxId, fuel_types::TxId, 32);
+
+/// Implements bidirectional conversions between `Bytes32` and a given type.
+///
+/// This macro generates implementations of the `From` trait to convert:
+/// - From `Bytes32` to the target type
+/// - From a reference to `Bytes32` to the target type
+/// - From the target type to `Bytes32`
+/// - From a reference of the target type to `Bytes32`
+///
+/// The target type must be a 32-byte type that can be converted to/from `[u8; 32]`.
+///
+/// # Example
+/// ```ignore
+/// impl_bytes32_conversions!(ContractId);
+/// ```
+macro_rules! impl_bytes32_conversions {
+    ($type:ty) => {
+        impl From<Bytes32> for $type {
+            fn from(value: Bytes32) -> Self {
+                let bytes: [u8; 32] = value.0.into();
+                <$type>::from(bytes)
+            }
+        }
+        impl From<&Bytes32> for $type {
+            fn from(value: &Bytes32) -> Self {
+                value.clone().into()
+            }
+        }
+        impl From<$type> for Bytes32 {
+            fn from(value: $type) -> Self {
+                let bytes: [u8; 32] = value.0.into();
+                Bytes32::from(bytes)
+            }
+        }
+        impl From<&$type> for Bytes32 {
+            fn from(value: &$type) -> Self {
+                value.clone().into()
+            }
+        }
+    };
+}
+
+impl_bytes32_conversions!(MessageId);
+impl_bytes32_conversions!(ContractId);
+impl_bytes32_conversions!(AssetId);
+impl_bytes32_conversions!(Address);
+impl_bytes32_conversions!(BlobId);
+impl_bytes32_conversions!(Nonce);
+impl_bytes32_conversions!(Salt);
+impl_bytes32_conversions!(BlockId);
+impl_bytes32_conversions!(TxId);

 impl From<FuelCoreBlockId> for BlockId {
     fn from(value: FuelCoreBlockId) -> Self {
@@ -165,18 +221,6 @@
     }
 }

-impl From<Bytes32> for MessageId {
-    fn from(value: Bytes32) -> Self {
-        let bytes: [u8; 32] = value.0.into();
-        MessageId::from(bytes)
-    }
-}
-impl From<&Bytes32> for MessageId {
-    fn from(value: &Bytes32) -> Self {
-        value.clone().into()
-    }
-}
-
 #[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
 pub struct HexString(pub Vec<u8>);
 impl_hex_serde!(HexString);
@@ -186,13 +230,26 @@ impl From<&[u8]> for HexString {
         HexString(value.to_vec())
     }
 }
-
+impl From<Bytes32> for HexString {
+    fn from(value: Bytes32) -> Self {
+        Self::from(value.0.as_ref())
+    }
+}
+impl TryFrom<HexString> for Bytes32 {
+    type Error = String;
+    fn try_from(value: HexString) -> Result<Self, Self::Error> {
+        let bytes: [u8; 32] = value
+            .0
+            .try_into()
+            .map_err(|_| "Invalid length for Bytes32".to_string())?;
+        Ok(Bytes32::from(bytes))
+    }
+}
 impl std::fmt::Display for HexString {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         write!(f, "0x{}", hex::encode(&self.0))
     }
 }
-
 impl std::str::FromStr for HexString {
     type Err = String;
     fn from_str(s: &str) -> Result<Self, Self::Err> {
@@ -200,6 +257,16 @@ impl std::str::FromStr for HexString {
         hex::decode(s).map(HexString).map_err(|e| e.to_string())
     }
 }
+impl AsRef<[u8]> for HexString {
+    fn as_ref(&self) -> &[u8] {
+        &self.0
+    }
+}
+impl HexString {
+    pub fn zeroed() -> Self {
+        HexString(vec![0u8; 32])
+    }
+}

 #[derive(
     Debug,
@@ -230,22 +297,17 @@ impl From<FuelCoreTxPointer> for TxPointer {
     }
 }

 #[derive(
-    Debug,
-    Default,
-    Copy,
-    Clone,
-    PartialEq,
-    Eq,
-    PartialOrd,
-    Ord,
-    Hash,
-    Serialize,
-    Deserialize,
+    Debug, Default, Clone, PartialEq, Eq, Hash, Serialize, Deserialize,
 )]
 #[serde(rename_all = "camelCase")]
 pub struct UtxoId {
-    tx_id: FuelCoreTxId,
-    output_index: u16,
+    pub tx_id: Bytes32,
+    pub output_index: u16,
+}
+impl From<&UtxoId> for HexString {
+    fn from(value: &UtxoId) -> Self {
+        value.to_owned().into()
+    }
 }
 impl From<FuelCoreUtxoId> for UtxoId {
     fn from(value: FuelCoreUtxoId) -> Self {
@@ -255,11 +317,67 @@ impl From<&FuelCoreUtxoId> for UtxoId {
     fn from(value: &FuelCoreUtxoId) -> Self {
         Self {
-            tx_id: *value.tx_id(),
+            tx_id: value.tx_id().into(),
             output_index: value.output_index(),
         }
     }
 }
+impl From<UtxoId> for HexString {
+    fn from(value: UtxoId) -> Self {
+        let mut bytes = Vec::with_capacity(34);
+        bytes.extend_from_slice(value.tx_id.0.as_ref());
+        bytes.extend_from_slice(&value.output_index.to_be_bytes());
+        HexString(bytes)
+    }
+}
+
+#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
+pub struct PanicInstruction {
+    pub reason: PanicReason,
+    pub instruction: RawInstruction,
+}
+impl From<FuelCorePanicInstruction> for PanicInstruction {
+    fn from(value: FuelCorePanicInstruction) -> Self {
+        Self {
+            reason: value.reason().to_owned(),
+            instruction: value.instruction().to_owned(),
+        }
+    }
+}
+
+#[derive(
+    Debug,
+    Copy,
+    Clone,
+    PartialEq,
+    Eq,
+    Hash,
+    Default,
+    serde::Serialize,
+    serde::Deserialize,
+)]
+#[repr(u64)]
+pub enum ScriptExecutionResult {
+    Success,
+    Revert,
+    Panic,
+    // Generic failure case since any u64 is valid here
+    GenericFailure(u64),
+    #[default]
+    Unknown,
+}
+impl From<FuelCoreScriptExecutionResult> for ScriptExecutionResult {
+    fn from(value: FuelCoreScriptExecutionResult) -> Self {
+        match value {
+            FuelCoreScriptExecutionResult::Success => Self::Success,
+            FuelCoreScriptExecutionResult::Revert => Self::Revert,
+            FuelCoreScriptExecutionResult::Panic => Self::Panic,
+            FuelCoreScriptExecutionResult::GenericFailure(value) => {
+                Self::GenericFailure(value)
+            }
+        }
+    }
+}

 /// Macro to implement conversion from a type to `Bytes32`.
 ///
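// Not part of the diff: illustrating the conversions added above. A `UtxoId`
// now renders as a 34-byte hex string (32-byte tx id followed by the 2-byte
// big-endian output index); the inputs here are placeholders.
fn utxo_id_hex(tx_id: Bytes32, output_index: u16) -> String {
    let utxo = UtxoId { tx_id, output_index };
    HexString::from(utxo).to_string() // "0x" followed by 68 hex chars
}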
/// diff --git a/crates/fuel-streams-core/src/receipts/types.rs b/crates/fuel-streams-core/src/receipts/types.rs index b32e654e..565185d8 100644 --- a/crates/fuel-streams-core/src/receipts/types.rs +++ b/crates/fuel-streams-core/src/receipts/types.rs @@ -1,106 +1,365 @@ +use fuel_core_types::fuel_asm::Word; +use serde::{self, Deserialize, Serialize}; + use crate::types::*; -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(tag = "type")] +pub enum Receipt { + Call(CallReceipt), + Return(ReturnReceipt), + ReturnData(ReturnDataReceipt), + Panic(PanicReceipt), + Revert(RevertReceipt), + Log(LogReceipt), + LogData(LogDataReceipt), + Transfer(TransferReceipt), + TransferOut(TransferOutReceipt), + ScriptResult(ScriptResultReceipt), + MessageOut(MessageOutReceipt), + Mint(MintReceipt), + Burn(BurnReceipt), +} + +// Individual Receipt Types +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CallReceipt { + pub id: ContractId, + pub to: ContractId, + pub amount: Word, + pub asset_id: AssetId, + pub gas: Word, + pub param1: Word, + pub param2: Word, + pub pc: Word, + pub is: Word, +} + +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct Receipt { - pub amount: Option, - pub asset_id: Option, +pub struct ReturnReceipt { + pub id: ContractId, + pub val: Word, + pub pc: Word, + pub is: Word, +} + +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ReturnDataReceipt { + pub id: ContractId, + pub ptr: Word, + pub len: Word, + pub digest: Bytes32, + pub pc: Word, + pub is: Word, + #[serde(skip_serializing_if = "Option::is_none")] + pub data: Option>, +} + +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct PanicReceipt { + pub id: ContractId, + pub reason: PanicInstruction, + pub pc: Word, + pub is: Word, + #[serde(skip_serializing_if = "Option::is_none")] pub contract_id: Option, - pub data: Option, - pub digest: Option, - pub gas: Option, - pub gas_used: Option, - pub id: Option, - pub is: Option, - pub len: Option, - pub nonce: Option, - pub param1: Option, - pub param2: Option, - pub pc: Option, - pub ptr: Option, - pub ra: Option, - pub rb: Option, - pub rc: Option, - pub rd: Option, - pub reason: Option, - pub receipt_type: ReceiptType, - pub recipient: Option
<Address>, - pub result: Option<FuelCoreWord>, - pub sender: Option<Address>,
- pub sub_id: Option<Bytes32>, - pub to: Option<ContractId>, - pub to_address: Option<Address>
, - pub val: Option, } -impl From<&FuelCoreReceipt> for Receipt { - fn from(r: &FuelCoreReceipt) -> Self { - Receipt { - amount: r.amount().map(Into::into), - asset_id: r.asset_id().copied().map(Into::into), - contract_id: r.contract_id().map(Into::into), - data: r.data().map(Into::into), - digest: r.digest().copied().map(Into::into), - gas: r.gas().map(Into::into), - gas_used: r.gas_used().map(Into::into), - id: r.id().map(Into::into), - is: r.is().map(Into::into), - len: r.len().map(Into::into), - nonce: r.nonce().copied().map(Into::into), - param1: r.param1().map(Into::into), - param2: r.param2().map(Into::into), - pc: r.pc().map(Into::into), - ptr: r.ptr().map(Into::into), - ra: r.ra().map(Into::into), - rb: r.rb().map(Into::into), - rc: r.rc().map(Into::into), - rd: r.rd().map(Into::into), - reason: r.reason().map(Into::into), - receipt_type: r.into(), - recipient: r.recipient().copied().map(Into::into), - result: r.result().map(|r| FuelCoreWord::from(*r)), - sender: r.sender().copied().map(Into::into), - sub_id: r.sub_id().copied().map(Into::into), - to: r.to().copied().map(Into::into), - to_address: r.to_address().copied().map(Into::into), - val: r.val().map(Into::into), +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RevertReceipt { + pub id: ContractId, + pub ra: Word, + pub pc: Word, + pub is: Word, +} + +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct LogReceipt { + pub id: ContractId, + pub ra: Word, + pub rb: Word, + pub rc: Word, + pub rd: Word, + pub pc: Word, + pub is: Word, +} + +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct LogDataReceipt { + pub id: ContractId, + pub ra: Word, + pub rb: Word, + pub ptr: Word, + pub len: Word, + pub digest: Bytes32, + pub pc: Word, + pub is: Word, + #[serde(skip_serializing_if = "Option::is_none")] + pub data: Option>, +} + +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransferReceipt { + pub id: ContractId, + pub to: ContractId, + pub amount: Word, + pub asset_id: AssetId, + pub pc: Word, + pub is: Word, +} + +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransferOutReceipt { + pub id: ContractId, + pub to: Address, + pub amount: Word, + pub asset_id: AssetId, + pub pc: Word, + pub is: Word, +} + +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ScriptResultReceipt { + pub result: ScriptExecutionResult, + pub gas_used: Word, +} + +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct MessageOutReceipt { + pub sender: Address, + pub recipient: Address, + pub amount: Word, + pub nonce: Nonce, + pub len: Word, + pub digest: Bytes32, + #[serde(skip_serializing_if = "Option::is_none")] + pub data: Option>, +} + +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct MintReceipt { + pub sub_id: Bytes32, + pub contract_id: ContractId, + pub val: Word, + pub pc: Word, + pub is: Word, +} + +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct BurnReceipt { + pub sub_id: Bytes32, + pub contract_id: ContractId, + pub val: Word, + pub pc: Word, + pub is: Word, 
+} + +impl From for Receipt { + fn from(value: FuelCoreReceipt) -> Self { + match value { + FuelCoreReceipt::Call { + id, + to, + amount, + asset_id, + gas, + param1, + param2, + pc, + is, + } => Self::Call(CallReceipt { + id: id.into(), + to: to.into(), + amount, + asset_id: asset_id.into(), + gas, + param1, + param2, + pc, + is, + }), + FuelCoreReceipt::Return { id, val, pc, is } => { + Self::Return(ReturnReceipt { + id: id.into(), + val, + pc, + is, + }) + } + FuelCoreReceipt::ReturnData { + id, + ptr, + len, + digest, + pc, + is, + data, + } => Self::ReturnData(ReturnDataReceipt { + id: id.into(), + ptr, + len, + digest: digest.into(), + pc, + is, + data, + }), + FuelCoreReceipt::Panic { + id, + reason, + pc, + is, + contract_id, + } => Self::Panic(PanicReceipt { + id: id.into(), + reason: reason.into(), + pc, + is, + contract_id: contract_id.map(|id| id.into()), + }), + FuelCoreReceipt::Revert { id, ra, pc, is } => { + Self::Revert(RevertReceipt { + id: id.into(), + ra, + pc, + is, + }) + } + FuelCoreReceipt::Log { + id, + ra, + rb, + rc, + rd, + pc, + is, + } => Self::Log(LogReceipt { + id: id.into(), + ra, + rb, + rc, + rd, + pc, + is, + }), + FuelCoreReceipt::LogData { + id, + ra, + rb, + ptr, + len, + digest, + pc, + is, + data, + } => Self::LogData(LogDataReceipt { + id: id.into(), + ra, + rb, + ptr, + len, + digest: digest.into(), + pc, + is, + data, + }), + FuelCoreReceipt::Transfer { + id, + to, + amount, + asset_id, + pc, + is, + } => Self::Transfer(TransferReceipt { + id: id.into(), + to: to.into(), + amount, + asset_id: asset_id.into(), + pc, + is, + }), + FuelCoreReceipt::TransferOut { + id, + to, + amount, + asset_id, + pc, + is, + } => Self::TransferOut(TransferOutReceipt { + id: id.into(), + to: to.into(), + amount, + asset_id: asset_id.into(), + pc, + is, + }), + FuelCoreReceipt::ScriptResult { result, gas_used } => { + Self::ScriptResult(ScriptResultReceipt { + result: result.into(), + gas_used, + }) + } + FuelCoreReceipt::MessageOut { + sender, + recipient, + amount, + nonce, + len, + digest, + data, + } => Self::MessageOut(MessageOutReceipt { + sender: sender.into(), + recipient: recipient.into(), + amount, + nonce: nonce.into(), + len, + digest: digest.into(), + data, + }), + FuelCoreReceipt::Mint { + sub_id, + contract_id, + val, + pc, + is, + } => Self::Mint(MintReceipt { + sub_id: sub_id.into(), + contract_id: contract_id.into(), + val, + pc, + is, + }), + FuelCoreReceipt::Burn { + sub_id, + contract_id, + val, + pc, + is, + } => Self::Burn(BurnReceipt { + sub_id: sub_id.into(), + contract_id: contract_id.into(), + val, + pc, + is, + }), } } } -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -pub enum ReceiptType { - Burn, - Call, - Log, - LogData, - MessageOut, - Mint, - Panic, - Return, - ReturnData, - Revert, - ScriptResult, - Transfer, - TransferOut, -} - -impl From<&FuelCoreReceipt> for ReceiptType { - fn from(r: &FuelCoreReceipt) -> Self { - match r { - FuelCoreReceipt::Call { .. } => ReceiptType::Call, - FuelCoreReceipt::Return { .. } => ReceiptType::Return, - FuelCoreReceipt::ReturnData { .. } => ReceiptType::ReturnData, - FuelCoreReceipt::Panic { .. } => ReceiptType::Panic, - FuelCoreReceipt::Revert { .. } => ReceiptType::Revert, - FuelCoreReceipt::Log { .. } => ReceiptType::Log, - FuelCoreReceipt::LogData { .. } => ReceiptType::LogData, - FuelCoreReceipt::Transfer { .. } => ReceiptType::Transfer, - FuelCoreReceipt::TransferOut { .. } => ReceiptType::TransferOut, - FuelCoreReceipt::ScriptResult { .. 
} => ReceiptType::ScriptResult, - FuelCoreReceipt::MessageOut { .. } => ReceiptType::MessageOut, - FuelCoreReceipt::Mint { .. } => ReceiptType::Mint, - FuelCoreReceipt::Burn { .. } => ReceiptType::Burn, - } +impl From<&FuelCoreReceipt> for Receipt { + fn from(value: &FuelCoreReceipt) -> Self { + value.clone().into() } } diff --git a/crates/fuel-streams-core/src/stream/error.rs b/crates/fuel-streams-core/src/stream/error.rs index 27994989..168155da 100644 --- a/crates/fuel-streams-core/src/stream/error.rs +++ b/crates/fuel-streams-core/src/stream/error.rs @@ -12,37 +12,37 @@ use thiserror::Error; #[derive(Error, DisplayDoc, Debug)] pub enum StreamError { - /// Failed to publish to stream: {subject_name} + /// Failed to publish to stream: {subject_name}, error: {source} PublishFailed { subject_name: String, #[source] source: error::Error, }, - /// Failed to publish to S3 + /// Failed to publish to S3: {0} S3PublishError(#[from] fuel_streams_storage::s3::S3ClientError), - /// Failed to retrieve last published message from stream + /// Failed to retrieve last published message from stream: {0} GetLastPublishedFailed(#[from] error::Error), - /// Failed to create Key-Value Store + /// Failed to create Key-Value Store: {0} StoreCreation(#[from] error::Error), - /// Failed to publish item to Key-Value Store + /// Failed to publish item to Key-Value Store: {0} StorePublish(#[from] PutError), - /// Failed to subscribe to subject in Key-Value Store + /// Failed to subscribe to subject in Key-Value Store: {0} StoreSubscribe(#[from] error::Error), - /// Failed to publish item to stream + /// Failed to publish item to stream: {0} StreamPublish(#[from] CreateError), - /// Failed to create stream + /// Failed to create stream: {0} StreamCreation(#[from] error::Error), - /// Failed to create consumer for stream + /// Failed to create consumer for stream: {0} ConsumerCreate(#[from] error::Error), - /// Failed to consume messages from stream + /// Failed to consume messages from stream: {0} ConsumerMessages(#[from] error::Error), } diff --git a/crates/fuel-streams-ws/src/server/ws/fuel_streams.rs b/crates/fuel-streams-core/src/stream/fuel_streams.rs similarity index 76% rename from crates/fuel-streams-ws/src/server/ws/fuel_streams.rs rename to crates/fuel-streams-core/src/stream/fuel_streams.rs index 526550cb..5e8781ba 100644 --- a/crates/fuel-streams-ws/src/server/ws/fuel_streams.rs +++ b/crates/fuel-streams-core/src/stream/fuel_streams.rs @@ -4,12 +4,11 @@ use async_nats::{ jetstream::{context::CreateStreamErrorKind, stream::State as StreamState}, RequestErrorKind, }; -use fuel_streams::types::Log; -use fuel_streams_core::{prelude::*, SubscriptionConfig}; use futures::stream::BoxStream; -#[derive(Clone)] -/// Streams we currently support publishing to. 
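The `StreamError` rework above threads each wrapped error into the displaydoc message via `{0}` (or `{source}`), so the underlying cause is no longer swallowed. A minimal sketch of the effect (hypothetical `DemoError`, not part of this change):

```rust
use displaydoc::Display as DisplayDoc;
use thiserror::Error;

#[derive(Error, DisplayDoc, Debug)]
enum DemoError {
    /// Failed to publish to S3: {0}
    S3Publish(#[from] std::io::Error),
}

fn main() {
    let err = DemoError::from(std::io::Error::other("bucket missing"));
    // The doc comment becomes the Display impl, interpolating the source error:
    assert_eq!(err.to_string(), "Failed to publish to S3: bucket missing");
}
```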
+use crate::prelude::*; + +#[derive(Clone, Debug)] pub struct FuelStreams { pub transactions: Stream, pub blocks: Stream, @@ -20,6 +19,41 @@ pub struct FuelStreams { pub logs: Stream, } +pub struct FuelStreamsUtils; +impl FuelStreamsUtils { + pub fn is_within_subject_names(subject_name: &str) -> bool { + let subject_names = Self::subjects_names(); + subject_names.contains(&subject_name) + } + + pub fn subjects_names() -> &'static [&'static str] { + &[ + Transaction::NAME, + Block::NAME, + Input::NAME, + Receipt::NAME, + Utxo::NAME, + Log::NAME, + ] + } + + pub fn wildcards() -> Vec<&'static str> { + let nested_wildcards = [ + Transaction::WILDCARD_LIST, + Block::WILDCARD_LIST, + Input::WILDCARD_LIST, + Receipt::WILDCARD_LIST, + Utxo::WILDCARD_LIST, + Log::WILDCARD_LIST, + ]; + nested_wildcards + .into_iter() + .flatten() + .copied() + .collect::>() + } +} + impl FuelStreams { pub async fn new( nats_client: &NatsClient, @@ -37,6 +71,16 @@ impl FuelStreams { } } + pub async fn setup_all( + core_client: &NatsClient, + publisher_client: &NatsClient, + s3_client: &Arc, + ) -> (Self, Self) { + let core_stream = Self::new(core_client, s3_client).await; + let publisher_stream = Self::new(publisher_client, s3_client).await; + (core_stream, publisher_stream) + } + pub async fn subscribe( &self, sub_subject: &str, @@ -61,10 +105,14 @@ impl FuelStreams { )), } } + + pub fn arc(self) -> Arc { + Arc::new(self) + } } #[async_trait::async_trait] -pub trait FuelStreamsExt: Sync + Send + 'static { +pub trait FuelStreamsExt: Sync + Send { fn blocks(&self) -> &Stream; fn transactions(&self) -> &Stream; fn inputs(&self) -> &Stream; @@ -75,70 +123,11 @@ pub trait FuelStreamsExt: Sync + Send + 'static { async fn get_last_published_block(&self) -> anyhow::Result>; - fn subjects_names() -> &'static [&'static str] { - &[ - Transaction::NAME, - Block::NAME, - Input::NAME, - Receipt::NAME, - Utxo::NAME, - Log::NAME, - ] - } - - fn is_within_subject_names(subject_name: &str) -> bool { - let subject_names = Self::subjects_names(); - subject_names.contains(&subject_name) - } - - fn subjects_wildcards(&self) -> &[&'static str] { - &[ - TransactionsSubject::WILDCARD, - BlocksSubject::WILDCARD, - InputsByIdSubject::WILDCARD, - InputsCoinSubject::WILDCARD, - InputsMessageSubject::WILDCARD, - InputsContractSubject::WILDCARD, - ReceiptsLogSubject::WILDCARD, - ReceiptsBurnSubject::WILDCARD, - ReceiptsByIdSubject::WILDCARD, - ReceiptsCallSubject::WILDCARD, - ReceiptsMintSubject::WILDCARD, - ReceiptsPanicSubject::WILDCARD, - ReceiptsReturnSubject::WILDCARD, - ReceiptsRevertSubject::WILDCARD, - ReceiptsLogDataSubject::WILDCARD, - ReceiptsTransferSubject::WILDCARD, - ReceiptsMessageOutSubject::WILDCARD, - ReceiptsReturnDataSubject::WILDCARD, - ReceiptsTransferOutSubject::WILDCARD, - ReceiptsScriptResultSubject::WILDCARD, - UtxosSubject::WILDCARD, - LogsSubject::WILDCARD, - ] - } - - fn wildcards() -> Vec<&'static str> { - let nested_wildcards = [ - Transaction::WILDCARD_LIST, - Block::WILDCARD_LIST, - Input::WILDCARD_LIST, - Receipt::WILDCARD_LIST, - Utxo::WILDCARD_LIST, - Log::WILDCARD_LIST, - ]; - nested_wildcards - .into_iter() - .flatten() - .copied() - .collect::>() - } - async fn get_consumers_and_state( &self, ) -> Result, StreamState)>, RequestErrorKind>; - #[cfg(feature = "test-helpers")] + #[cfg(any(test, feature = "test-helpers"))] async fn is_empty(&self) -> bool; } @@ -167,10 +156,10 @@ impl FuelStreamsExt for FuelStreams { } async fn get_last_published_block(&self) -> anyhow::Result> { - Ok(self - .blocks + 
self.blocks .get_last_published(BlocksSubject::WILDCARD) - .await?) + .await + .map_err(|e| e.into()) } async fn get_consumers_and_state( @@ -187,7 +176,7 @@ impl FuelStreamsExt for FuelStreams { ]) } - #[cfg(feature = "test-helpers")] + #[cfg(any(test, feature = "test-helpers"))] async fn is_empty(&self) -> bool { self.blocks.is_empty(BlocksSubject::WILDCARD).await && self diff --git a/crates/fuel-streams-core/src/stream/mod.rs b/crates/fuel-streams-core/src/stream/mod.rs index 4ea63aff..a204e8d1 100644 --- a/crates/fuel-streams-core/src/stream/mod.rs +++ b/crates/fuel-streams-core/src/stream/mod.rs @@ -1,7 +1,9 @@ mod error; +mod fuel_streams; mod stream_encoding; mod stream_impl; pub use error::*; +pub use fuel_streams::*; pub use stream_encoding::*; pub use stream_impl::*; diff --git a/crates/fuel-streams-core/src/stream/stream_impl.rs b/crates/fuel-streams-core/src/stream/stream_impl.rs index 186f0896..fc8d974b 100644 --- a/crates/fuel-streams-core/src/stream/stream_impl.rs +++ b/crates/fuel-streams-core/src/stream/stream_impl.rs @@ -10,30 +10,29 @@ use async_nats::{ }; use async_trait::async_trait; use fuel_streams_macros::subject::IntoSubject; -use fuel_streams_storage::*; use futures::{stream::BoxStream, StreamExt, TryStreamExt}; -use sha2::{Digest, Sha256}; use tokio::sync::OnceCell; -use super::{error::StreamError, stream_encoding::StreamEncoder}; +use crate::prelude::*; #[derive(Debug, Clone)] pub struct PublishPacket { pub subject: Arc, pub payload: Arc, - pub s3_path: String, } impl PublishPacket { pub fn new(payload: T, subject: Arc) -> Self { - let s3_path = payload.get_s3_path(); - Self { payload: Arc::new(payload), subject, - s3_path, } } + + pub fn get_s3_path(&self) -> String { + let subject = self.subject.parse(); + subject.replace('.', "/").to_string() + } } /// Trait for types that can be streamed. 
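`get_s3_path` now lives on `PublishPacket` and derives the object path directly from the parsed subject, replacing the hash-based `v1/{name}/{hash}.json` scheme removed below. A toy sketch of the mapping (plain strings stand in for the subject type):

```rust
// Toy version of PublishPacket::get_s3_path: dots in the parsed NATS
// subject become path separators in the S3 object key.
fn s3_path_from_subject(parsed_subject: &str) -> String {
    parsed_subject.replace('.', "/")
}

fn main() {
    assert_eq!(
        s3_path_from_subject("fuel.blocks.0x0.1234"),
        "fuel/blocks/0x0/1234"
    );
}
```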
@@ -65,19 +64,6 @@ pub trait Streamable: StreamEncoder + std::marker::Sized { fn to_packet(&self, subject: Arc) -> PublishPacket { PublishPacket::new(self.clone(), subject) } - - fn get_s3_path(&self) -> String { - format!("v1/{}/{}.json", Self::NAME, self.get_consistent_hash()) - } - - fn get_consistent_hash(&self) -> String { - let serialized = self.encode_self(); - - let mut hasher = Sha256::new(); - hasher.update(serialized); - - format!("{:x}", hasher.finalize()) - } } /// Houses nats-agnostic APIs for publishing and consuming a streamable type @@ -150,13 +136,16 @@ impl Stream { ) -> Self { let namespace = &nats_client.namespace; let bucket_name = namespace.stream_name(S::NAME); + let config = kv::Config { + bucket: bucket_name.to_owned(), + storage: stream::StorageType::File, + history: 1, + compression: true, + ..Default::default() + }; + let store = nats_client - .get_or_create_kv_store(kv::Config { - bucket: bucket_name.to_owned(), - storage: stream::StorageType::File, - history: 1, - ..Default::default() - }) + .get_or_create_kv_store(config) .await .expect("Streams must be created"); @@ -172,15 +161,14 @@ impl Stream { packet: &PublishPacket, ) -> Result { let payload = &packet.payload; - let s3_path = &packet.s3_path; + let s3_path = packet.get_s3_path(); let subject_name = &packet.subject.parse(); - // publish payload to S3 self.s3_client - .put_object(s3_path, payload.encode(subject_name)) + .put_object(&s3_path, payload.encode(subject_name)) .await?; - self.publish_s3_path_to_nats(subject_name, s3_path).await + self.publish_s3_path_to_nats(subject_name, &s3_path).await } async fn publish_s3_path_to_nats( @@ -188,9 +176,9 @@ impl Stream { subject_name: &str, s3_path: &str, ) -> Result { + tracing::debug!("S3 path published: {:?}", s3_path); let data = s3_path.to_string().into_bytes(); let data_size = data.len(); - let result = self.store.create(subject_name, data.into()).await; match result { @@ -377,7 +365,6 @@ impl Stream { wildcard: &str, ) -> Result, StreamError> { let subject_name = &Self::prefix_filter_subject(wildcard); - let message = self .store .stream @@ -400,7 +387,6 @@ impl Stream { nats_payload: Vec, ) -> Result { let s3_path = String::from_utf8(nats_payload).expect("Must be S3 path"); - let s3_object = self .s3_client .get_object(&s3_path) @@ -444,6 +430,10 @@ impl Stream { pub fn store(&self) -> &kv::Store { &self.store } + + pub fn arc(&self) -> Arc { + Arc::new(self.to_owned()) + } } /// Configuration for subscribing to a consumer. diff --git a/crates/fuel-streams-core/src/transactions/types.rs b/crates/fuel-streams-core/src/transactions/types.rs index fe6233b5..adeb64fc 100644 --- a/crates/fuel-streams-core/src/transactions/types.rs +++ b/crates/fuel-streams-core/src/transactions/types.rs @@ -1,3 +1,4 @@ +pub use fuel_core_client::client::types::TransactionStatus as ClientTransactionStatus; use fuel_core_types::fuel_tx; use crate::types::*; @@ -405,7 +406,7 @@ impl Transaction { tx_pointer, upgrade_purpose, witnesses, - receipts: receipts.iter().map(Into::into).collect(), + receipts: receipts.iter().map(|r| r.to_owned().into()).collect(), } } } @@ -502,6 +503,31 @@ impl From<&FuelCoreTransactionStatus> for TransactionStatus { } } +impl From<&ClientTransactionStatus> for TransactionStatus { + fn from(value: &ClientTransactionStatus) -> Self { + match value { + ClientTransactionStatus::Failure { .. } => { + TransactionStatus::Failed + } + ClientTransactionStatus::Submitted { .. 
} => { + TransactionStatus::Submitted + } + ClientTransactionStatus::SqueezedOut { .. } => { + TransactionStatus::SqueezedOut + } + ClientTransactionStatus::Success { .. } => { + TransactionStatus::Success + } + } + } +} + +impl From for TransactionStatus { + fn from(value: ClientTransactionStatus) -> Self { + (&value).into() + } +} + pub trait FuelCoreTransactionExt { fn inputs(&self) -> &[FuelCoreInput]; fn outputs(&self) -> &Vec; diff --git a/crates/fuel-streams-core/src/types.rs b/crates/fuel-streams-core/src/types.rs index efacbacc..9d1c880f 100644 --- a/crates/fuel-streams-core/src/types.rs +++ b/crates/fuel-streams-core/src/types.rs @@ -14,4 +14,5 @@ pub use crate::{ // ------------------------------------------------------------------------ // General // ------------------------------------------------------------------------ -pub type BoxedResult = Result>; +pub type BoxedError = Box; +pub type BoxedResult = Result; diff --git a/crates/fuel-streams-core/src/utxos/subjects.rs b/crates/fuel-streams-core/src/utxos/subjects.rs index 6238f49e..2d89d4c7 100644 --- a/crates/fuel-streams-core/src/utxos/subjects.rs +++ b/crates/fuel-streams-core/src/utxos/subjects.rs @@ -16,12 +16,12 @@ use crate::types::*; /// # use fuel_streams_core::types::*; /// # use fuel_streams_macros::subject::*; /// let subject = UtxosSubject { -/// hash: Some(MessageId::from([1u8; 32])), +/// utxo_id: Some(HexString::zeroed()), /// utxo_type: Some(UtxoType::Message), /// }; /// assert_eq!( /// subject.parse(), -/// "utxos.message.0x0101010101010101010101010101010101010101010101010101010101010101" +/// "utxos.message.0x0000000000000000000000000000000000000000000000000000000000000000" /// ); /// ``` /// @@ -40,10 +40,10 @@ use crate::types::*; /// # use fuel_streams_core::types::*; /// # use fuel_streams_macros::subject::*; /// let wildcard = UtxosSubject::wildcard( -/// Some(MessageId::from([1u8; 32])), +/// Some(HexString::zeroed()), /// None, /// ); -/// assert_eq!(wildcard, "utxos.*.0x0101010101010101010101010101010101010101010101010101010101010101"); +/// assert_eq!(wildcard, "utxos.*.0x0000000000000000000000000000000000000000000000000000000000000000"); /// ``` /// /// Using the builder pattern: @@ -53,16 +53,16 @@ use crate::types::*; /// # use fuel_streams_core::types::*; /// # use fuel_streams_macros::subject::*; /// let subject = UtxosSubject::new() -/// .with_hash(Some(MessageId::from([1u8; 32]))) +/// .with_utxo_id(Some(HexString::zeroed())) /// .with_utxo_type(Some(UtxoType::Message)); -/// assert_eq!(subject.parse(), "utxos.message.0x0101010101010101010101010101010101010101010101010101010101010101"); +/// assert_eq!(subject.parse(), "utxos.message.0x0000000000000000000000000000000000000000000000000000000000000000"); /// ``` #[derive(Subject, Debug, Clone, Default)] #[subject_wildcard = "utxos.>"] -#[subject_format = "utxos.{utxo_type}.{hash}"] +#[subject_format = "utxos.{utxo_type}.{utxo_id}"] pub struct UtxosSubject { - pub hash: Option, + pub utxo_id: Option, pub utxo_type: Option, } @@ -80,7 +80,7 @@ mod tests { #[test] fn test_utxos_message_subject_creation() { let utxo_subject = UtxosSubject::new() - .with_hash(Some(MessageId::zeroed())) + .with_utxo_id(Some(HexString::zeroed())) .with_utxo_type(Some(UtxoType::Message)); assert_eq!( utxo_subject.to_string(), @@ -91,7 +91,7 @@ mod tests { #[test] fn test_utxos_coin_subject_creation() { let utxo_subject = UtxosSubject::new() - .with_hash(Some(MessageId::zeroed())) + .with_utxo_id(Some(HexString::zeroed())) .with_utxo_type(Some(UtxoType::Coin)); 
assert_eq!( utxo_subject.to_string(), @@ -102,7 +102,7 @@ mod tests { #[test] fn test_utxos_contract_subject_creation() { let utxo_subject = UtxosSubject::new() - .with_hash(Some(MessageId::zeroed())) + .with_utxo_id(Some(HexString::zeroed())) .with_utxo_type(Some(UtxoType::Contract)); assert_eq!( utxo_subject.to_string(), diff --git a/crates/fuel-streams-core/src/utxos/types.rs b/crates/fuel-streams-core/src/utxos/types.rs index 2e067f29..1fd8daec 100644 --- a/crates/fuel-streams-core/src/utxos/types.rs +++ b/crates/fuel-streams-core/src/utxos/types.rs @@ -7,7 +7,7 @@ pub struct Utxo { pub sender: Option
<Address>, pub recipient: Option<Address>
, pub nonce: Option, - pub data: Option>, + pub data: Option, pub amount: Option, pub tx_id: Bytes32, } diff --git a/benches/nats-publisher/Cargo.toml b/crates/fuel-streams-executors/Cargo.toml similarity index 52% rename from benches/nats-publisher/Cargo.toml rename to crates/fuel-streams-executors/Cargo.toml index 365a8df3..7b70773d 100644 --- a/benches/nats-publisher/Cargo.toml +++ b/crates/fuel-streams-executors/Cargo.toml @@ -1,5 +1,6 @@ [package] -name = "nats-publisher" +name = "fuel-streams-executors" +description = "Executors for Fuel Streams entities" authors = { workspace = true } keywords = { workspace = true } edition = { workspace = true } @@ -13,16 +14,18 @@ publish = false [dependencies] anyhow = { workspace = true } async-nats = { workspace = true } -clap = { workspace = true } fuel-core = { workspace = true } -fuel-core-bin = { workspace = true } -fuel-core-importer = { workspace = true } -fuel-core-storage = { workspace = true } -fuel-core-types = { workspace = true } -fuel-data-parser = { workspace = true } -fuel-streams-core = { workspace = true } +fuel-streams-core = { workspace = true, features = ["test-helpers"] } +futures = { workspace = true } +num_cpus = { workspace = true } +rayon = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +sha2 = { workspace = true } +thiserror = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } -[dev-dependencies] -criterion = { version = "0.5", features = ["html_reports", "async_tokio"] } +[features] +default = [] +test-helpers = [] diff --git a/crates/fuel-streams-executors/src/blocks.rs b/crates/fuel-streams-executors/src/blocks.rs new file mode 100644 index 00000000..5a145e2d --- /dev/null +++ b/crates/fuel-streams-executors/src/blocks.rs @@ -0,0 +1,70 @@ +use std::sync::Arc; + +use fuel_streams_core::prelude::*; +use futures::stream::FuturesUnordered; +use tokio::task::JoinHandle; + +use crate::*; + +impl Executor { + pub fn process(&self) -> JoinHandle> { + let metadata = self.metadata(); + let block = self.block(); + let block_height = (*metadata.block_height).clone(); + let block_producer = (*metadata.block_producer).clone(); + let packet = PublishPacket::::new( + block.to_owned(), + BlocksSubject { + height: Some(block_height), + producer: Some(block_producer), + } + .arc(), + ); + self.publish(&packet) + } + + pub fn process_all( + payload: Arc, + fuel_streams: &Arc, + semaphore: &Arc, + ) -> FuturesUnordered>> { + let block_stream = fuel_streams.blocks().arc(); + let tx_stream = fuel_streams.transactions().arc(); + let input_stream = fuel_streams.inputs().arc(); + let output_stream = fuel_streams.outputs().arc(); + let receipt_stream = fuel_streams.receipts().arc(); + let log_stream = fuel_streams.logs().arc(); + let utxo_stream = fuel_streams.utxos().arc(); + + let block_executor = Executor::new(&payload, &block_stream, semaphore); + let tx_executor = Executor::new(&payload, &tx_stream, semaphore); + let input_executor = Executor::new(&payload, &input_stream, semaphore); + let output_executor = + Executor::new(&payload, &output_stream, semaphore); + let receipt_executor = + Executor::new(&payload, &receipt_stream, semaphore); + let log_executor = Executor::new(&payload, &log_stream, semaphore); + let utxo_executor = Executor::new(&payload, &utxo_stream, semaphore); + + let transactions = payload.transactions.to_owned(); + let tx_tasks = + transactions + .iter() + .enumerate() + .flat_map(|tx_item @ (_, tx)| { + vec![ + tx_executor.process(tx_item), + 
input_executor.process(tx), + output_executor.process(tx), + receipt_executor.process(tx), + log_executor.process(tx), + utxo_executor.process(tx), + ] + }); + + let block_task = block_executor.process(); + std::iter::once(block_task) + .chain(tx_tasks.into_iter().flatten()) + .collect::>() + } +} diff --git a/crates/fuel-streams-executors/src/inputs.rs b/crates/fuel-streams-executors/src/inputs.rs new file mode 100644 index 00000000..0c201b3b --- /dev/null +++ b/crates/fuel-streams-executors/src/inputs.rs @@ -0,0 +1,129 @@ +use std::sync::Arc; + +use fuel_streams_core::prelude::*; +use rayon::prelude::*; +use tokio::task::JoinHandle; + +use crate::*; + +impl Executor { + pub fn process( + &self, + tx: &Transaction, + ) -> Vec>> { + let tx_id = tx.id.clone(); + let packets = tx + .inputs + .par_iter() + .enumerate() + .flat_map(move |(index, input)| { + let main_subject = main_subject(input, tx_id.clone(), index); + let identifier_subjects = + identifiers(input, &tx_id, index as u8) + .into_par_iter() + .map(|identifier| identifier.into()) + .map(|subject: InputsByIdSubject| subject.arc()) + .collect::>(); + + let mut packets = vec![input.to_packet(main_subject)]; + packets.extend( + identifier_subjects + .into_iter() + .map(|subject| input.to_packet(subject)), + ); + + packets + }) + .collect::>(); + + packets.iter().map(|packet| self.publish(packet)).collect() + } +} + +fn main_subject( + input: &Input, + tx_id: Bytes32, + index: usize, +) -> Arc { + match input { + Input::Contract(contract) => InputsContractSubject { + tx_id: Some(tx_id), + index: Some(index), + contract_id: Some(contract.contract_id.to_owned().into()), + } + .arc(), + Input::Coin(coin) => InputsCoinSubject { + tx_id: Some(tx_id), + index: Some(index), + owner: Some(coin.owner.to_owned()), + asset_id: Some(coin.asset_id.to_owned()), + } + .arc(), + Input::Message(message) => InputsMessageSubject { + tx_id: Some(tx_id), + index: Some(index), + sender: Some(message.sender.to_owned()), + recipient: Some(message.recipient.to_owned()), + } + .arc(), + } +} + +pub fn identifiers( + input: &Input, + tx_id: &Bytes32, + index: u8, +) -> Vec { + let mut identifiers = match input { + Input::Coin(coin) => { + vec![ + Identifier::Address( + tx_id.to_owned(), + index, + coin.owner.to_owned().into(), + ), + Identifier::AssetID( + tx_id.to_owned(), + index, + coin.asset_id.to_owned().into(), + ), + ] + } + Input::Message(message) => { + vec![ + Identifier::Address( + tx_id.to_owned(), + index, + message.sender.to_owned().into(), + ), + Identifier::Address( + tx_id.to_owned(), + index, + message.recipient.to_owned().into(), + ), + ] + } + Input::Contract(contract) => { + vec![Identifier::ContractID( + tx_id.to_owned(), + index, + contract.contract_id.to_owned(), + )] + } + }; + + match input { + Input::Coin(InputCoin { predicate, .. }) + | Input::Message(InputMessage { predicate, .. 
}) => { + let predicate_tag = super::sha256(&predicate.0); + identifiers.push(Identifier::PredicateID( + tx_id.to_owned(), + index, + predicate_tag, + )); + } + _ => {} + }; + + identifiers +} diff --git a/crates/fuel-streams-executors/src/lib.rs b/crates/fuel-streams-executors/src/lib.rs new file mode 100644 index 00000000..a227758d --- /dev/null +++ b/crates/fuel-streams-executors/src/lib.rs @@ -0,0 +1,255 @@ +pub mod blocks; +pub mod inputs; +pub mod logs; +pub mod outputs; +pub mod receipts; +pub mod transactions; +pub mod utxos; + +use std::{ + env, + marker::PhantomData, + sync::{Arc, LazyLock}, +}; + +use async_nats::jetstream::context::Publish; +use fuel_streams_core::prelude::*; +use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; +use tokio::task::JoinHandle; + +pub static PUBLISHER_MAX_THREADS: LazyLock = LazyLock::new(|| { + let available_cpus = num_cpus::get(); + env::var("PUBLISHER_MAX_THREADS") + .ok() + .and_then(|val| val.parse().ok()) + .unwrap_or(available_cpus) +}); + +pub fn sha256(bytes: &[u8]) -> Bytes32 { + let mut sha256 = Sha256::new(); + sha256.update(bytes); + let bytes: [u8; 32] = sha256 + .finalize() + .as_slice() + .try_into() + .expect("Must be 32 bytes"); + + bytes.into() +} + +#[derive(Debug, thiserror::Error)] +pub enum ExecutorError { + #[error("Failed to publish: {0}")] + PublishFailed(String), + #[error("Failed to acquire semaphore: {0}")] + SemaphoreError(#[from] tokio::sync::AcquireError), + #[error("Failed to serialize block payload: {0}")] + Serialization(#[from] serde_json::Error), + #[error("Failed to fetch transaction status: {0}")] + TransactionStatus(String), + #[error("Failed to access offchain database")] + OffchainDatabase(#[from] anyhow::Error), + #[error("Failed to join tasks: {0}")] + JoinError(#[from] tokio::task::JoinError), +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Metadata { + pub chain_id: Arc, + pub base_asset_id: Arc, + pub block_producer: Arc
, + pub block_height: Arc, + pub consensus: Arc, +} + +impl Metadata { + pub fn new( + fuel_core: &Arc, + sealed_block: &FuelCoreSealedBlock, + ) -> Self { + let block = sealed_block.entity.clone(); + let consensus = sealed_block.consensus.clone(); + let height = *block.header().consensus().height; + let producer = + consensus.block_producer(&block.id()).unwrap_or_default(); + Self { + chain_id: Arc::new(*fuel_core.chain_id()), + base_asset_id: Arc::new(*fuel_core.base_asset_id()), + block_producer: Arc::new(producer.into()), + block_height: Arc::new(height.into()), + consensus: Arc::new(consensus.into()), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BlockPayload { + pub block: Block, + pub transactions: Vec, + metadata: Metadata, +} + +impl BlockPayload { + pub fn new( + fuel_core: Arc, + sealed_block: &FuelCoreSealedBlock, + metadata: &Metadata, + ) -> Result { + let block = sealed_block.entity.clone(); + let txs = Self::txs_from_fuelcore( + fuel_core.to_owned(), + sealed_block, + metadata, + )?; + let txs_ids = txs.iter().map(|i| i.id.clone()).collect(); + let block_height = block.header().height(); + let consensus = fuel_core.get_consensus(block_height)?; + let block = Block::new(&block, consensus.into(), txs_ids); + Ok(Self { + block, + transactions: txs, + metadata: metadata.to_owned(), + }) + } + + pub fn encode(&self) -> Result { + serde_json::to_string(self).map_err(ExecutorError::from) + } + + pub fn decode(json: &str) -> Result { + serde_json::from_str(json).map_err(ExecutorError::from) + } + + pub fn tx_ids(&self) -> Vec { + self.transactions + .iter() + .map(|tx| tx.id.clone()) + .collect::>() + } + + pub fn message_id(&self) -> String { + let height = self.metadata.block_height.clone(); + format!("block_{height}") + } + + pub fn subject(&self) -> String { + let producer = self.metadata.block_producer.clone(); + let height = self.metadata.block_height.clone(); + format!("block_submitted.{producer}.{height}") + } + + pub fn metadata(&self) -> &Metadata { + &self.metadata + } + + pub fn block_height(&self) -> u32 { + self.block.height + } + + pub fn arc(&self) -> Arc { + Arc::new(self.clone()) + } + + pub fn txs_from_fuelcore( + fuel_core: Arc, + sealed_block: &FuelCoreSealedBlock, + metadata: &Metadata, + ) -> Result, ExecutorError> { + let mut transactions: Vec = vec![]; + let blocks_txs = sealed_block.entity.transactions_vec(); + for tx_item in blocks_txs.iter() { + let tx_id = tx_item.id(&metadata.chain_id); + let receipts = fuel_core.get_receipts(&tx_id)?.unwrap_or_default(); + let tx_status = fuel_core.get_tx_status(&tx_id)?; + let tx_status: TransactionStatus = match tx_status { + Some(status) => (&status).into(), + _ => TransactionStatus::None, + }; + let new_transaction = Transaction::new( + &tx_id.into(), + tx_item, + &tx_status, + &metadata.base_asset_id, + &receipts, + ); + transactions.push(new_transaction); + } + Ok(transactions) + } +} + +impl TryFrom for Publish { + type Error = ExecutorError; + fn try_from(payload: BlockPayload) -> Result { + let message_id = payload.message_id(); + Ok(Publish::build() + .message_id(message_id) + .payload(payload.encode()?.into())) + } +} + +pub struct Executor { + pub stream: Arc>, + payload: Arc, + semaphore: Arc, + __marker: PhantomData, +} + +impl Executor { + pub fn new( + payload: &Arc, + stream: &Arc>, + semaphore: &Arc, + ) -> Self { + Self { + payload: payload.to_owned(), + stream: stream.to_owned(), + semaphore: semaphore.to_owned(), + __marker: PhantomData, + } + } + + fn publish( + &self, 
+ packet: &PublishPacket, + ) -> JoinHandle> { + let wildcard = packet.subject.parse(); + let stream = Arc::clone(&self.stream); + let permit = Arc::clone(&self.semaphore); + + // TODO: add telemetry back again + let packet = packet.clone(); + tokio::spawn({ + async move { + let _permit = permit.acquire().await?; + match stream.publish(&packet).await { + Ok(_) => { + tracing::debug!( + "Successfully published for stream: {wildcard}" + ); + Ok(()) + } + Err(e) => { + tracing::error!("Failed to publish for stream: {wildcard}, error: {e}"); + Err(ExecutorError::PublishFailed(e.to_string())) + } + } + } + }) + } + + pub fn payload(&self) -> Arc { + Arc::clone(&self.payload) + } + pub fn metadata(&self) -> &Metadata { + &self.payload.metadata + } + pub fn block(&self) -> &Block { + &self.payload.block + } + pub fn block_height(&self) -> BlockHeight { + let height = self.block().height; + BlockHeight::from(height) + } +} diff --git a/crates/fuel-streams-executors/src/logs.rs b/crates/fuel-streams-executors/src/logs.rs new file mode 100644 index 00000000..f4ac0308 --- /dev/null +++ b/crates/fuel-streams-executors/src/logs.rs @@ -0,0 +1,38 @@ +use fuel_streams_core::prelude::*; +use rayon::prelude::*; +use tokio::task::JoinHandle; + +use crate::*; + +impl Executor { + pub fn process( + &self, + tx: &Transaction, + ) -> Vec>> { + let block_height = self.block_height(); + let tx_id = tx.id.clone(); + let receipts = tx.receipts.clone(); + let packets = receipts + .par_iter() + .enumerate() + .filter_map(|(index, receipt)| match receipt { + Receipt::Log(LogReceipt { id, .. }) + | Receipt::LogData(LogDataReceipt { id, .. }) => { + Some(PublishPacket::new( + receipt.to_owned().into(), + LogsSubject { + block_height: Some(block_height.clone()), + tx_id: Some(tx_id.to_owned()), + receipt_index: Some(index), + log_id: Some(id.into()), + } + .arc(), + )) + } + _ => None, + }) + .collect::>(); + + packets.iter().map(|packet| self.publish(packet)).collect() + } +} diff --git a/crates/fuel-streams-executors/src/outputs.rs b/crates/fuel-streams-executors/src/outputs.rs new file mode 100644 index 00000000..48e9baa8 --- /dev/null +++ b/crates/fuel-streams-executors/src/outputs.rs @@ -0,0 +1,155 @@ +use std::sync::Arc; + +use fuel_streams_core::prelude::*; +use rayon::prelude::*; +use tokio::task::JoinHandle; + +use crate::*; + +impl Executor { + pub fn process( + &self, + tx: &Transaction, + ) -> Vec>> { + let tx_id = tx.id.clone(); + let packets: Vec> = tx + .outputs + .par_iter() + .enumerate() + .flat_map(|(index, output)| { + let main_subject = main_subject(output, tx, &tx_id, index); + let identifier_subjects = + identifiers(output, tx, &tx_id, index as u8) + .into_par_iter() + .map(|identifier| identifier.into()) + .map(|subject: OutputsByIdSubject| subject.arc()) + .collect::>(); + + let mut packets = vec![output.to_packet(main_subject)]; + packets.extend( + identifier_subjects + .into_iter() + .map(|subject| output.to_packet(subject)), + ); + + packets + }) + .collect(); + + packets.iter().map(|packet| self.publish(packet)).collect() + } +} + +fn main_subject( + output: &Output, + transaction: &Transaction, + tx_id: &Bytes32, + index: usize, +) -> Arc { + match output { + Output::Coin(OutputCoin { to, asset_id, .. 
}) => OutputsCoinSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index as u16), + to: Some(to.to_owned()), + asset_id: Some(asset_id.to_owned()), + } + .arc(), + Output::Contract(contract) => { + let contract_id = + match find_output_contract_id(transaction, contract) { + Some(contract_id) => contract_id, + None => { + tracing::warn!( + "Contract ID not found for output: {:?}", + output + ); + + Default::default() + } + }; + + OutputsContractSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index as u16), + contract_id: Some(contract_id), + } + .arc() + } + Output::Change(OutputChange { to, asset_id, .. }) => { + OutputsChangeSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index as u16), + to: Some(to.to_owned()), + asset_id: Some(asset_id.to_owned()), + } + .arc() + } + Output::Variable(OutputVariable { to, asset_id, .. }) => { + OutputsVariableSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index as u16), + to: Some(to.to_owned()), + asset_id: Some(asset_id.to_owned()), + } + .arc() + } + Output::ContractCreated(OutputContractCreated { + contract_id, .. + }) => OutputsContractCreatedSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index as u16), + contract_id: Some(contract_id.to_owned()), + } + .arc(), + } +} + +pub fn identifiers( + output: &Output, + tx: &Transaction, + tx_id: &Bytes32, + index: u8, +) -> Vec { + match output { + Output::Change(OutputChange { to, asset_id, .. }) + | Output::Variable(OutputVariable { to, asset_id, .. }) + | Output::Coin(OutputCoin { to, asset_id, .. }) => { + vec![ + Identifier::Address(tx_id.to_owned(), index, to.into()), + Identifier::AssetID(tx_id.to_owned(), index, asset_id.into()), + ] + } + Output::Contract(contract) => find_output_contract_id(tx, contract) + .map(|contract_id| { + vec![Identifier::ContractID( + tx_id.to_owned(), + index, + contract_id.into(), + )] + }) + .unwrap_or_default(), + Output::ContractCreated(OutputContractCreated { + contract_id, .. 
+ }) => { + vec![Identifier::ContractID( + tx_id.to_owned(), + index, + contract_id.into(), + )] + } + } +} + +pub fn find_output_contract_id( + tx: &Transaction, + contract: &OutputContract, +) -> Option { + let input_index = contract.input_index as usize; + tx.inputs.get(input_index).and_then(|input| { + if let Input::Contract(input_contract) = input { + Some(input_contract.contract_id.to_owned().into()) + } else { + None + } + }) +} diff --git a/crates/fuel-streams-executors/src/receipts.rs b/crates/fuel-streams-executors/src/receipts.rs new file mode 100644 index 00000000..c99f03fc --- /dev/null +++ b/crates/fuel-streams-executors/src/receipts.rs @@ -0,0 +1,238 @@ +use std::sync::Arc; + +use fuel_streams_core::prelude::*; +use rayon::prelude::*; +use tokio::task::JoinHandle; + +use crate::*; + +impl Executor { + pub fn process( + &self, + tx: &Transaction, + ) -> Vec>> { + let tx_id = tx.id.clone(); + let receipts = tx.receipts.clone(); + let packets: Vec> = receipts + .par_iter() + .enumerate() + .flat_map(|(index, receipt)| { + let main_subject = main_subject(receipt, &tx_id, index); + let identifier_subjects = + identifiers(receipt, &tx_id, index as u8) + .into_par_iter() + .map(|identifier| identifier.into()) + .map(|subject: ReceiptsByIdSubject| subject.arc()) + .collect::>(); + + let receipt: Receipt = receipt.to_owned(); + let mut packets = vec![receipt.to_packet(main_subject)]; + packets.extend( + identifier_subjects + .into_iter() + .map(|subject| receipt.to_packet(subject)), + ); + + packets + }) + .collect(); + + packets.iter().map(|packet| self.publish(packet)).collect() + } +} + +fn main_subject( + receipt: &Receipt, + tx_id: &Bytes32, + index: usize, +) -> Arc { + match receipt { + Receipt::Call(CallReceipt { + id: from, + to, + asset_id, + .. + }) => ReceiptsCallSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index), + from: Some(from.to_owned()), + to: Some(to.to_owned()), + asset_id: Some(asset_id.to_owned()), + } + .arc(), + Receipt::Return(ReturnReceipt { id, .. }) => ReceiptsReturnSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index), + id: Some(id.to_owned()), + } + .arc(), + Receipt::ReturnData(ReturnDataReceipt { id, .. }) => { + ReceiptsReturnDataSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index), + id: Some(id.to_owned()), + } + .arc() + } + Receipt::Panic(PanicReceipt { id, .. }) => ReceiptsPanicSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index), + id: Some(id.to_owned()), + } + .arc(), + Receipt::Revert(RevertReceipt { id, .. }) => ReceiptsRevertSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index), + id: Some(id.to_owned()), + } + .arc(), + Receipt::Log(LogReceipt { id, .. }) => ReceiptsLogSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index), + id: Some(id.to_owned()), + } + .arc(), + Receipt::LogData(LogDataReceipt { id, .. }) => ReceiptsLogDataSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index), + id: Some(id.to_owned()), + } + .arc(), + Receipt::Transfer(TransferReceipt { + id: from, + to, + asset_id, + .. + }) => ReceiptsTransferSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index), + from: Some(from.to_owned()), + to: Some(to.to_owned()), + asset_id: Some(asset_id.to_owned()), + } + .arc(), + + Receipt::TransferOut(TransferOutReceipt { + id: from, + to, + asset_id, + .. 
+ }) => ReceiptsTransferOutSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index), + from: Some(from.to_owned()), + to: Some(to.to_owned()), + asset_id: Some(asset_id.to_owned()), + } + .arc(), + + Receipt::ScriptResult(ScriptResultReceipt { .. }) => { + ReceiptsScriptResultSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index), + } + .arc() + } + Receipt::MessageOut(MessageOutReceipt { + sender, recipient, .. + }) => ReceiptsMessageOutSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index), + sender: Some(sender.to_owned()), + recipient: Some(recipient.to_owned()), + } + .arc(), + Receipt::Mint(MintReceipt { + contract_id, + sub_id, + .. + }) => ReceiptsMintSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index), + contract_id: Some(contract_id.to_owned()), + sub_id: Some((*sub_id).to_owned()), + } + .arc(), + Receipt::Burn(BurnReceipt { + contract_id, + sub_id, + .. + }) => ReceiptsBurnSubject { + tx_id: Some(tx_id.to_owned()), + index: Some(index), + contract_id: Some(contract_id.to_owned()), + sub_id: Some((*sub_id).to_owned()), + } + .arc(), + } +} + +pub fn identifiers( + receipt: &Receipt, + tx_id: &Bytes32, + index: u8, +) -> Vec { + match receipt { + Receipt::Call(CallReceipt { + id: from, + to, + asset_id, + .. + }) => { + vec![ + Identifier::ContractID(tx_id.to_owned(), index, from.into()), + Identifier::ContractID(tx_id.to_owned(), index, to.into()), + Identifier::AssetID(tx_id.to_owned(), index, asset_id.into()), + ] + } + Receipt::Return(ReturnReceipt { id, .. }) + | Receipt::ReturnData(ReturnDataReceipt { id, .. }) + | Receipt::Panic(PanicReceipt { id, .. }) + | Receipt::Revert(RevertReceipt { id, .. }) + | Receipt::Log(LogReceipt { id, .. }) + | Receipt::LogData(LogDataReceipt { id, .. }) => { + vec![Identifier::ContractID(tx_id.to_owned(), index, id.into())] + } + Receipt::Transfer(TransferReceipt { + id: from, + to, + asset_id, + .. + }) => { + vec![ + Identifier::ContractID(tx_id.to_owned(), index, from.into()), + Identifier::ContractID(tx_id.to_owned(), index, to.into()), + Identifier::AssetID(tx_id.to_owned(), index, asset_id.into()), + ] + } + Receipt::TransferOut(TransferOutReceipt { + id: from, + to, + asset_id, + .. + }) => { + vec![ + Identifier::ContractID(tx_id.to_owned(), index, from.into()), + Identifier::ContractID(tx_id.to_owned(), index, to.into()), + Identifier::AssetID(tx_id.to_owned(), index, asset_id.into()), + ] + } + Receipt::MessageOut(MessageOutReceipt { + sender, recipient, .. + }) => { + vec![ + Identifier::Address(tx_id.to_owned(), index, sender.into()), + Identifier::Address(tx_id.to_owned(), index, recipient.into()), + ] + } + Receipt::Mint(MintReceipt { contract_id, .. }) + | Receipt::Burn(BurnReceipt { contract_id, .. 
}) => { + vec![Identifier::ContractID( + tx_id.to_owned(), + index, + contract_id.into(), + )] + } + _ => Vec::new(), + } +} diff --git a/crates/fuel-streams-executors/src/transactions.rs b/crates/fuel-streams-executors/src/transactions.rs new file mode 100644 index 00000000..396364ca --- /dev/null +++ b/crates/fuel-streams-executors/src/transactions.rs @@ -0,0 +1,82 @@ +use fuel_streams_core::prelude::*; +use rayon::prelude::*; +use tokio::task::JoinHandle; + +use crate::*; + +impl Executor { + pub fn process( + &self, + tx_item: (usize, &Transaction), + ) -> Vec>> { + let block_height = self.block_height(); + packets_from_tx(tx_item, &block_height) + .iter() + .map(|packet| self.publish(packet)) + .collect() + } +} + +fn packets_from_tx( + (index, tx): (usize, &Transaction), + block_height: &BlockHeight, +) -> Vec> { + let estimated_capacity = + 1 + tx.inputs.len() + tx.outputs.len() + tx.receipts.len(); + let tx_id = tx.id.clone(); + let tx_status = tx.status.clone(); + let receipts = tx.receipts.clone(); + + // Main subject + let mut packets = Vec::with_capacity(estimated_capacity); + packets.push( + tx.to_packet( + TransactionsSubject { + block_height: Some(block_height.to_owned()), + index: Some(index), + tx_id: Some(tx_id.to_owned()), + status: Some(tx_status), + kind: Some(tx.kind.to_owned()), + } + .arc(), + ), + ); + + let index_u8 = index as u8; + let mut additional_packets: Vec> = + rayon::iter::once(&tx.kind) + .flat_map(|kind| identifiers(tx, kind, &tx_id, index_u8)) + .chain( + tx.inputs.par_iter().flat_map(|input| { + inputs::identifiers(input, &tx_id, index_u8) + }), + ) + .chain(tx.outputs.par_iter().flat_map(|output| { + outputs::identifiers(output, tx, &tx_id, index_u8) + })) + .chain(receipts.par_iter().flat_map(|receipt| { + receipts::identifiers(receipt, &tx_id, index_u8) + })) + .map(|identifier| TransactionsByIdSubject::from(identifier).arc()) + .map(|subject| tx.to_packet(subject)) + .collect(); + + packets.append(&mut additional_packets); + packets +} + +fn identifiers( + tx: &Transaction, + kind: &TransactionKind, + tx_id: &Bytes32, + index: u8, +) -> Vec { + match kind { + TransactionKind::Script => { + let script_data = &tx.script_data.to_owned().unwrap_or_default().0; + let script_tag = sha256(script_data); + vec![Identifier::ScriptID(tx_id.to_owned(), index, script_tag)] + } + _ => Vec::new(), + } +} diff --git a/crates/fuel-streams-executors/src/utxos.rs b/crates/fuel-streams-executors/src/utxos.rs new file mode 100644 index 00000000..81fdaaf2 --- /dev/null +++ b/crates/fuel-streams-executors/src/utxos.rs @@ -0,0 +1,85 @@ +use fuel_streams_core::prelude::*; +use rayon::prelude::*; +use tokio::task::JoinHandle; + +use crate::*; + +impl Executor { + pub fn process( + &self, + tx: &Transaction, + ) -> Vec>> { + let tx_id = tx.id.clone(); + let packets = tx + .inputs + .par_iter() + .filter_map(|input| utxo_packet(input, &tx_id)) + .collect::>(); + + packets + .into_iter() + .map(|packet| self.publish(&packet)) + .collect() + } +} + +fn utxo_packet(input: &Input, tx_id: &Bytes32) -> Option> { + match input { + Input::Contract(InputContract { utxo_id, .. }) => { + let utxo = Utxo { + utxo_id: utxo_id.to_owned(), + tx_id: tx_id.to_owned(), + ..Default::default() + }; + let subject = UtxosSubject { + utxo_type: Some(UtxoType::Contract), + utxo_id: Some(utxo_id.into()), + } + .arc(); + Some(utxo.to_packet(subject)) + } + Input::Coin(InputCoin { + utxo_id, amount, .. 
+ }) => { + let utxo = Utxo { + utxo_id: utxo_id.to_owned(), + amount: Some(*amount), + tx_id: tx_id.to_owned(), + ..Default::default() + }; + let subject = UtxosSubject { + utxo_type: Some(UtxoType::Coin), + utxo_id: Some(utxo_id.into()), + } + .arc(); + Some(utxo.to_packet(subject)) + } + Input::Message( + input @ InputMessage { + amount, + nonce, + recipient, + sender, + data, + .. + }, + ) => { + let utxo_id = input.computed_utxo_id(); + let utxo = Utxo { + tx_id: tx_id.to_owned(), + utxo_id: utxo_id.to_owned(), + sender: Some(sender.to_owned()), + recipient: Some(recipient.to_owned()), + nonce: Some(nonce.to_owned()), + amount: Some(*amount), + data: Some(data.to_owned()), + }; + let subject = UtxosSubject { + utxo_type: Some(UtxoType::Message), + utxo_id: None, + } + .arc(); + Some(utxo.to_packet(subject)) + } + } +} diff --git a/crates/fuel-streams-nats/Cargo.toml b/crates/fuel-streams-nats/Cargo.toml new file mode 100644 index 00000000..60254899 --- /dev/null +++ b/crates/fuel-streams-nats/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "fuel-streams-nats" +description = "Strategies and adapters for storing fuel streams in NATS" +authors = { workspace = true } +keywords = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +repository = { workspace = true } +version = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +async-nats = { workspace = true } +displaydoc = { workspace = true } +dotenvy = { workspace = true } +rand = { workspace = true } +thiserror = { workspace = true } +tracing = { workspace = true } + +[dev-dependencies] +pretty_assertions = { workspace = true } +serde_json = { workspace = true } +tokio = { workspace = true, features = ["rt-multi-thread", "macros", "test-util"] } + +[features] +default = [] +test-helpers = [] +bench-helpers = [] diff --git a/crates/fuel-streams-storage/src/nats/error.rs b/crates/fuel-streams-nats/src/error.rs similarity index 100% rename from crates/fuel-streams-storage/src/nats/error.rs rename to crates/fuel-streams-nats/src/error.rs diff --git a/crates/fuel-streams-nats/src/lib.rs b/crates/fuel-streams-nats/src/lib.rs new file mode 100644 index 00000000..762ae550 --- /dev/null +++ b/crates/fuel-streams-nats/src/lib.rs @@ -0,0 +1,15 @@ +/// Houses shared APIs for interacting with NATS for sv-publisher and fuel-streams crates +/// As much as possible, the public interface/APIS should be agnostic of NATS. These can then be extended +/// in the sv-publisher and fuel-streams crates to provide a more opinionated API towards +/// their specific use-cases. 
+pub mod error; +pub mod nats_client; +pub mod nats_client_opts; +pub mod nats_namespace; +pub mod types; + +pub use error::*; +pub use nats_client::*; +pub use nats_client_opts::*; +pub use nats_namespace::*; +pub use types::*; diff --git a/crates/fuel-streams-storage/src/nats/nats_client.rs b/crates/fuel-streams-nats/src/nats_client.rs similarity index 86% rename from crates/fuel-streams-storage/src/nats/nats_client.rs rename to crates/fuel-streams-nats/src/nats_client.rs index 3c09323b..fd3474fa 100644 --- a/crates/fuel-streams-storage/src/nats/nats_client.rs +++ b/crates/fuel-streams-nats/src/nats_client.rs @@ -14,11 +14,10 @@ use super::{types::*, NatsClientOpts, NatsError, NatsNamespace}; /// Creating a new `NatsClient`: /// /// ```no_run -/// use fuel_streams_storage::nats::*; -/// use fuel_networks::FuelNetwork; +/// use fuel_streams_nats::*; /// /// async fn example() -> Result<(), Box> { -/// let opts = NatsClientOpts::new(FuelNetwork::Local); +/// let opts = NatsClientOpts::public_opts(); /// let client = NatsClient::connect(&opts).await?; /// Ok(()) /// } @@ -27,12 +26,11 @@ use super::{types::*, NatsClientOpts, NatsError, NatsNamespace}; /// Creating a key-value store: /// /// ```no_run -/// use fuel_streams_storage::nats::*; -/// use fuel_networks::FuelNetwork; +/// use fuel_streams_nats::*; /// use async_nats::jetstream::kv; /// /// async fn example() -> Result<(), Box> { -/// let opts = NatsClientOpts::new(FuelNetwork::Local); +/// let opts = NatsClientOpts::public_opts(); /// let client = NatsClient::connect(&opts).await?; /// let kv_config = kv::Config { /// bucket: "my-bucket".into(), @@ -66,7 +64,13 @@ impl NatsClient { source: e, } })?; - let jetstream = async_nats::jetstream::new(nats_client.to_owned()); + + let jetstream = match opts.domain.clone() { + None => async_nats::jetstream::new(nats_client.clone()), + Some(domain) => { + async_nats::jetstream::with_domain(nats_client.clone(), domain) + } + }; info!("Connected to NATS server at {}", url); Ok(Self { diff --git a/crates/fuel-streams-nats/src/nats_client_opts.rs b/crates/fuel-streams-nats/src/nats_client_opts.rs new file mode 100644 index 00000000..f6aaa0a3 --- /dev/null +++ b/crates/fuel-streams-nats/src/nats_client_opts.rs @@ -0,0 +1,227 @@ +use std::time::Duration; + +use async_nats::ConnectOptions; + +use super::NatsNamespace; + +#[derive(Debug, Clone, Eq, PartialEq, Default)] +pub enum NatsAuth { + Admin, + System, + #[default] + Public, + Custom(String, String), +} + +impl NatsAuth { + fn credentials_from_env(&self) -> (String, String) { + match self { + NatsAuth::Admin => ( + dotenvy::var("NATS_ADMIN_USER") + .expect("NATS_ADMIN_USER must be set"), + dotenvy::var("NATS_ADMIN_PASS") + .expect("NATS_ADMIN_PASS must be set"), + ), + NatsAuth::System => ( + dotenvy::var("NATS_SYSTEM_USER") + .expect("NATS_SYSTEM_USER must be set"), + dotenvy::var("NATS_SYSTEM_PASS") + .expect("NATS_SYSTEM_PASS must be set"), + ), + NatsAuth::Public => ("default_user".to_string(), "".to_string()), + NatsAuth::Custom(user, pass) => { + (user.to_string(), pass.to_string()) + } + } + } +} + +/// Configuration options for connecting to NATS +/// +/// # Examples +/// +/// ```no_run +/// use fuel_streams_nats::*; +/// +/// // Create with URL +/// let opts = NatsClientOpts::new("nats://localhost:4222".to_string(), Some(NatsAuth::Admin)); +/// +/// // Create with admin credentials from environment +/// let opts = NatsClientOpts::admin_opts(); +/// +/// // Create with system credentials from environment +/// let opts = 
+/// Configuration options for connecting to NATS
+///
+/// # Examples
+///
+/// ```no_run
+/// use fuel_streams_nats::*;
+///
+/// // Create with URL
+/// let opts = NatsClientOpts::new("nats://localhost:4222".to_string(), Some(NatsAuth::Admin));
+///
+/// // Create with admin credentials from environment
+/// let opts = NatsClientOpts::admin_opts();
+///
+/// // Create with system credentials from environment
+/// let opts = NatsClientOpts::system_opts();
+///
+/// // Create with public credentials
+/// let opts = NatsClientOpts::public_opts();
+/// ```
+///
+/// Customize options:
+///
+/// ```no_run
+/// use fuel_streams_nats::*;
+///
+/// let opts = NatsClientOpts::new("nats://localhost:4222".to_string(), Some(NatsAuth::Admin))
+///     .with_domain("mydomain")
+///     .with_user("myuser")
+///     .with_password("mypass")
+///     .with_timeout(10);
+/// ```
+#[derive(Debug, Clone)]
+pub struct NatsClientOpts {
+    /// The URL of the NATS server.
+    pub(crate) url: String,
+    /// The namespace used as a prefix for NATS streams, consumers, and subject names.
+    pub(crate) namespace: NatsNamespace,
+    /// The timeout in seconds for NATS operations.
+    pub(crate) timeout_secs: u64,
+    /// The domain to use for the NATS client.
+    pub(crate) domain: Option<String>,
+    /// The user to use for the NATS client.
+    pub(crate) user: Option<String>,
+    /// The password to use for the NATS client.
+    pub(crate) password: Option<String>,
+}
+
+impl NatsClientOpts {
+    pub fn new(url: String, auth: Option<NatsAuth>) -> Self {
+        let (user, pass) = auth.unwrap_or_default().credentials_from_env();
+        Self {
+            url,
+            namespace: NatsNamespace::default(),
+            timeout_secs: 5,
+            domain: None,
+            user: Some(user),
+            password: Some(pass),
+        }
+    }
+
+    pub fn from_env(auth: Option<NatsAuth>) -> Self {
+        let url = dotenvy::var("NATS_URL").expect("NATS_URL must be set");
+        Self::new(url, auth)
+    }
+
+    pub fn admin_opts() -> Self {
+        Self::from_env(Some(NatsAuth::Admin))
+    }
+
+    pub fn system_opts() -> Self {
+        Self::from_env(Some(NatsAuth::System))
+    }
+
+    pub fn public_opts() -> Self {
+        Self::from_env(Some(NatsAuth::Public))
+    }
+
+    pub fn get_url(&self) -> String {
+        self.url.clone()
+    }
+
+    pub fn with_url<S: Into<String>>(self, url: S) -> Self {
+        Self {
+            url: url.into(),
+            ..self
+        }
+    }
+
+    pub fn with_domain<S: Into<String>>(self, domain: S) -> Self {
+        Self {
+            domain: Some(domain.into()),
+            ..self
+        }
+    }
+
+    pub fn with_user<S: Into<String>>(self, user: S) -> Self {
+        Self {
+            user: Some(user.into()),
+            ..self
+        }
+    }
+
+    pub fn with_password<S: Into<String>>(self, password: S) -> Self {
+        Self {
+            password: Some(password.into()),
+            ..self
+        }
+    }
+
+    #[cfg(any(test, feature = "test-helpers"))]
+    pub fn with_rdn_namespace(self) -> Self {
+        let namespace = format!(r"namespace-{}", Self::random_int());
+        self.with_namespace(&namespace)
+    }
+
+    #[cfg(any(test, feature = "test-helpers"))]
+    pub fn with_namespace(self, namespace: &str) -> Self {
+        let namespace = NatsNamespace::Custom(namespace.to_string());
+        Self { namespace, ..self }
+    }
+
+    pub fn with_timeout(self, secs: u64) -> Self {
+        Self {
+            timeout_secs: secs,
+            ..self
+        }
+    }
+
+    pub(super) fn connect_opts(&self) -> ConnectOptions {
+        let opts = match (self.user.clone(), self.password.clone()) {
+            (Some(user), Some(pass)) => {
+                ConnectOptions::with_user_and_password(user, pass)
+            }
+            _ => ConnectOptions::new(),
+        };
+
+        opts.connection_timeout(Duration::from_secs(self.timeout_secs))
+            .max_reconnects(1)
+            .name(Self::conn_id())
+    }
+
+    // This will be useful for debugging and monitoring connections
+    fn conn_id() -> String {
+        format!(r"connection-{}", Self::random_int())
+    }
+
+    fn random_int() -> u32 {
+        use rand::Rng;
+        rand::thread_rng().gen()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::env;
+
+    use super::*;
+
+    #[test]
+    fn test_role_credentials() {
+        // Setup
+        env::set_var("NATS_ADMIN_USER", "admin");
+        env::set_var("NATS_ADMIN_PASS", "admin_pass");
+
+        // Test Admin role credentials
+        let (user, pass) = NatsAuth::Admin.credentials_from_env();
+        assert_eq!(user, "admin");
+        assert_eq!(pass,
"admin_pass"); + + // Cleanup + env::remove_var("NATS_ADMIN_USER"); + env::remove_var("NATS_ADMIN_PASS"); + } + + #[test] + fn test_from_env_with_role() { + // Setup + env::set_var("NATS_URL", "nats://localhost:4222"); + env::set_var("NATS_ADMIN_USER", "admin"); + env::set_var("NATS_ADMIN_PASS", "admin_pass"); + + // Test Admin role + let opts = NatsClientOpts::from_env(Some(NatsAuth::Admin)); + assert_eq!(opts.user, Some("admin".to_string())); + assert_eq!(opts.password, Some("admin_pass".to_string())); + + // Cleanup + env::remove_var("NATS_URL"); + env::remove_var("NATS_ADMIN_USER"); + env::remove_var("NATS_ADMIN_PASS"); + } +} diff --git a/crates/fuel-streams-storage/src/nats/nats_namespace.rs b/crates/fuel-streams-nats/src/nats_namespace.rs similarity index 92% rename from crates/fuel-streams-storage/src/nats/nats_namespace.rs rename to crates/fuel-streams-nats/src/nats_namespace.rs index c89f5fd2..947e8760 100644 --- a/crates/fuel-streams-storage/src/nats/nats_namespace.rs +++ b/crates/fuel-streams-nats/src/nats_namespace.rs @@ -7,7 +7,7 @@ static DEFAULT_NAMESPACE: &str = "fuel"; /// # Examples /// /// ``` -/// use fuel_streams_storage::nats::NatsNamespace; +/// use fuel_streams_nats::NatsNamespace; /// /// let default_namespace = NatsNamespace::default(); /// assert_eq!(default_namespace.to_string(), "fuel"); @@ -44,7 +44,7 @@ impl NatsNamespace { /// # Examples /// /// ``` - /// use fuel_streams_storage::nats::NatsNamespace; + /// use fuel_streams_nats::NatsNamespace; /// /// let namespace = NatsNamespace::default(); /// assert_eq!(namespace.subject_name("test"), "fuel.test"); @@ -61,7 +61,7 @@ impl NatsNamespace { /// # Examples /// /// ``` - /// use fuel_streams_storage::nats::NatsNamespace; + /// use fuel_streams_nats::NatsNamespace; /// /// let namespace = NatsNamespace::default(); /// assert_eq!(namespace.stream_name("test"), "fuel_test"); diff --git a/crates/fuel-streams-storage/src/nats/types.rs b/crates/fuel-streams-nats/src/types.rs similarity index 100% rename from crates/fuel-streams-storage/src/nats/types.rs rename to crates/fuel-streams-nats/src/types.rs diff --git a/crates/fuel-streams-publisher/Cargo.toml b/crates/fuel-streams-publisher/Cargo.toml deleted file mode 100644 index fb19b174..00000000 --- a/crates/fuel-streams-publisher/Cargo.toml +++ /dev/null @@ -1,74 +0,0 @@ -[package] -name = "fuel-streams-publisher" -description = "Fuel library for publishing data streams from events that happen in Fuel chain(s)" -authors = { workspace = true } -keywords = { workspace = true } -edition = { workspace = true } -homepage = { workspace = true } -license = { workspace = true } -repository = { workspace = true } -version = { workspace = true } -rust-version = { workspace = true } -publish = false - -[dependencies] -actix-cors = { workspace = true } -actix-server = { workspace = true } -actix-web = { workspace = true } -anyhow = { workspace = true } -async-nats = { workspace = true } -async-trait = { workspace = true } -chrono = { workspace = true } -clap = { workspace = true } -derive_more = { version = "1.0", features = ["full"] } -displaydoc = { workspace = true } -dotenvy = { workspace = true } -elasticsearch = "8.15.0-alpha.1" -fuel-core = { workspace = true } -fuel-core-bin = { workspace = true } -fuel-core-importer = { workspace = true } -fuel-core-services = { workspace = true } -fuel-core-storage = { workspace = true } -fuel-core-types = { workspace = true } -fuel-streams = { workspace = true, features = ["test-helpers"] } -fuel-streams-core = { workspace = true, 
features = ["test-helpers"] } -fuel-streams-storage = { workspace = true, features = ["test-helpers"] } -futures = { workspace = true } -num_cpus = "1.16" -parking_lot = { version = "0.12", features = ["serde"] } -prometheus = { version = "0.13", features = ["process"] } -rand = { workspace = true } -rayon = "1.10.0" -rust_decimal = { version = "1.13" } -serde = { workspace = true } -serde_json = { workspace = true } -serde_prometheus = { version = "0.2" } -sha2 = { workspace = true } -sysinfo = { version = "0.29" } -thiserror = "2.0" -tokio = { workspace = true } -tokio-stream = { workspace = true } -tracing = { workspace = true } -tracing-actix-web = { workspace = true } -url = "2.5" - -[dev-dependencies] -assert_matches = { workspace = true } -mockall = { workspace = true } -mockall_double = { workspace = true } - -[features] -default = [] -test-helpers = [] - -[target.x86_64-unknown-linux-gnu.dependencies] -openssl = { version = "0.10.68", features = ["vendored"] } - -[target.x86_64-unknown-linux-musl.dependencies] -openssl = { version = "0.10.68", features = ["vendored"] } - -[target.aarch64-unknown-linux-gnu.dependencies] -openssl = { version = "0.10.68", features = ["vendored"] } - -[target.aarch64-unknown-linux-musl.dependencies] -openssl = { version = "0.10.68", features = ["vendored"] } diff --git a/crates/fuel-streams-publisher/README.md b/crates/fuel-streams-publisher/README.md deleted file mode 100644 index 9d7eb196..00000000 --- a/crates/fuel-streams-publisher/README.md +++ /dev/null @@ -1,72 +0,0 @@ -
-[Logo]
-
-# Fuel Streams Publisher
-
-A binary that subscribes to events from a Fuel client or node and publishes streams consumable via the fuel-streams SDK
-
-[CI] [Coverage]
-
-📚 Documentation · 🐛 Report Bug · ✨ Request Feature
- -## 📝 About The Project - -The Fuel Streams Publisher is a binary that subscribes to events emitted from a Fuel client or node and publishes streams that can be consumed via the `fuel-streams` SDK. - -## ⚡️ Getting Started - -### Prerequisites - -- [Rust toolchain](https://www.rust-lang.org/tools/install) -- [Docker](https://www.docker.com/get-started/) (optional) - -### Development - -1. Generate the `KEYPAIR` environment variable: - - ```sh - fuel-core-keygen new --key-type peering -p - ``` - -2. Generate an `INFURA_API_KEY` from [Infura](https://app.infura.io/) - -3. Copy `.env.sample` to `.env` and update the `KEYPAIR` and `INFURA_API_KEY` with the values generated above - -4. Run the binary: - - - From the monorepo's root: - - ```sh - ./scripts/start-publisher.sh - ``` - - - Or using `make` and `docker`: - - ```sh - make start/publisher - ``` - -## 🤝 Contributing - -Contributions are welcome! Please feel free to submit a Pull Request. - -For more information on contributing, please see the [CONTRIBUTING.md](../../CONTRIBUTING.md) file in the root of the repository. - -## 📜 License - -This project is licensed under the `Apache-2.0` license. See [`LICENSE`](../../LICENSE) for more information. diff --git a/crates/fuel-streams-publisher/src/lib.rs b/crates/fuel-streams-publisher/src/lib.rs deleted file mode 100644 index dbbb3cc0..00000000 --- a/crates/fuel-streams-publisher/src/lib.rs +++ /dev/null @@ -1,22 +0,0 @@ -pub mod cli; -pub mod publisher; -pub mod server; -pub mod telemetry; - -use std::{env, sync::LazyLock}; - -pub use publisher::*; - -pub static PUBLISHER_MAX_THREADS: LazyLock = LazyLock::new(|| { - let available_cpus = num_cpus::get(); - let default_threads = (available_cpus / 3).max(1); // Use 1/3 of CPUs, minimum 1 - - env::var("PUBLISHER_MAX_THREADS") - .ok() - .and_then(|val| val.parse().ok()) - .unwrap_or(default_threads) -}); - -#[cfg(test)] -#[macro_use] -extern crate assert_matches; diff --git a/crates/fuel-streams-publisher/src/main.rs b/crates/fuel-streams-publisher/src/main.rs deleted file mode 100644 index cc667404..00000000 --- a/crates/fuel-streams-publisher/src/main.rs +++ /dev/null @@ -1,68 +0,0 @@ -use std::{ - net::{Ipv4Addr, SocketAddrV4}, - sync::Arc, -}; - -use clap::Parser; -use fuel_streams_publisher::{ - cli::Cli, - publisher::shutdown::ShutdownController, - server::{http::create_web_server, state::ServerState}, - shutdown, - telemetry::Telemetry, - FuelCore, - FuelCoreLike, -}; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - let cli = Cli::parse(); - let historical = cli.clone().historical; - - let fuel_core: Arc = - FuelCore::new(cli.fuel_core_config).await?; - fuel_core.start().await?; - - let telemetry = Telemetry::new().await?; - telemetry.start().await?; - - let publisher = fuel_streams_publisher::Publisher::new( - Arc::clone(&fuel_core), - telemetry.clone(), - ) - .await?; - - let state = ServerState::new(publisher.clone()).await; - // create the actix webserver - let server_addr = std::net::SocketAddr::V4(SocketAddrV4::new( - Ipv4Addr::UNSPECIFIED, - cli.telemetry_port, - )); - let server = create_web_server(state, server_addr)?; - // get server handle - let server_handle = server.handle(); - // spawn the server in the background - tokio::spawn(async move { - if let Err(err) = server.await { - tracing::error!("Actix Web server error: {:?}", err); - } - }); - tracing::info!("Publisher started."); - - let (shutdown_controller, shutdown_token) = - shutdown::get_controller_and_token(); - 
ShutdownController::spawn_signal_listener(&shutdown_controller); - - // run publisher until shutdown signal intercepted - if let Err(err) = publisher.run(shutdown_token, historical).await { - tracing::error!("Publisher encountered an error: {:?}", err); - } - tracing::info!("Publisher stopped"); - - // Await the Actix server shutdown - tracing::info!("Stopping actix server ..."); - server_handle.stop(true).await; - tracing::info!("Actix server stopped. Goodbye!"); - - Ok(()) -} diff --git a/crates/fuel-streams-publisher/src/publisher/blocks_streams.rs b/crates/fuel-streams-publisher/src/publisher/blocks_streams.rs deleted file mode 100644 index a40c0e00..00000000 --- a/crates/fuel-streams-publisher/src/publisher/blocks_streams.rs +++ /dev/null @@ -1,394 +0,0 @@ -use std::{cmp::max, sync::Arc}; - -use fuel_streams_core::prelude::*; -use futures::{ - stream::{self, BoxStream}, - StreamExt, - TryStreamExt, -}; -use tokio_stream::wrappers::BroadcastStream; - -use crate::{fuel_core_like::FuelCoreLike, fuel_streams::FuelStreamsExt}; - -pub fn build_blocks_stream<'a>( - fuel_streams: &'a Arc, - fuel_core: &'a Arc, - max_retained_blocks: u64, -) -> BoxStream<'a, anyhow::Result> { - #[derive(Debug, Default, Clone)] - struct State { - has_published_latest: bool, - has_reached_new_blocks_stream: bool, - } - let stream_state = State::default(); - - stream::try_unfold(stream_state, move |mut stream_state| { - let fuel_core = Arc::clone(fuel_core); - let fuel_streams = Arc::clone(fuel_streams); - - async move { - let latest_block_height = fuel_core.get_latest_block_height()?; - - let last_published_block_height = get_last_published_block_height( - fuel_streams, - latest_block_height, - max_retained_blocks, - ) - .await?; - - stream_state.has_published_latest = - latest_block_height == last_published_block_height; - - match stream_state { - State { - has_published_latest: false, - has_reached_new_blocks_stream: false, - } => { - let old_blocks_stream = stream::iter( - last_published_block_height..latest_block_height, - ) - .map({ - let fuel_core = fuel_core.clone(); - - move |height| { - fuel_core.get_sealed_block_by_height(height as u32) - } - }) - .map(Ok) - .boxed(); - - anyhow::Ok(Some((old_blocks_stream, stream_state.clone()))) - } - State { - has_published_latest: true, - has_reached_new_blocks_stream: false, - } => { - let new_blocks_stream = - BroadcastStream::new(fuel_core.blocks_subscription()) - .map(|import_result| { - import_result - .expect("Must get ImporterResult") - .sealed_block - .clone() - }) - .map(Ok) - .boxed(); - - stream_state.has_reached_new_blocks_stream = true; - anyhow::Ok(Some((new_blocks_stream, stream_state.clone()))) - } - State { - has_reached_new_blocks_stream: true, - .. - } => anyhow::Ok(None), - } - } - }) - .try_flatten() - .boxed() -} - -async fn get_last_published_block_height( - fuel_streams: Arc, - latest_block_height: u64, - max_retained_blocks: u64, -) -> anyhow::Result { - let max_last_published_block_height = - max(0, latest_block_height as i64 - max_retained_blocks as i64) as u64; - - Ok(fuel_streams - .get_last_published_block() - .await? 
- .map(|block| block.height.into()) - .map(|block_height: u64| { - max(block_height, max_last_published_block_height) - }) - .unwrap_or(max_last_published_block_height)) -} - -#[cfg(test)] -mod tests { - use std::{sync::Arc, time::Duration}; - - // TODO: Fix this leaky abstraction - use async_nats::{ - jetstream::stream::State as StreamState, - RequestErrorKind, - }; - use fuel_core::combined_database::CombinedDatabase; - use futures::StreamExt; - use mockall::{ - mock, - predicate::{self, *}, - }; - use tokio::{ - sync::broadcast, - time::{error::Elapsed, timeout}, - }; - - use super::*; - - #[tokio::test] - async fn test_no_old_blocks() { - let mut mock_fuel_core = MockFuelCoreLike::new(); - let mut mock_fuel_streams = MockFuelStreams::default(); - - mock_fuel_core - .expect_get_latest_block_height() - .returning(|| Ok(100)); - mock_fuel_streams - .expect_get_last_published_block() - .returning(|| Ok(Some(create_mock_block(100)))); // No old blocks - - mock_fuel_core - .expect_blocks_subscription() - .returning(move || { - let (empty_tx, rx) = broadcast::channel(1); - drop(empty_tx); - rx - }); - - let fuel_core: Arc = Arc::new(mock_fuel_core); - let fuel_streams: Arc = Arc::new(mock_fuel_streams); - - let mut stream = build_blocks_stream(&fuel_streams, &fuel_core, 10); - - assert!(stream.next().await.is_none()); - } - - #[tokio::test] - async fn test_old_blocks_stream() { - let mut mock_fuel_core = MockFuelCoreLike::new(); - let mut mock_fuel_streams = MockFuelStreams::default(); - - mock_fuel_core - .expect_get_latest_block_height() - .returning(|| Ok(105)); - mock_fuel_streams - .expect_get_last_published_block() - .returning(|| Ok(Some(create_mock_block(100)))); - for height in 100..105 { - mock_fuel_core - .expect_get_sealed_block_by_height() - .with(predicate::eq(height as u32)) - .returning(move |height| { - create_mock_fuel_core_sealed_block(height as u64) - }); - } - - let fuel_core: Arc = Arc::new(mock_fuel_core); - let fuel_streams: Arc = Arc::new(mock_fuel_streams); - - let mut stream = build_blocks_stream(&fuel_streams, &fuel_core, 10); - - for height in 100..105 { - let block = stream.next().await.unwrap().unwrap(); - assert_eq!(block.entity.header().consensus().height, height.into()); - } - } - - #[tokio::test] - async fn test_infinite_new_blocks_streams() { - let mut mock_fuel_core = MockFuelCoreLike::new(); - let mut mock_fuel_streams = MockFuelStreams::default(); - - mock_fuel_core - .expect_get_latest_block_height() - .returning(|| Ok(100)); - mock_fuel_streams - .expect_get_last_published_block() - .returning(|| Ok(Some(create_mock_block(100)))); // has published latest block already - - let (tx, _) = broadcast::channel(4); - - mock_fuel_core - .expect_blocks_subscription() - .returning(move || tx.clone().subscribe()); - - let fuel_core: Arc = Arc::new(mock_fuel_core); - let fuel_streams: Arc = Arc::new(mock_fuel_streams); - - let mut blocks_stream = - build_blocks_stream(&fuel_streams, &fuel_core, 10); - - assert_matches!( - timeout(Duration::from_secs(1), async { - blocks_stream.next().await - }) - .await, - Err(Elapsed { .. 
}) - ); - } - - #[tokio::test] - async fn test_new_blocks_streams_that_ends() { - let mut mock_fuel_core = MockFuelCoreLike::new(); - let mut mock_fuel_streams = MockFuelStreams::default(); - - mock_fuel_core - .expect_get_latest_block_height() - .returning(|| Ok(100)); - mock_fuel_streams - .expect_get_last_published_block() - .returning(|| Ok(Some(create_mock_block(100)))); // has published latest block already - - let (tx, _) = broadcast::channel(4); - - mock_fuel_core - .expect_blocks_subscription() - .returning(move || { - let tx = tx.clone(); - let subscription = tx.subscribe(); - - tx.send(create_mock_importer_result(101)).ok(); - tx.send(create_mock_importer_result(102)).ok(); - - subscription - }); - - let fuel_core: Arc = Arc::new(mock_fuel_core); - let fuel_streams: Arc = Arc::new(mock_fuel_streams); - - let mut stream = build_blocks_stream(&fuel_streams, &fuel_core, 10); - - for height in 101..=102 { - let block = stream.next().await.unwrap().unwrap(); - assert_eq!(block.entity.header().consensus().height, height.into()); - } - } - - #[tokio::test] - async fn test_get_last_published_block_height() { - let mut mock_fuel_streams = MockFuelStreams::default(); - - // Case 1: `get_last_published_block` returns Some(block) - mock_fuel_streams - .expect_get_last_published_block() - .returning(|| Ok(Some(create_mock_block(50)))); - - let fuel_streams = Arc::new(mock_fuel_streams); - - let result = - get_last_published_block_height(fuel_streams.clone(), 100, 40) - .await - .unwrap(); - assert_eq!(result, 60); // max(50, max_last_published_block_height=60) - - // Case 2: `get_last_published_block` returns None - let mut mock_fuel_streams = MockFuelStreams::default(); - mock_fuel_streams - .expect_get_last_published_block() - .returning(|| Ok(None)); - - let fuel_streams = Arc::new(mock_fuel_streams); - - let result = - get_last_published_block_height(fuel_streams.clone(), 100, 40) - .await - .unwrap(); - assert_eq!(result, 60); // No block, fallback to max_last_published_block_height - - // Case 3: `get_last_published_block` returns an error - let mut mock_fuel_streams = MockFuelStreams::default(); - mock_fuel_streams - .expect_get_last_published_block() - .returning(|| Err(anyhow::anyhow!("Error fetching block"))); - - let fuel_streams = Arc::new(mock_fuel_streams); - - let result = - get_last_published_block_height(fuel_streams.clone(), 100, 40) - .await; - assert!(result.is_err()); - assert_eq!(result.unwrap_err().to_string(), "Error fetching block"); - - // Case 4: `get_last_published_block` returns Some(block) where block.height < max_last_published_block_height - let mut mock_fuel_streams = MockFuelStreams::default(); - mock_fuel_streams - .expect_get_last_published_block() - .returning(|| Ok(Some(create_mock_block(30)))); - - let fuel_streams = Arc::new(mock_fuel_streams); - - let result = - get_last_published_block_height(fuel_streams.clone(), 100, 40) - .await - .unwrap(); - assert_eq!(result, 60); // max(30, max_last_published_block_height=60) - } - - mock! 
{ - FuelCoreLike {} - - #[async_trait::async_trait] - impl FuelCoreLike for FuelCoreLike { - fn get_latest_block_height(&self) -> anyhow::Result; - fn get_sealed_block_by_height(&self, height: u32) -> FuelCoreSealedBlock; - fn blocks_subscription(&self) -> broadcast::Receiver; - async fn start(&self) -> anyhow::Result<()>; - fn is_started(&self) -> bool; - async fn await_synced_at_least_once(&self, historical: bool) -> anyhow::Result<()>; - async fn stop(&self); - fn base_asset_id(&self) -> &FuelCoreAssetId; - fn chain_id(&self) -> &FuelCoreChainId; - fn database(&self) -> &CombinedDatabase; - async fn await_offchain_db_sync( - &self, - block_id: &FuelCoreBlockId, - ) -> anyhow::Result<()>; - fn get_receipts( - &self, - tx_id: &FuelCoreBytes32, - ) -> anyhow::Result>>; - } - } - - mock! { - FuelStreams {} - - #[async_trait::async_trait] - impl FuelStreamsExt for FuelStreams { - async fn get_last_published_block(&self) -> anyhow::Result>; - fn blocks(&self) -> &Stream; - fn transactions(&self) -> &Stream; - fn inputs(&self) -> &Stream; - fn outputs(&self) -> &Stream; - fn receipts(&self) -> &Stream; - fn utxos(&self) -> &Stream; - fn logs(&self) -> &Stream; - async fn get_consumers_and_state( - &self, - ) -> Result, StreamState)>, RequestErrorKind> ; - #[cfg(feature = "test-helpers")] - async fn is_empty(&self) -> bool; - } - } - - fn create_mock_importer_result(height: u64) -> FuelCoreImporterResult { - FuelCoreImporterResult { - shared_result: Arc::new(FuelCoreImportResult { - sealed_block: create_mock_fuel_core_sealed_block(height), - ..Default::default() - }), - #[cfg(feature = "test-helpers")] - changes: Arc::new(std::collections::HashMap::new()), - } - } - - fn create_mock_block(height: u64) -> Block { - Block::new( - &create_mock_fuel_core_sealed_block(height).entity, - FuelCoreConsensus::default().into(), - vec![], - ) - } - - fn create_mock_fuel_core_sealed_block(height: u64) -> FuelCoreSealedBlock { - let mut block = FuelCoreSealedBlock::default(); - - block.entity.header_mut().consensus_mut().height = - FuelCoreBlockHeight::new(height as u32); - - block - } -} diff --git a/crates/fuel-streams-publisher/src/publisher/fuel_streams.rs b/crates/fuel-streams-publisher/src/publisher/fuel_streams.rs deleted file mode 100644 index 05f9fd0f..00000000 --- a/crates/fuel-streams-publisher/src/publisher/fuel_streams.rs +++ /dev/null @@ -1,137 +0,0 @@ -use std::sync::Arc; - -use async_nats::{jetstream::stream::State as StreamState, RequestErrorKind}; -use fuel_streams::types::Log; -use fuel_streams_core::prelude::*; - -#[derive(Clone, Debug)] -/// Streams we currently support publishing to. 
-pub struct FuelStreams { - pub transactions: Stream, - pub blocks: Stream, - pub inputs: Stream, - pub outputs: Stream, - pub receipts: Stream, - pub utxos: Stream, - pub logs: Stream, -} - -impl FuelStreams { - pub async fn new( - nats_client: &NatsClient, - s3_client: &Arc, - ) -> Self { - Self { - transactions: Stream::::new(nats_client, s3_client) - .await, - blocks: Stream::::new(nats_client, s3_client).await, - inputs: Stream::::new(nats_client, s3_client).await, - outputs: Stream::::new(nats_client, s3_client).await, - receipts: Stream::::new(nats_client, s3_client).await, - utxos: Stream::::new(nats_client, s3_client).await, - logs: Stream::::new(nats_client, s3_client).await, - } - } -} - -#[async_trait::async_trait] -pub trait FuelStreamsExt: Sync + Send { - fn blocks(&self) -> &Stream; - fn transactions(&self) -> &Stream; - fn inputs(&self) -> &Stream; - fn outputs(&self) -> &Stream; - fn receipts(&self) -> &Stream; - fn utxos(&self) -> &Stream; - fn logs(&self) -> &Stream; - - async fn get_last_published_block(&self) -> anyhow::Result>; - - fn subjects_wildcards(&self) -> &[&'static str] { - &[ - TransactionsSubject::WILDCARD, - BlocksSubject::WILDCARD, - InputsByIdSubject::WILDCARD, - InputsCoinSubject::WILDCARD, - InputsMessageSubject::WILDCARD, - InputsContractSubject::WILDCARD, - ReceiptsLogSubject::WILDCARD, - ReceiptsBurnSubject::WILDCARD, - ReceiptsByIdSubject::WILDCARD, - ReceiptsCallSubject::WILDCARD, - ReceiptsMintSubject::WILDCARD, - ReceiptsPanicSubject::WILDCARD, - ReceiptsReturnSubject::WILDCARD, - ReceiptsRevertSubject::WILDCARD, - ReceiptsLogDataSubject::WILDCARD, - ReceiptsTransferSubject::WILDCARD, - ReceiptsMessageOutSubject::WILDCARD, - ReceiptsReturnDataSubject::WILDCARD, - ReceiptsTransferOutSubject::WILDCARD, - ReceiptsScriptResultSubject::WILDCARD, - UtxosSubject::WILDCARD, - LogsSubject::WILDCARD, - ] - } - - async fn get_consumers_and_state( - &self, - ) -> Result, StreamState)>, RequestErrorKind>; - - #[cfg(feature = "test-helpers")] - async fn is_empty(&self) -> bool; -} - -#[async_trait::async_trait] -impl FuelStreamsExt for FuelStreams { - fn blocks(&self) -> &Stream { - &self.blocks - } - fn transactions(&self) -> &Stream { - &self.transactions - } - fn inputs(&self) -> &Stream { - &self.inputs - } - fn outputs(&self) -> &Stream { - &self.outputs - } - fn receipts(&self) -> &Stream { - &self.receipts - } - fn utxos(&self) -> &Stream { - &self.utxos - } - fn logs(&self) -> &Stream { - &self.logs - } - - async fn get_last_published_block(&self) -> anyhow::Result> { - Ok(self - .blocks - .get_last_published(BlocksSubject::WILDCARD) - .await?) 
- } - - async fn get_consumers_and_state( - &self, - ) -> Result, StreamState)>, RequestErrorKind> { - Ok(vec![ - self.transactions.get_consumers_and_state().await?, - self.blocks.get_consumers_and_state().await?, - self.inputs.get_consumers_and_state().await?, - self.outputs.get_consumers_and_state().await?, - self.receipts.get_consumers_and_state().await?, - self.utxos.get_consumers_and_state().await?, - self.logs.get_consumers_and_state().await?, - ]) - } - - #[cfg(feature = "test-helpers")] - async fn is_empty(&self) -> bool { - self.blocks.is_empty(BlocksSubject::WILDCARD).await - && self - .transactions - .is_empty(TransactionsSubject::WILDCARD) - .await - } -} diff --git a/crates/fuel-streams-publisher/src/publisher/mod.rs b/crates/fuel-streams-publisher/src/publisher/mod.rs deleted file mode 100644 index 2873857c..00000000 --- a/crates/fuel-streams-publisher/src/publisher/mod.rs +++ /dev/null @@ -1,300 +0,0 @@ -pub mod fuel_core_like; -pub mod fuel_streams; -pub mod payloads; -pub mod shutdown; - -mod blocks_streams; - -use std::sync::Arc; - -use anyhow::Context; -use blocks_streams::build_blocks_stream; -pub use fuel_core_like::{FuelCore, FuelCoreLike}; -pub use fuel_streams::{FuelStreams, FuelStreamsExt}; -use fuel_streams_core::prelude::*; -use fuel_streams_storage::S3Client; -use futures::{future::try_join_all, stream::FuturesUnordered, StreamExt}; -use tokio::sync::Semaphore; - -use super::{ - payloads::blocks, - shutdown::{ShutdownToken, GRACEFUL_SHUTDOWN_TIMEOUT}, - telemetry::Telemetry, - PUBLISHER_MAX_THREADS, -}; - -#[derive(Clone)] -pub struct Publisher { - pub fuel_core: Arc, - pub nats_client: NatsClient, - pub fuel_streams: Arc, - pub telemetry: Arc, - pub s3_client: Arc, -} - -impl Publisher { - pub async fn new( - fuel_core: Arc, - telemetry: Arc, - ) -> anyhow::Result { - let nats_client_opts = NatsClientOpts::admin_opts(); - let nats_client = NatsClient::connect(&nats_client_opts).await?; - - let s3_client_opts = S3ClientOpts::admin_opts(); - let s3_client = Arc::new(S3Client::new(&s3_client_opts).await?); - if let Err(e) = s3_client.create_bucket().await { - tracing::error!("Failed to create S3 bucket: {:?}", e); - } - - let fuel_streams = - Arc::new(FuelStreams::new(&nats_client, &s3_client).await); - - telemetry.record_streams_count( - fuel_core.chain_id(), - fuel_streams.subjects_wildcards().len(), - ); - - Ok(Publisher { - fuel_core, - fuel_streams, - nats_client, - telemetry, - s3_client, - }) - } - - pub fn is_healthy(&self) -> bool { - // TODO: Update this condition to include more health checks - self.fuel_core.is_started() && self.nats_client.is_connected() - } - - #[cfg(feature = "test-helpers")] - pub async fn new_for_testing( - nats_client: &NatsClient, - s3_client: &Arc, - fuel_core: Arc, - ) -> anyhow::Result { - Ok(Publisher { - fuel_core, - fuel_streams: Arc::new( - FuelStreams::new(nats_client, s3_client).await, - ), - nats_client: nats_client.clone(), - telemetry: Telemetry::new().await?, - s3_client: Arc::clone(s3_client), - }) - } - - #[cfg(feature = "test-helpers")] - pub fn get_fuel_streams(&self) -> &Arc<(dyn FuelStreamsExt + 'static)> { - &self.fuel_streams - } - - async fn shutdown_services_with_timeout(&self) -> anyhow::Result<()> { - tokio::time::timeout(GRACEFUL_SHUTDOWN_TIMEOUT, async { - Publisher::flush_await_all_streams(&self.nats_client).await; - self.fuel_core.stop().await; - }) - .await?; - - Ok(()) - } - - async fn flush_await_all_streams(nats_client: &NatsClient) { - tracing::info!("Flushing in-flight messages to nats ..."); - 
match nats_client.nats_client.flush().await { - Ok(_) => { - tracing::info!("Flushed all streams successfully!"); - } - Err(e) => { - tracing::error!("Failed to flush all streams: {:?}", e); - } - } - } - - const MAX_RETAINED_BLOCKS: u64 = 100; - pub async fn run( - &self, - mut shutdown_token: ShutdownToken, - historical: bool, - ) -> anyhow::Result<()> { - tracing::info!("Awaiting FuelCore Sync..."); - - self.fuel_core - .await_synced_at_least_once(historical) - .await?; - - tracing::info!("FuelCore has synced successfully!"); - - tracing::info!("Publishing started..."); - - let mut blocks_stream = build_blocks_stream( - &self.fuel_streams, - &self.fuel_core, - Self::MAX_RETAINED_BLOCKS, - ); - - loop { - tokio::select! { - Some(sealed_block) = blocks_stream.next() => { - let sealed_block = sealed_block.context("block streams failed to produce sealed block")?; - - tracing::info!("Processing blocks stream"); - - let fuel_core = &self.fuel_core; - let (block, block_producer) = - fuel_core.get_block_and_producer(sealed_block); - - // TODO: Avoid awaiting Offchain DB sync for all streams by grouping in their own service - fuel_core - .await_offchain_db_sync(&block.id()) - .await - .context("Failed to await Offchain DB sync")?; - - if let Err(err) = self.publish(&block, &block_producer).await { - tracing::error!("Failed to publish block data: {}", err); - self.telemetry.record_failed_publishing(self.fuel_core.chain_id(), &block_producer); - } - - }, - shutdown = shutdown_token.wait_for_shutdown() => { - if shutdown { - tracing::info!("Shutdown signal received. Stopping services ..."); - self.shutdown_services_with_timeout().await?; - break; - } - }, - }; - } - - tracing::info!("Publishing stopped successfully!"); - - Ok(()) - } - - async fn publish( - &self, - block: &FuelCoreBlock, - block_producer: &Address, - ) -> anyhow::Result<()> { - let start_time = std::time::Instant::now(); - - let semaphore = Arc::new(Semaphore::new(*PUBLISHER_MAX_THREADS)); - let chain_id = Arc::new(*self.fuel_core.chain_id()); - let base_asset_id = Arc::new(*self.fuel_core.base_asset_id()); - let block_producer = Arc::new(block_producer.clone()); - let block_height = block.header().consensus().height; - let txs = block.transactions(); - let transaction_ids = txs - .iter() - .map(|tx| tx.id(&chain_id).into()) - .collect::>(); - - let consensus: Consensus = - self.fuel_core.get_consensus(&block_height)?.into(); - - let fuel_core = &*self.fuel_core; - let offchain_database = fuel_core.offchain_database()?; - - let fuel_streams = &*self.fuel_streams; - let blocks_stream = Arc::new(fuel_streams.blocks().to_owned()); - - let opts = &Arc::new(PublishOpts { - semaphore, - chain_id, - base_asset_id, - block_producer: Arc::clone(&block_producer), - block_height: Arc::new(block_height.into()), - telemetry: self.telemetry.clone(), - consensus: Arc::new(consensus), - offchain_database, - }); - - let publish_tasks = payloads::transactions::publish_all_tasks( - txs, - fuel_streams, - opts, - fuel_core, - )? 
- .into_iter() - .chain(std::iter::once(blocks::publish_task( - block, - blocks_stream, - opts, - transaction_ids, - ))) - .collect::>(); - - try_join_all(publish_tasks).await?; - - let elapsed = start_time.elapsed(); - tracing::info!( - "Published streams for BlockHeight: {} in {:?}", - *block_height, - elapsed - ); - - Ok(()) - } -} - -use tokio::task::JoinHandle; - -use crate::fuel_core_like::OffchainDatabase; - -#[derive(Clone)] -pub struct PublishOpts { - pub semaphore: Arc, - pub chain_id: Arc, - pub base_asset_id: Arc, - pub block_producer: Arc
, - pub block_height: Arc, - pub telemetry: Arc, - pub consensus: Arc, - pub offchain_database: Arc, -} - -pub fn publish( - packet: &PublishPacket, - stream: Arc>, - opts: &Arc, -) -> JoinHandle> { - let opts = Arc::clone(opts); - let packet = packet.clone(); - let telemetry = Arc::clone(&opts.telemetry); - let wildcard = packet.subject.wildcard(); - - tokio::spawn(async move { - let _permit = opts.semaphore.acquire().await?; - - // Publish to NATS - match stream.publish(&packet).await { - Ok(published_data_size) => { - telemetry.log_info(&format!( - "Successfully published for stream: {}", - wildcard - )); - telemetry.update_publisher_success_metrics( - wildcard, - published_data_size, - &opts.chain_id, - &opts.block_producer, - ); - - Ok(()) - } - Err(e) => { - tracing::error!("Failed to publish: {:?}", e); - telemetry.log_error(&e.to_string()); - telemetry.update_publisher_error_metrics( - wildcard, - &opts.chain_id, - &opts.block_producer, - &e.to_string(), - ); - - anyhow::bail!("Failed to publish: {}", e.to_string()) - } - } - }) -} diff --git a/crates/fuel-streams-publisher/src/publisher/payloads/blocks.rs b/crates/fuel-streams-publisher/src/publisher/payloads/blocks.rs deleted file mode 100644 index 5e59f728..00000000 --- a/crates/fuel-streams-publisher/src/publisher/payloads/blocks.rs +++ /dev/null @@ -1,29 +0,0 @@ -use std::sync::Arc; - -use fuel_streams_core::prelude::*; -use tokio::task::JoinHandle; - -use crate::{publish, PublishOpts}; - -pub fn publish_task( - block: &FuelCoreBlock, - stream: Arc>, - opts: &Arc, - transaction_ids: Vec, -) -> JoinHandle> { - let block_height = (*opts.block_height).clone(); - let block_producer = (*opts.block_producer).clone(); - let consensus = (*opts.consensus).clone(); - - let block = Block::new(block, consensus, transaction_ids); - let packet = PublishPacket::new( - block, - BlocksSubject { - height: Some(block_height), - producer: Some(block_producer), - } - .arc(), - ); - - publish(&packet, stream, opts) -} diff --git a/crates/fuel-streams-publisher/src/publisher/payloads/inputs.rs b/crates/fuel-streams-publisher/src/publisher/payloads/inputs.rs deleted file mode 100644 index 88048d54..00000000 --- a/crates/fuel-streams-publisher/src/publisher/payloads/inputs.rs +++ /dev/null @@ -1,178 +0,0 @@ -use std::sync::Arc; - -use fuel_core_types::fuel_tx::input::{ - coin::{CoinPredicate, CoinSigned}, - message::{ - MessageCoinPredicate, - MessageCoinSigned, - MessageDataPredicate, - MessageDataSigned, - }, -}; -use fuel_streams_core::prelude::*; -use rayon::prelude::*; -use tokio::task::JoinHandle; - -use crate::{publish, PublishOpts}; - -pub fn publish_tasks( - tx: &FuelCoreTransaction, - tx_id: &Bytes32, - stream: &Stream, - opts: &Arc, -) -> Vec>> { - let packets = tx - .inputs() - .par_iter() - .enumerate() - .flat_map(move |(index, input)| { - let main_subject = main_subject(input, tx_id.clone(), index); - let identifier_subjects = identifiers(input, tx_id, index as u8) - .into_par_iter() - .map(|identifier| identifier.into()) - .map(|subject: InputsByIdSubject| subject.arc()) - .collect::>(); - - let input: Input = input.into(); - - let mut packets = vec![input.to_packet(main_subject)]; - packets.extend( - identifier_subjects - .into_iter() - .map(|subject| input.to_packet(subject)), - ); - - packets - }) - .collect::>(); - - packets - .iter() - .map(|packet| publish(packet, Arc::new(stream.to_owned()), opts)) - .collect() -} - -fn main_subject( - input: &FuelCoreInput, - tx_id: Bytes32, - index: usize, -) -> Arc { - match input { - 
FuelCoreInput::Contract(contract) => { - let contract_id = contract.contract_id; - - InputsContractSubject { - tx_id: Some(tx_id), - index: Some(index), - contract_id: Some(contract_id.into()), - } - .arc() - } - FuelCoreInput::CoinSigned(CoinSigned { - owner, asset_id, .. - }) - | FuelCoreInput::CoinPredicate(CoinPredicate { - owner, asset_id, .. - }) => InputsCoinSubject { - tx_id: Some(tx_id), - index: Some(index), - owner: Some(owner.into()), - asset_id: Some(asset_id.into()), - } - .arc(), - FuelCoreInput::MessageCoinSigned(MessageCoinSigned { - sender, - recipient, - .. - }) - | FuelCoreInput::MessageCoinPredicate(MessageCoinPredicate { - sender, - recipient, - .. - }) - | FuelCoreInput::MessageDataSigned(MessageDataSigned { - sender, - recipient, - .. - }) - | FuelCoreInput::MessageDataPredicate(MessageDataPredicate { - sender, - recipient, - .. - }) => InputsMessageSubject { - tx_id: Some(tx_id), - index: Some(index), - sender: Some(sender.into()), - recipient: Some(recipient.into()), - } - .arc(), - } -} - -pub fn identifiers( - input: &FuelCoreInput, - tx_id: &Bytes32, - index: u8, -) -> Vec { - let mut identifiers = match input { - FuelCoreInput::CoinSigned(CoinSigned { - owner, asset_id, .. - }) => { - vec![ - Identifier::Address(tx_id.to_owned(), index, owner.into()), - Identifier::AssetID(tx_id.to_owned(), index, asset_id.into()), - ] - } - FuelCoreInput::CoinPredicate(CoinPredicate { - owner, asset_id, .. - }) => { - vec![ - Identifier::Address(tx_id.to_owned(), index, owner.into()), - Identifier::AssetID(tx_id.to_owned(), index, asset_id.into()), - ] - } - FuelCoreInput::MessageCoinSigned(MessageCoinSigned { - sender, - recipient, - .. - }) - | FuelCoreInput::MessageCoinPredicate(MessageCoinPredicate { - sender, - recipient, - .. - }) - | FuelCoreInput::MessageDataSigned(MessageDataSigned { - sender, - recipient, - .. - }) - | FuelCoreInput::MessageDataPredicate(MessageDataPredicate { - sender, - recipient, - .. - }) => { - vec![ - Identifier::Address(tx_id.to_owned(), index, sender.into()), - Identifier::Address(tx_id.to_owned(), index, recipient.into()), - ] - } - FuelCoreInput::Contract(contract) => { - vec![Identifier::ContractID( - tx_id.to_owned(), - index, - contract.contract_id.into(), - )] - } - }; - - if let Some((predicate_bytecode, _, _)) = input.predicate() { - let predicate_tag = super::sha256(predicate_bytecode); - identifiers.push(Identifier::PredicateID( - tx_id.to_owned(), - index, - predicate_tag, - )); - } - - identifiers -} diff --git a/crates/fuel-streams-publisher/src/publisher/payloads/logs.rs b/crates/fuel-streams-publisher/src/publisher/payloads/logs.rs deleted file mode 100644 index d443c1fc..00000000 --- a/crates/fuel-streams-publisher/src/publisher/payloads/logs.rs +++ /dev/null @@ -1,39 +0,0 @@ -use std::sync::Arc; - -use fuel_streams_core::prelude::*; -use rayon::prelude::*; -use tokio::task::JoinHandle; - -use crate::{publish, PublishOpts}; - -pub fn publish_tasks( - tx_id: &Bytes32, - stream: &Stream, - opts: &Arc, - receipts: &Vec, -) -> Vec>> { - let block_height = (*opts.block_height).clone(); - let packets = receipts - .par_iter() - .enumerate() - .filter_map(|(index, receipt)| match receipt { - FuelCoreReceipt::Log { id, .. } - | FuelCoreReceipt::LogData { id, .. 
} => Some(PublishPacket::new( - receipt.to_owned().into(), - LogsSubject { - block_height: Some(block_height.clone()), - tx_id: Some(tx_id.to_owned()), - receipt_index: Some(index), - log_id: Some((*id).into()), - } - .arc(), - )), - _ => None, - }) - .collect::>(); - - packets - .iter() - .map(|packet| publish(packet, Arc::new(stream.to_owned()), opts)) - .collect() -} diff --git a/crates/fuel-streams-publisher/src/publisher/payloads/mod.rs b/crates/fuel-streams-publisher/src/publisher/payloads/mod.rs deleted file mode 100644 index ac0ce74a..00000000 --- a/crates/fuel-streams-publisher/src/publisher/payloads/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -pub mod blocks; -pub mod inputs; -pub mod logs; -pub mod outputs; -pub mod receipts; -pub mod transactions; -pub mod utxos; - -use fuel_streams_core::prelude::Bytes32; -use sha2::{Digest, Sha256}; - -pub fn sha256(bytes: &[u8]) -> Bytes32 { - let mut sha256 = Sha256::new(); - sha256.update(bytes); - let bytes: [u8; 32] = sha256 - .finalize() - .as_slice() - .try_into() - .expect("Must be 32 bytes"); - - bytes.into() -} diff --git a/crates/fuel-streams-publisher/src/publisher/payloads/outputs.rs b/crates/fuel-streams-publisher/src/publisher/payloads/outputs.rs deleted file mode 100644 index 0a61a974..00000000 --- a/crates/fuel-streams-publisher/src/publisher/payloads/outputs.rs +++ /dev/null @@ -1,161 +0,0 @@ -use std::sync::Arc; - -use fuel_streams_core::prelude::*; -use rayon::prelude::*; -use tokio::task::JoinHandle; - -use crate::{publish, PublishOpts}; - -pub fn publish_tasks( - tx: &FuelCoreTransaction, - tx_id: &Bytes32, - stream: &Stream, - opts: &Arc, -) -> Vec>> { - let packets: Vec> = tx - .outputs() - .par_iter() - .enumerate() - .flat_map(|(index, output)| { - let main_subject = main_subject(output, tx, tx_id, index); - let identifier_subjects = - identifiers(output, tx, tx_id, index as u8) - .into_par_iter() - .map(|identifier| identifier.into()) - .map(|subject: OutputsByIdSubject| subject.arc()) - .collect::>(); - - let output: Output = output.into(); - - let mut packets = vec![output.to_packet(main_subject)]; - packets.extend( - identifier_subjects - .into_iter() - .map(|subject| output.to_packet(subject)), - ); - - packets - }) - .collect(); - - packets - .iter() - .map(|packet| publish(packet, Arc::new(stream.to_owned()), opts)) - .collect() -} - -fn main_subject( - output: &FuelCoreOutput, - transaction: &FuelCoreTransaction, - tx_id: &Bytes32, - index: usize, -) -> Arc { - match output { - FuelCoreOutput::Coin { to, asset_id, .. } => OutputsCoinSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index as u16), - to: Some((*to).into()), - asset_id: Some((*asset_id).into()), - } - .arc(), - - FuelCoreOutput::Contract(contract) => { - let contract_id = - match find_output_contract_id(transaction, contract) { - Some(contract_id) => contract_id, - None => { - tracing::warn!( - "Contract ID not found for output: {:?}", - output - ); - - Default::default() - } - }; - - OutputsContractSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index as u16), - contract_id: Some(contract_id.into()), - } - .arc() - } - - FuelCoreOutput::Change { to, asset_id, .. } => OutputsChangeSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index as u16), - to: Some((*to).into()), - asset_id: Some((*asset_id).into()), - } - .arc(), - - FuelCoreOutput::Variable { to, asset_id, .. 
} => { - OutputsVariableSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index as u16), - to: Some((*to).into()), - asset_id: Some((*asset_id).into()), - } - .arc() - } - - FuelCoreOutput::ContractCreated { contract_id, .. } => { - OutputsContractCreatedSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index as u16), - contract_id: Some((*contract_id).into()), - } - .arc() - } - } -} - -pub fn identifiers( - output: &FuelCoreOutput, - tx: &FuelCoreTransaction, - tx_id: &Bytes32, - index: u8, -) -> Vec { - match output { - FuelCoreOutput::Change { to, asset_id, .. } - | FuelCoreOutput::Variable { to, asset_id, .. } - | FuelCoreOutput::Coin { to, asset_id, .. } => { - vec![ - Identifier::Address(tx_id.to_owned(), index, to.into()), - Identifier::AssetID(tx_id.to_owned(), index, asset_id.into()), - ] - } - FuelCoreOutput::Contract(contract) => { - find_output_contract_id(tx, contract) - .map(|contract_id| { - vec![Identifier::ContractID( - tx_id.to_owned(), - index, - contract_id.into(), - )] - }) - .unwrap_or_default() - } - FuelCoreOutput::ContractCreated { contract_id, .. } => { - vec![Identifier::ContractID( - tx_id.to_owned(), - index, - contract_id.into(), - )] - } - } -} - -pub fn find_output_contract_id( - tx: &FuelCoreTransaction, - contract: &FuelCoreOutputContract, -) -> Option { - let input_index = contract.input_index as usize; - tx.inputs().get(input_index).and_then(|input| { - if let FuelCoreInput::Contract(input_contract) = input { - Some(input_contract.contract_id) - } else { - None - } - }) -} diff --git a/crates/fuel-streams-publisher/src/publisher/payloads/receipts.rs b/crates/fuel-streams-publisher/src/publisher/payloads/receipts.rs deleted file mode 100644 index e0936f5f..00000000 --- a/crates/fuel-streams-publisher/src/publisher/payloads/receipts.rs +++ /dev/null @@ -1,235 +0,0 @@ -use std::sync::Arc; - -use fuel_streams_core::prelude::*; -use rayon::prelude::*; -use tokio::task::JoinHandle; - -use crate::{publish, PublishOpts}; - -pub fn publish_tasks( - tx_id: &Bytes32, - stream: &Stream, - opts: &Arc, - receipts: &Vec, -) -> Vec>> { - let packets: Vec> = receipts - .par_iter() - .enumerate() - .flat_map(|(index, receipt)| { - let main_subject = main_subject(receipt, tx_id, index); - let identifier_subjects = identifiers(receipt, tx_id, index as u8) - .into_par_iter() - .map(|identifier| identifier.into()) - .map(|subject: ReceiptsByIdSubject| subject.arc()) - .collect::>(); - - let receipt: Receipt = receipt.into(); - - let mut packets = vec![receipt.to_packet(main_subject)]; - packets.extend( - identifier_subjects - .into_iter() - .map(|subject| receipt.to_packet(subject)), - ); - - packets - }) - .collect(); - - packets - .iter() - .map(|packet| publish(packet, Arc::new(stream.to_owned()), opts)) - .collect() -} - -fn main_subject( - receipt: &FuelCoreReceipt, - tx_id: &Bytes32, - index: usize, -) -> Arc { - match receipt { - FuelCoreReceipt::Call { - id: from, - to, - asset_id, - .. - } => ReceiptsCallSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index), - from: Some(from.into()), - to: Some(to.into()), - asset_id: Some(asset_id.into()), - } - .arc(), - FuelCoreReceipt::Return { id, .. } => ReceiptsReturnSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index), - id: Some(id.into()), - } - .arc(), - FuelCoreReceipt::ReturnData { id, .. } => ReceiptsReturnDataSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index), - id: Some(id.into()), - } - .arc(), - FuelCoreReceipt::Panic { id, .. 
} => ReceiptsPanicSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index), - id: Some(id.into()), - } - .arc(), - FuelCoreReceipt::Revert { id, .. } => ReceiptsRevertSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index), - id: Some(id.into()), - } - .arc(), - FuelCoreReceipt::Log { id, .. } => ReceiptsLogSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index), - id: Some(id.into()), - } - .arc(), - FuelCoreReceipt::LogData { id, .. } => ReceiptsLogDataSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index), - id: Some(id.into()), - } - .arc(), - FuelCoreReceipt::Transfer { - id: from, - to, - asset_id, - .. - } => ReceiptsTransferSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index), - from: Some(from.into()), - to: Some(to.into()), - asset_id: Some(asset_id.into()), - } - .arc(), - - FuelCoreReceipt::TransferOut { - id: from, - to, - asset_id, - .. - } => ReceiptsTransferOutSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index), - from: Some(from.into()), - to: Some(to.into()), - asset_id: Some(asset_id.into()), - } - .arc(), - - FuelCoreReceipt::ScriptResult { .. } => ReceiptsScriptResultSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index), - } - .arc(), - FuelCoreReceipt::MessageOut { - sender, recipient, .. - } => ReceiptsMessageOutSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index), - sender: Some(sender.into()), - recipient: Some(recipient.into()), - } - .arc(), - FuelCoreReceipt::Mint { - contract_id, - sub_id, - .. - } => ReceiptsMintSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index), - contract_id: Some(contract_id.into()), - sub_id: Some((*sub_id).into()), - } - .arc(), - FuelCoreReceipt::Burn { - contract_id, - sub_id, - .. - } => ReceiptsBurnSubject { - tx_id: Some(tx_id.to_owned()), - index: Some(index), - contract_id: Some(contract_id.into()), - sub_id: Some((*sub_id).into()), - } - .arc(), - } -} - -pub fn identifiers( - receipt: &FuelCoreReceipt, - tx_id: &Bytes32, - index: u8, -) -> Vec { - match receipt { - FuelCoreReceipt::Call { - id: from, - to, - asset_id, - .. - } => { - vec![ - Identifier::ContractID(tx_id.to_owned(), index, from.into()), - Identifier::ContractID(tx_id.to_owned(), index, to.into()), - Identifier::AssetID(tx_id.to_owned(), index, asset_id.into()), - ] - } - FuelCoreReceipt::Return { id, .. } - | FuelCoreReceipt::ReturnData { id, .. } - | FuelCoreReceipt::Panic { id, .. } - | FuelCoreReceipt::Revert { id, .. } - | FuelCoreReceipt::Log { id, .. } - | FuelCoreReceipt::LogData { id, .. } => { - vec![Identifier::ContractID(tx_id.to_owned(), index, id.into())] - } - FuelCoreReceipt::Transfer { - id: from, - to, - asset_id, - .. - } => { - vec![ - Identifier::ContractID(tx_id.to_owned(), index, from.into()), - Identifier::ContractID(tx_id.to_owned(), index, to.into()), - Identifier::AssetID(tx_id.to_owned(), index, asset_id.into()), - ] - } - FuelCoreReceipt::TransferOut { - id: from, - to, - asset_id, - .. - } => { - vec![ - Identifier::ContractID(tx_id.to_owned(), index, from.into()), - Identifier::ContractID(tx_id.to_owned(), index, to.into()), - Identifier::AssetID(tx_id.to_owned(), index, asset_id.into()), - ] - } - FuelCoreReceipt::MessageOut { - sender, recipient, .. - } => { - vec![ - Identifier::Address(tx_id.to_owned(), index, sender.into()), - Identifier::Address(tx_id.to_owned(), index, recipient.into()), - ] - } - FuelCoreReceipt::Mint { contract_id, .. } - | FuelCoreReceipt::Burn { contract_id, .. 
} => { - vec![Identifier::ContractID( - tx_id.to_owned(), - index, - contract_id.into(), - )] - } - _ => Vec::new(), - } -} diff --git a/crates/fuel-streams-publisher/src/publisher/payloads/transactions.rs b/crates/fuel-streams-publisher/src/publisher/payloads/transactions.rs deleted file mode 100644 index fc97edc6..00000000 --- a/crates/fuel-streams-publisher/src/publisher/payloads/transactions.rs +++ /dev/null @@ -1,177 +0,0 @@ -use std::sync::Arc; - -use fuel_core_types::fuel_tx::field::ScriptData; -use fuel_streams_core::prelude::*; -use rayon::prelude::*; -use tokio::task::JoinHandle; - -use super::{ - inputs::{self, publish_tasks as publish_inputs}, - logs::publish_tasks as publish_logs, - outputs::{self, publish_tasks as publish_outputs}, - receipts::{self, publish_tasks as publish_receipts}, - sha256, - utxos::publish_tasks as publish_utxos, -}; -use crate::{publish, FuelCoreLike, FuelStreamsExt, PublishOpts}; - -pub fn publish_all_tasks( - transactions: &[FuelCoreTransaction], - fuel_streams: &dyn FuelStreamsExt, - opts: &Arc, - fuel_core: &dyn FuelCoreLike, -) -> anyhow::Result>>> { - let offchain_database = Arc::clone(&opts.offchain_database); - let mut tasks = vec![]; - - for tx_item @ (_, tx) in transactions.iter().enumerate() { - let tx_id = tx.id(&opts.chain_id); - let tx_status: TransactionStatus = offchain_database - .get_tx_status(&tx_id)? - .map(|status| (&status).into()) - .unwrap_or_default(); - - let receipts = fuel_core.get_receipts(&tx_id)?.unwrap_or_default(); - - let tx_id = tx_id.into(); - - tasks.extend(publish_tasks( - tx_item, - &tx_id, - &tx_status, - fuel_streams.transactions(), - opts, - &receipts, - )); - tasks.extend(publish_inputs(tx, &tx_id, fuel_streams.inputs(), opts)); - tasks.extend(publish_outputs(tx, &tx_id, fuel_streams.outputs(), opts)); - tasks.extend(publish_receipts( - &tx_id, - fuel_streams.receipts(), - opts, - &receipts, - )); - tasks.extend(publish_outputs(tx, &tx_id, fuel_streams.outputs(), opts)); - tasks.extend(publish_logs( - &tx_id, - fuel_streams.logs(), - opts, - &receipts, - )); - tasks.extend(publish_utxos(tx, &tx_id, fuel_streams.utxos(), opts)); - } - - Ok(tasks) -} - -fn publish_tasks( - tx_item: (usize, &FuelCoreTransaction), - tx_id: &Bytes32, - tx_status: &TransactionStatus, - stream: &Stream, - opts: &Arc, - receipts: &Vec, -) -> Vec>> { - let block_height = &opts.block_height; - let base_asset_id = &opts.base_asset_id; - - packets_from_tx( - tx_item, - tx_id, - tx_status, - base_asset_id, - block_height, - receipts, - ) - .iter() - .map(|packet| publish(packet, Arc::new(stream.to_owned()), opts)) - .collect() -} - -fn packets_from_tx( - (index, tx): (usize, &FuelCoreTransaction), - tx_id: &Bytes32, - tx_status: &TransactionStatus, - base_asset_id: &FuelCoreAssetId, - block_height: &BlockHeight, - receipts: &Vec, -) -> Vec> { - let main_subject = TransactionsSubject { - block_height: Some(block_height.to_owned()), - index: Some(index), - tx_id: Some(tx_id.to_owned()), - status: Some(tx_status.to_owned()), - kind: Some(tx.into()), - } - .arc(); - - let transaction = - Transaction::new(tx_id, tx, tx_status, base_asset_id, receipts); - let mut packets = vec![transaction.to_packet(main_subject)]; - - packets.extend( - identifiers(tx, tx_id, index as u8) - .into_par_iter() - .map(|identifier| identifier.into()) - .map(|subject: TransactionsByIdSubject| subject.arc()) - .map(|subject| transaction.to_packet(subject)) - .collect::>(), - ); - - let packets_from_inputs: Vec> = tx - .inputs() - .par_iter() - .flat_map(|input| { - 
inputs::identifiers(input, tx_id, index as u8) - .into_par_iter() - .map(|identifier| identifier.into()) - .map(|subject: TransactionsByIdSubject| subject.arc()) - .map(|subject| transaction.to_packet(subject)) - }) - .collect(); - - packets.extend(packets_from_inputs); - - let packets_from_outputs: Vec> = tx - .outputs() - .par_iter() - .flat_map(|output| { - outputs::identifiers(output, tx, tx_id, index as u8) - .into_par_iter() - .map(|identifier| identifier.into()) - .map(|subject: TransactionsByIdSubject| subject.arc()) - .map(|subject| transaction.to_packet(subject)) - }) - .collect(); - - packets.extend(packets_from_outputs); - - let packets_from_receipts: Vec> = receipts - .par_iter() - .flat_map(|receipt| { - receipts::identifiers(receipt, tx_id, index as u8) - .into_par_iter() - .map(|identifier| identifier.into()) - .map(|subject: TransactionsByIdSubject| subject.arc()) - .map(|subject| transaction.to_packet(subject)) - }) - .collect(); - - packets.extend(packets_from_receipts); - - packets -} - -fn identifiers( - tx: &FuelCoreTransaction, - tx_id: &Bytes32, - index: u8, -) -> Vec { - match tx { - FuelCoreTransaction::Script(tx) => { - let script_tag = sha256(tx.script_data()); - vec![Identifier::ScriptID(tx_id.to_owned(), index, script_tag)] - } - _ => Vec::new(), - } -} diff --git a/crates/fuel-streams-publisher/src/publisher/payloads/utxos.rs b/crates/fuel-streams-publisher/src/publisher/payloads/utxos.rs deleted file mode 100644 index 0e4d1f89..00000000 --- a/crates/fuel-streams-publisher/src/publisher/payloads/utxos.rs +++ /dev/null @@ -1,145 +0,0 @@ -use std::sync::Arc; - -use fuel_core_types::fuel_tx::{ - input::{ - coin::{CoinPredicate, CoinSigned}, - contract::Contract, - message::{ - compute_message_id, - MessageCoinPredicate, - MessageCoinSigned, - MessageDataPredicate, - MessageDataSigned, - }, - }, - UtxoId, -}; -use fuel_streams_core::prelude::*; -use rayon::prelude::*; -use tokio::task::JoinHandle; - -use crate::{publish, PublishOpts}; - -pub fn publish_tasks( - tx: &FuelCoreTransaction, - tx_id: &Bytes32, - stream: &Stream, - opts: &Arc, -) -> Vec>> { - let packets = tx - .inputs() - .par_iter() - .filter_map(|input| utxo_packet(input, tx_id, input.utxo_id().cloned())) - .collect::>(); - - packets - .into_iter() - .map(|packet| publish(&packet, Arc::new(stream.to_owned()), opts)) - .collect() -} - -fn utxo_packet( - input: &FuelCoreInput, - tx_id: &Bytes32, - utxo_id: Option, -) -> Option> { - utxo_id?; - let utxo_id = utxo_id.expect("safe to unwrap utxo"); - - match input { - FuelCoreInput::Contract(Contract { utxo_id, .. }) => { - let utxo = Utxo { - utxo_id: utxo_id.into(), - tx_id: tx_id.to_owned(), - ..Default::default() - }; - - let subject = UtxosSubject { - utxo_type: Some(UtxoType::Contract), - hash: Some(tx_id.to_owned().into()), - } - .arc(); - - Some(utxo.to_packet(subject)) - } - FuelCoreInput::CoinSigned(CoinSigned { - utxo_id, amount, .. - }) - | FuelCoreInput::CoinPredicate(CoinPredicate { - utxo_id, amount, .. - }) => { - let utxo = Utxo { - utxo_id: utxo_id.into(), - amount: Some(*amount), - tx_id: tx_id.to_owned(), - ..Default::default() - }; - - let subject = UtxosSubject { - utxo_type: Some(UtxoType::Coin), - hash: Some(tx_id.to_owned().into()), - } - .arc(); - - Some(utxo.to_packet(subject)) - } - message @ (FuelCoreInput::MessageCoinSigned(MessageCoinSigned { - amount, - nonce, - recipient, - sender, - .. - }) - | FuelCoreInput::MessageCoinPredicate( - MessageCoinPredicate { - amount, - nonce, - recipient, - sender, - .. 
- }, - ) - | FuelCoreInput::MessageDataSigned(MessageDataSigned { - amount, - nonce, - recipient, - sender, - .. - }) - | FuelCoreInput::MessageDataPredicate( - MessageDataPredicate { - amount, - nonce, - recipient, - sender, - .. - }, - )) => { - let (data, hash) = if let Some(data) = message.input_data() { - let hash: MessageId = - compute_message_id(sender, recipient, nonce, *amount, data) - .into(); - (Some(data.to_vec()), hash) - } else { - (None, tx_id.to_owned().into()) - }; - - let utxo = Utxo { - utxo_id: utxo_id.into(), - sender: Some(sender.into()), - recipient: Some(recipient.into()), - nonce: Some(nonce.into()), - amount: Some(*amount), - tx_id: tx_id.to_owned(), - data, - }; - let subject = UtxosSubject { - utxo_type: Some(UtxoType::Message), - hash: Some(hash), - } - .arc(); - - Some(utxo.to_packet(subject)) - } - } -} diff --git a/crates/fuel-streams-publisher/src/publisher/shutdown.rs b/crates/fuel-streams-publisher/src/publisher/shutdown.rs deleted file mode 100644 index 89963fff..00000000 --- a/crates/fuel-streams-publisher/src/publisher/shutdown.rs +++ /dev/null @@ -1,73 +0,0 @@ -use std::time::Duration; - -use tokio::{ - signal::unix::{signal, SignalKind}, - sync::{broadcast, OnceCell}, -}; - -// TODO: move into publisher module along with subjects - -pub const GRACEFUL_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(90); - -// First, let's create a ShutdownToken that can be shared -#[derive(Debug)] -pub struct ShutdownToken { - receiver: broadcast::Receiver<()>, -} - -impl ShutdownToken { - pub async fn wait_for_shutdown(&mut self) -> bool { - self.receiver.recv().await.is_ok() - } -} - -#[derive(Debug, Clone)] -pub struct ShutdownController { - sender: broadcast::Sender<()>, - shutdown_initiated: OnceCell<()>, -} - -impl ShutdownController { - pub fn spawn_signal_listener(&self) { - let sender = self.sender.clone(); - tokio::spawn(async move { - let mut sigint = - signal(SignalKind::interrupt()).expect("shutdown_listener"); - let mut sigterm = - signal(SignalKind::terminate()).expect("shutdown_listener"); - - tokio::select! { - _ = sigint.recv() => { - tracing::info!("Received SIGINT ..."); - let _ = sender.send(()); - } - _ = sigterm.recv() => { - tracing::info!("Received SIGTERM ..."); - let _ = sender.send(()); - } - } - }); - } - - pub fn initiate_shutdown( - &self, - ) -> Result> { - if self.shutdown_initiated.set(()).is_ok() { - self.sender.send(()) - } else { - Ok(0) // Shutdown already initiated - } - } -} - -pub fn get_controller_and_token() -> (ShutdownController, ShutdownToken) { - let (sender, receiver) = broadcast::channel(1); - - ( - ShutdownController { - sender, - shutdown_initiated: OnceCell::new(), - }, - ShutdownToken { receiver }, - ) -} diff --git a/crates/fuel-streams-publisher/src/server/http.rs b/crates/fuel-streams-publisher/src/server/http.rs deleted file mode 100644 index 50933e3d..00000000 --- a/crates/fuel-streams-publisher/src/server/http.rs +++ /dev/null @@ -1,117 +0,0 @@ -use std::net::SocketAddr; - -use actix_cors::Cors; -use actix_server::Server; -use actix_web::{http, web, App, HttpResponse, HttpServer}; -use tracing_actix_web::TracingLogger; - -use super::state::ServerState; - -// We are keeping this low to give room for more -// Publishing processing power. 
This is fine since the -// the latency tolerance when fetching /health and /metrics -// is trivial -const MAX_WORKERS: usize = 2; - -pub fn create_web_server( - state: ServerState, - actix_server_addr: SocketAddr, -) -> anyhow::Result { - let server = HttpServer::new(move || { - // create cors - let cors = Cors::default() - .allow_any_origin() - .allowed_methods(vec!["GET", "POST"]) - .allowed_headers(vec![ - http::header::AUTHORIZATION, - http::header::ACCEPT, - ]) - .allowed_header(http::header::CONTENT_TYPE) - .max_age(3600); - - App::new() - .app_data(web::Data::new(state.clone())) - .wrap(TracingLogger::default()) - .wrap(cors) - .service(web::resource("/health").route(web::get().to( - |state: web::Data| async move { - if !state.is_healthy() { - return HttpResponse::ServiceUnavailable() - .body("Service Unavailable"); - } - HttpResponse::Ok().json(state.get_health().await) - }, - ))) - .service(web::resource("/metrics").route(web::get().to( - |state: web::Data| async move { - HttpResponse::Ok() - .body(state.publisher.telemetry.get_metrics().await) - }, - ))) - }) - .bind(actix_server_addr)? - .workers(MAX_WORKERS) - .shutdown_timeout(20) - .run(); - - Ok(server) -} - -#[cfg(test)] -#[cfg(feature = "test-helpers")] -mod tests { - use std::time::Duration; - - use actix_web::{http, test, web, App, HttpResponse}; - use fuel_core::service::Config; - use fuel_core_bin::FuelService; - use fuel_core_services::State; - - use crate::{ - server::state::{HealthResponse, ServerState}, - telemetry::Telemetry, - FuelCore, - Publisher, - }; - - #[actix_web::test] - async fn test_health_check() { - let fuel_service = - FuelService::new_node(Config::local_node()).await.unwrap(); - assert_eq!(fuel_service.state(), State::Started); - - let telemetry = Telemetry::new().await.unwrap(); - - let fuel_core = FuelCore::from(fuel_service); - let publisher = - Publisher::new(fuel_core.arc(), telemetry).await.unwrap(); - let state = ServerState::new(publisher).await; - assert!(state.publisher.nats_client.is_connected()); - - let app = test::init_service( - App::new().app_data(web::Data::new(state.clone())).route( - "/health", - web::get().to(|state: web::Data| async move { - if !state.is_healthy() { - return HttpResponse::ServiceUnavailable() - .body("Service Unavailable"); - } - HttpResponse::Ok().json(state.get_health().await) - }), - ), - ) - .await; - - let uptime = Duration::from_secs(2); - tokio::time::sleep(uptime).await; - - let req = test::TestRequest::get().uri("/health").to_request(); - let resp = test::call_service(&app, req).await; - - assert_eq!(resp.status(), http::StatusCode::OK); - - let result: HealthResponse = test::read_body_json(resp).await; - assert!(result.uptime >= uptime.as_secs()); - assert!(!result.streams_info.is_empty()); - } -} diff --git a/crates/fuel-streams-publisher/src/server/mod.rs b/crates/fuel-streams-publisher/src/server/mod.rs deleted file mode 100644 index 8ebfa9ca..00000000 --- a/crates/fuel-streams-publisher/src/server/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod http; -pub mod state; diff --git a/crates/fuel-streams-publisher/src/server/state.rs b/crates/fuel-streams-publisher/src/server/state.rs deleted file mode 100644 index c15660b9..00000000 --- a/crates/fuel-streams-publisher/src/server/state.rs +++ /dev/null @@ -1,106 +0,0 @@ -use std::{ - sync::Arc, - time::{Duration, Instant}, -}; - -use async_nats::jetstream::stream::State; -use parking_lot::RwLock; -use serde::{Deserialize, Serialize}; - -use crate::Publisher; - -#[derive(Debug, Serialize, Deserialize, 
Clone)] -pub struct StreamInfo { - consumers: Vec, - state: StreamState, - stream_name: String, -} - -#[derive(Debug, Serialize, Deserialize, Clone, Copy)] -pub struct StreamState { - /// The number of messages contained in this stream - pub messages: u64, - /// The number of bytes of all messages contained in this stream - pub bytes: u64, - /// The lowest sequence number still present in this stream - #[serde(rename = "first_seq")] - pub first_sequence: u64, - /// The time associated with the oldest message still present in this stream - #[serde(rename = "first_ts")] - pub first_timestamp: i64, - /// The last sequence number assigned to a message in this stream - #[serde(rename = "last_seq")] - pub last_sequence: u64, - /// The time that the last message was received by this stream - #[serde(rename = "last_ts")] - pub last_timestamp: i64, - /// The number of consumers configured to consume this stream - pub consumer_count: usize, -} - -impl From for StreamState { - fn from(state: State) -> Self { - StreamState { - messages: state.messages, - bytes: state.bytes, - first_sequence: state.first_sequence, - first_timestamp: state.first_timestamp.unix_timestamp(), - last_sequence: state.last_sequence, - last_timestamp: state.last_timestamp.unix_timestamp(), - consumer_count: state.consumer_count, - } - } -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct HealthResponse { - pub uptime: u64, - pub streams_info: Vec, -} - -#[derive(Clone)] -pub struct ServerState { - pub publisher: Publisher, - pub start_time: Instant, - pub connection_count: Arc>, -} - -impl ServerState { - pub async fn new(publisher: Publisher) -> Self { - Self { - publisher, - start_time: Instant::now(), - connection_count: Arc::new(RwLock::new(0)), - } - } -} - -impl ServerState { - pub fn is_healthy(&self) -> bool { - self.publisher.is_healthy() - } - - pub async fn get_health(&self) -> HealthResponse { - let streams_info = self - .publisher - .fuel_streams - .get_consumers_and_state() - .await - .unwrap_or_default() - .into_iter() - .map(|res| StreamInfo { - consumers: res.1, - state: res.2.into(), - stream_name: res.0, - }) - .collect::>(); - HealthResponse { - uptime: self.uptime().as_secs(), - streams_info, - } - } - - pub fn uptime(&self) -> Duration { - self.start_time.elapsed() - } -} diff --git a/crates/fuel-streams-publisher/src/telemetry/mod.rs b/crates/fuel-streams-publisher/src/telemetry/mod.rs deleted file mode 100644 index 17f26842..00000000 --- a/crates/fuel-streams-publisher/src/telemetry/mod.rs +++ /dev/null @@ -1,277 +0,0 @@ -mod elastic_search; -mod publisher; -mod runtime; -#[allow(clippy::needless_borrows_for_generic_args)] -mod system; - -use std::{sync::Arc, time::Duration}; - -use anyhow::Context; -use elastic_search::{ - new_elastic_search, - should_use_elasticsearch, - ElasticSearch, - LogEntry, -}; -use fuel_streams_core::prelude::*; -// TODO: Consider using tokio's Rwlock instead -use parking_lot::RwLock; -use publisher::PublisherMetrics; -use runtime::Runtime; -use system::{System, SystemMetricsWrapper}; - -#[derive(Clone)] -pub struct Telemetry { - runtime: Arc, - system: Arc>, - publisher_metrics: Option>, - elastic_search: Option>, -} - -impl Telemetry { - const DEDICATED_THREADS: usize = 2; - - pub async fn new() -> anyhow::Result> { - let runtime = - Runtime::new(Self::DEDICATED_THREADS, Duration::from_secs(20)); - let system = Arc::new(RwLock::new(System::new().await)); - - let publisher_metrics = if should_use_publisher_metrics() { - 
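        // Metrics are opt-in: should_use_publisher_metrics() (defined at the
        // bottom of this file) checks the USE_METRICS env var, so the
        // Prometheus registry and /metrics output only exist when
        // USE_METRICS=true.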
Some(Arc::new(PublisherMetrics::default())) - } else { - None - }; - - let elastic_search = if should_use_elasticsearch() { - Some(Arc::new(new_elastic_search().await?)) - } else { - None - }; - - Ok(Arc::new(Self { - runtime: Arc::new(runtime), - system, - publisher_metrics, - elastic_search, - })) - } - - pub async fn start(&self) -> anyhow::Result<()> { - let system = Arc::clone(&self.system); - - if let Some(elastic_search) = self.elastic_search.as_ref() { - tracing::info!( - "Elastic Search connection live? {:?}", - elastic_search.get_conn().check_alive().unwrap_or_default() - ); - elastic_search - .get_conn() - .ping() - .await - .context("Error pinging elastisearch connection")?; - tracing::info!("Elastic logger pinged successfully!"); - }; - - self.runtime.start(move || { - system.write().refresh(); - }); - - Ok(()) - } - - pub fn log_info(&self, message: &str) { - let entry = LogEntry::new("INFO", message); - self.maybe_elog(entry); - tracing::info!("{}", message); - } - - pub fn log_error(&self, message: &str) { - let entry = LogEntry::new("ERROR", message); - self.maybe_elog(entry); - tracing::error!("{}", message); - } - - fn maybe_elog(&self, entry: LogEntry) { - if let Some(elastic_search) = &self.elastic_search { - self.runtime - .spawn(elastic_search::log(elastic_search.clone(), entry)); - } - } - - pub fn update_publisher_success_metrics( - &self, - subject: &str, - published_data_size: usize, - chain_id: &FuelCoreChainId, - block_producer: &Address, - ) { - self.maybe_use_metrics(|metrics| { - // Update message size histogram - metrics - .message_size_histogram - .with_label_values(&[ - &chain_id.to_string(), - &block_producer.to_string(), - subject, - ]) - .observe(published_data_size as f64); - - // Increment total published messages - metrics - .total_published_messages - .with_label_values(&[ - &chain_id.to_string(), - &block_producer.to_string(), - ]) - .inc(); - - // Increment throughput for the published messages - metrics - .published_messages_throughput - .with_label_values(&[ - &chain_id.to_string(), - &block_producer.to_string(), - subject, - ]) - .inc(); - }); - } - - pub fn update_publisher_error_metrics( - &self, - subject: &str, - chain_id: &FuelCoreChainId, - block_producer: &Address, - error: &str, - ) { - self.maybe_use_metrics(|metrics| { - metrics - .error_rates - .with_label_values(&[ - &chain_id.to_string(), - &block_producer.to_string(), - subject, - error, - ]) - .inc(); - }); - } - - pub fn record_streams_count( - &self, - chain_id: &FuelCoreChainId, - count: usize, - ) { - self.maybe_use_metrics(|metrics| { - metrics - .total_subs - .with_label_values(&[&chain_id.to_string()]) - .set(count as i64); - }); - } - - pub fn record_failed_publishing( - &self, - chain_id: &FuelCoreChainId, - block_producer: &Address, - ) { - self.maybe_use_metrics(|metrics| { - metrics - .total_failed_messages - .with_label_values(&[ - &chain_id.to_string(), - &block_producer.to_string(), - ]) - .inc(); - }); - } - - pub fn maybe_use_metrics(&self, f: F) - where - F: Fn(&PublisherMetrics), - { - if let Some(metrics) = &self.publisher_metrics { - f(metrics); - } - } - - // TODO: Break into smaller functions - pub async fn get_metrics(&self) -> String { - use prometheus::Encoder; - let encoder = prometheus::TextEncoder::new(); - - if self.publisher_metrics.is_none() { - return "".to_string(); - } - - // fetch all measured metrics - let mut buffer = Vec::new(); - if let Err(e) = encoder.encode( - &self.publisher_metrics.as_ref().unwrap().registry.gather(), - &mut buffer, - 
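            // get_metrics() stitches together three sources: the custom
            // PublisherMetrics registry (encoded here), the process-global
            // prometheus registry (encoded next), and the sysinfo-based
            // system metrics serialized via serde_prometheus further below.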
) { - tracing::error!("could not encode custom metrics: {}", e); - }; - let mut res = match String::from_utf8(buffer.clone()) { - Ok(v) => v, - Err(e) => { - tracing::error!( - "custom metrics could not be from_utf8'd: {}", - e - ); - String::default() - } - }; - buffer.clear(); - - let mut buffer = Vec::new(); - if let Err(e) = encoder.encode(&prometheus::gather(), &mut buffer) { - tracing::error!("could not encode prometheus metrics: {}", e); - }; - let res_custom = match String::from_utf8(buffer.clone()) { - Ok(v) => v, - Err(e) => { - tracing::error!( - "prometheus metrics could not be from_utf8'd: {}", - e - ); - String::default() - } - }; - buffer.clear(); - - res.push_str(&res_custom); - - // now fetch and add system metrics - let system_metrics = match self.system.read().metrics() { - Ok(m) => { - let metrics = SystemMetricsWrapper::from(m); - let labels: Vec<(&str, &str)> = vec![]; - match serde_prometheus::to_string(&metrics, None, labels) { - Ok(m) => m, - Err(err) => { - tracing::error!( - "could not encode system metrics: {:?}", - err - ); - String::default() - } - } - } - Err(err) => { - tracing::error!( - "prometheus system metrics could not be stringified: {:?}", - err - ); - String::default() - } - }; - res.push_str(&system_metrics); - - res - } -} - -pub fn should_use_publisher_metrics() -> bool { - dotenvy::var("USE_METRICS").is_ok_and(|val| val == "true") -} diff --git a/crates/fuel-streams-publisher/src/telemetry/publisher.rs b/crates/fuel-streams-publisher/src/telemetry/publisher.rs deleted file mode 100644 index 338d46b5..00000000 --- a/crates/fuel-streams-publisher/src/telemetry/publisher.rs +++ /dev/null @@ -1,423 +0,0 @@ -use std::sync::Arc; - -use chrono::Utc; -use fuel_core::database::database_description::DatabaseHeight; -use fuel_streams_core::prelude::*; -use prometheus::{ - register_histogram_vec, - register_int_counter_vec, - register_int_gauge_vec, - HistogramVec, - IntCounterVec, - IntGaugeVec, - Registry, -}; - -#[derive(Clone, Debug)] -pub struct PublisherMetrics { - pub registry: Registry, - pub total_subs: IntGaugeVec, - pub total_published_messages: IntCounterVec, - pub total_failed_messages: IntCounterVec, - pub last_published_block_height: IntGaugeVec, - pub last_published_block_timestamp: IntGaugeVec, - pub published_messages_throughput: IntCounterVec, - pub publishing_latency_histogram: HistogramVec, - pub message_size_histogram: HistogramVec, - pub error_rates: IntCounterVec, -} - -impl Default for PublisherMetrics { - fn default() -> Self { - PublisherMetrics::new(None) - .expect("Failed to create default PublisherMetrics") - } -} - -impl PublisherMetrics { - pub fn new(prefix: Option) -> anyhow::Result { - let metric_prefix = prefix - .clone() - .map(|p| format!("{}_", p)) - .unwrap_or_default(); - - let total_subs = register_int_gauge_vec!( - format!("{}publisher_metrics_total_subscriptions", metric_prefix), - "A metric counting the number of active subscriptions", - &["chain_id"], - ) - .expect("metric must be created"); - - let total_published_messages = register_int_counter_vec!( - format!( - "{}publisher_metrics_total_published_messages", - metric_prefix - ), - "A metric counting the number of published messages", - &["chain_id", "block_producer"], - ) - .expect("metric must be created"); - - let total_failed_messages = register_int_counter_vec!( - format!("{}publisher_metrics_total_failed_messages", metric_prefix), - "A metric counting the number of unpublished and failed messages", - &["chain_id", "block_producer"], - ) - 
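        // Note: the register_*_vec! macros above also add these collectors to
        // prometheus' global default registry; they are registered a second
        // time with the per-instance registry below, which is why both
        // gather() paths can observe them.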
.expect("metric must be created"); - - let last_published_block_height = register_int_gauge_vec!( - format!( - "{}publisher_metrics_last_published_block_height", - metric_prefix - ), - "A metric that represents the last published block height", - &["chain_id", "block_producer"], - ) - .expect("metric must be created"); - - let last_published_block_timestamp = register_int_gauge_vec!( - format!( - "{}publisher_metrics_last_published_block_timestamp", - metric_prefix - ), - "A metric that represents the last published transaction timestamp", - &["chain_id", "block_producer"], - ) - .expect("metric must be created"); - - let published_messages_throughput = register_int_counter_vec!( - format!("{}publisher_metrics_published_messages_throughput", metric_prefix), - "A metric counting the number of published messages per subject wildcard", - &["chain_id", "block_producer", "subject_wildcard"], - ) - .expect("metric must be created"); - - // New histogram metric for block latency - let publishing_latency_histogram = register_histogram_vec!( - format!("{}publisher_metrics_block_latency_seconds", metric_prefix), - "Histogram of latencies between receiving and publishing a block", - &["chain_id", "block_producer", "subject_wildcard"], - // buckets for latency measurement (e.g., 0.1s, 0.5s, 1s, 5s, 10s) - vec![0.1, 0.5, 1.0, 5.0, 10.0], - ) - .expect("metric must be created"); - - let message_size_histogram = register_histogram_vec!( - format!("{}publisher_metrics_message_size_bytes", metric_prefix), - "Histogram of message sizes in bytes", - &["chain_id", "block_producer", "subject_wildcard"], - vec![100.0, 500.0, 1000.0, 5000.0, 10000.0, 100000.0, 1000000.0] - ) - .expect("metric must be created"); - - let error_rates = - register_int_counter_vec!( - format!("{}publisher_metrics_error_rates", metric_prefix), - "A metric counting errors or failures during message processing", - &["chain_id", "block_producer", "subject_wildcard", "error_type"], - ) - .expect("metric must be created"); - - let registry = - Registry::new_custom(prefix, None).expect("registry to be created"); - registry.register(Box::new(total_subs.clone()))?; - registry.register(Box::new(total_published_messages.clone()))?; - registry.register(Box::new(total_failed_messages.clone()))?; - registry.register(Box::new(last_published_block_height.clone()))?; - registry.register(Box::new(last_published_block_timestamp.clone()))?; - registry.register(Box::new(published_messages_throughput.clone()))?; - registry.register(Box::new(publishing_latency_histogram.clone()))?; - registry.register(Box::new(message_size_histogram.clone()))?; - registry.register(Box::new(error_rates.clone()))?; - - Ok(Self { - registry, - total_subs, - total_published_messages, - total_failed_messages, - last_published_block_height, - last_published_block_timestamp, - published_messages_throughput, - publishing_latency_histogram, - message_size_histogram, - error_rates, - }) - } -} - -#[allow(dead_code)] -// TODO: Will this be useful in the future? 
-pub fn add_block_metrics( - chain_id: &FuelCoreChainId, - block: &FuelCoreBlock, - block_producer: &Address, - metrics: &Arc, -) -> anyhow::Result> { - let latency = Utc::now().timestamp() - block.header().time().to_unix(); - - metrics - .publishing_latency_histogram - .with_label_values(&[ - &chain_id.to_string(), - &block_producer.to_string(), - BlocksSubject::WILDCARD, - ]) - .observe(latency as f64); - - metrics - .last_published_block_timestamp - .with_label_values(&[ - &chain_id.to_string(), - &block_producer.to_string(), - ]) - .set(block.header().time().to_unix()); - - metrics - .last_published_block_height - .with_label_values(&[ - &chain_id.to_string(), - &block_producer.to_string(), - ]) - .set(block.header().consensus().height.as_u64() as i64); - - Ok(metrics.to_owned()) -} - -#[cfg(test)] -mod tests { - use prometheus::{gather, Encoder, TextEncoder}; - - use super::*; - - impl PublisherMetrics { - pub fn random() -> Self { - use rand::{distributions::Alphanumeric, Rng}; - - let prefix = rand::thread_rng() - .sample_iter(&Alphanumeric) - .filter(|c| c.is_ascii_alphabetic()) - .take(6) - .map(char::from) - .collect(); - - PublisherMetrics::new(Some(prefix)) - .expect("Failed to create random PublisherMetrics") - } - } - - #[test] - fn test_total_published_messages_metric() { - let metrics = PublisherMetrics::random(); - - metrics - .total_published_messages - .with_label_values(&["chain_id_1", "block_producer_1"]) - .inc_by(5); - - let metric_families = gather(); - let mut buffer = Vec::new(); - let encoder = TextEncoder::new(); - encoder.encode(&metric_families, &mut buffer).unwrap(); - - let output = String::from_utf8(buffer.clone()).unwrap(); - - assert!(output.contains("publisher_metrics_total_published_messages")); - assert!(output.contains("chain_id_1")); - assert!(output.contains("block_producer_1")); - assert!(output.contains("5")); - } - - #[test] - fn test_latency_histogram_metric() { - let metrics = PublisherMetrics::random(); - - metrics - .publishing_latency_histogram - .with_label_values(&["chain_id_1", "block_producer_1", "topic_1"]) - .observe(0.75); - - let metric_families = gather(); - let mut buffer = Vec::new(); - let encoder = TextEncoder::new(); - encoder.encode(&metric_families, &mut buffer).unwrap(); - - let output = String::from_utf8(buffer.clone()).unwrap(); - - assert!(output.contains("publisher_metrics_block_latency_seconds")); - assert!(output.contains("chain_id_1")); - assert!(output.contains("block_producer_1")); - assert!(output.contains("topic_1")); - assert!(output.contains("0.75")); - } - - #[test] - fn test_message_size_histogram_metric() { - let metrics = PublisherMetrics::random(); - - metrics - .message_size_histogram - .with_label_values(&["chain_id_1", "block_producer_1", "topic_1"]) - .observe(1500.1); - - let metric_families = gather(); - let mut buffer = Vec::new(); - let encoder = TextEncoder::new(); - encoder.encode(&metric_families, &mut buffer).unwrap(); - - let output = String::from_utf8(buffer.clone()).unwrap(); - - assert!(output.contains("publisher_metrics_message_size_bytes")); - assert!(output.contains("chain_id_1")); - assert!(output.contains("block_producer_1")); - assert!(output.contains("topic_1")); - assert!(output.contains("1500.1")); - } - - #[test] - fn test_total_failed_messages_metric() { - let metrics = PublisherMetrics::random(); - - metrics - .total_failed_messages - .with_label_values(&["chain_id_1", "block_producer_1"]) - .inc_by(3); - - // Gather all the metrics - let metric_families = gather(); - let mut 
buffer = Vec::new(); - let encoder = TextEncoder::new(); - encoder.encode(&metric_families, &mut buffer).unwrap(); - - // Convert the gathered output to a string - let output = String::from_utf8(buffer.clone()).unwrap(); - - // Assert that the output contains the correct failed message metric - assert!(output.contains("publisher_metrics_total_failed_messages")); - assert!(output.contains("chain_id_1")); - assert!(output.contains("block_producer_1")); - assert!(output.contains("3")); - } - - #[test] - fn test_total_subs_metric() { - let metrics = PublisherMetrics::random(); - - metrics - .total_subs - .with_label_values(&["chain_id_1"]) - .set(10); - - let metric_families = gather(); - let mut buffer = Vec::new(); - let encoder = TextEncoder::new(); - encoder.encode(&metric_families, &mut buffer).unwrap(); - - let output = String::from_utf8(buffer.clone()).unwrap(); - - assert!(output.contains("publisher_metrics_total_subscriptions")); - assert!(output.contains("chain_id_1")); - assert!(output.contains("10")); - } - - #[test] - fn test_last_published_block_height_metric() { - let metrics = PublisherMetrics::random(); - - metrics - .last_published_block_height - .with_label_values(&["chain_id_1", "block_producer_1"]) - .set(1234); - - let metric_families = gather(); - let mut buffer = Vec::new(); - let encoder = TextEncoder::new(); - encoder.encode(&metric_families, &mut buffer).unwrap(); - - let output = String::from_utf8(buffer.clone()).unwrap(); - - assert!( - output.contains("publisher_metrics_last_published_block_height") - ); - assert!(output.contains("chain_id_1")); - assert!(output.contains("block_producer_1")); - assert!(output.contains("1234")); - } - - #[test] - fn test_last_published_block_timestamp_metric() { - let metrics = PublisherMetrics::random(); - - metrics - .last_published_block_timestamp - .with_label_values(&["chain_id_1", "block_producer_1"]) - .set(1633046400); - - let metric_families = gather(); - let mut buffer = Vec::new(); - let encoder = TextEncoder::new(); - encoder.encode(&metric_families, &mut buffer).unwrap(); - - let output = String::from_utf8(buffer.clone()).unwrap(); - - assert!( - output.contains("publisher_metrics_last_published_block_timestamp") - ); - assert!(output.contains("chain_id_1")); - assert!(output.contains("block_producer_1")); - assert!(output.contains("1633046400")); - } - - #[test] - fn test_published_messages_throughput_metric() { - let metrics = PublisherMetrics::random(); - - metrics - .published_messages_throughput - .with_label_values(&["chain_id_1", "block_producer_1", "topic_1"]) - .inc_by(10); - - let metric_families = gather(); - let mut buffer = Vec::new(); - let encoder = TextEncoder::new(); - encoder.encode(&metric_families, &mut buffer).unwrap(); - - let output = String::from_utf8(buffer.clone()).unwrap(); - - assert!( - output.contains("publisher_metrics_published_messages_throughput") - ); - assert!(output.contains("chain_id_1")); - assert!(output.contains("block_producer_1")); - assert!(output.contains("topic_1")); - assert!(output.contains("10")); - } - - #[test] - fn test_error_rates_metric() { - let metrics = PublisherMetrics::random(); - - metrics - .error_rates - .with_label_values(&[ - "chain_id_1", - "block_producer_1", - "topic_1", - "timeout", - ]) - .inc_by(1); - - let metric_families = gather(); - let mut buffer = Vec::new(); - let encoder = TextEncoder::new(); - encoder.encode(&metric_families, &mut buffer).unwrap(); - - let output = String::from_utf8(buffer.clone()).unwrap(); - - 
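        // These tests assert against prometheus' process-global default
        // registry (gather()), which every register_*_vec! call feeds;
        // PublisherMetrics::random() gives each test a unique metric prefix
        // so the tests don't collide on metric names.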
assert!(output.contains("publisher_metrics_error_rates")); - assert!(output.contains("chain_id_1")); - assert!(output.contains("block_producer_1")); - assert!(output.contains("topic_1")); - assert!(output.contains("timeout")); - assert!(output.contains("1")); - } -} diff --git a/crates/fuel-streams-publisher/src/telemetry/system.rs b/crates/fuel-streams-publisher/src/telemetry/system.rs deleted file mode 100644 index ec0f14eb..00000000 --- a/crates/fuel-streams-publisher/src/telemetry/system.rs +++ /dev/null @@ -1,634 +0,0 @@ -use std::{ - collections::HashMap, - convert::TryFrom, - hash::Hash, - path::PathBuf, - time::Duration, -}; - -use derive_more::Deref; -use rust_decimal::{ - prelude::{FromPrimitive, ToPrimitive}, - Decimal, -}; -use serde::{ser::SerializeStruct, Serialize, Serializer}; -use sysinfo::{ - CpuExt, - CpuRefreshKind, - DiskExt, - Pid, - PidExt, - ProcessExt, - RefreshKind, - SystemExt, -}; -use thiserror::Error; -use tokio::time; - -// TODO: move this to web interface as `SystemsMetricsResponse` ? -#[derive(Serialize)] -pub struct SystemMetricsWrapper { - system: SystemMetrics, -} - -impl From for SystemMetricsWrapper { - fn from(system: SystemMetrics) -> Self { - Self { system } - } -} - -#[derive(Debug, Error)] -pub enum Error { - #[error("The process {0} could not be found")] - ProcessNotFound(Pid), -} - -#[derive(Debug, Deref)] -pub struct System { - /// System information from the `sysinfo` crate. - #[deref] - system: sysinfo::System, - /// Refresh settings. - specifics: RefreshKind, - /// Cached physical CPU core count. - cpu_physical_core_count: Option, - /// Process ID. - pid: Pid, -} - -impl System { - pub async fn new() -> Self { - let specifics = RefreshKind::new().with_disks_list().with_memory(); - // Gathering CPU information takes about 150ms+ extra. - let specifics = specifics.with_cpu(CpuRefreshKind::everything()); - - let mut system = sysinfo::System::new_with_specifics(specifics); - - // We're only interested in the current process. - // NOTE: This ::expect can never fail on Linux! - let pid = sysinfo::get_current_pid().expect("Unable to get PID"); - system.refresh_process(pid); - - // We have to refresh the CPU statistics once on startup. - time::sleep(Duration::from_millis(100)).await; - system.refresh_process(pid); - - // Only retrieve the physical CPU core count once (while - // hotplug CPUs exist on virtual and physical platforms, we - // just assume that it is usually not changing on runtime). - let cpu_physical_core_count = system.physical_core_count(); - - Self { - system, - specifics, - cpu_physical_core_count, - pid, - } - } - - pub fn refresh(&mut self) { - self.system.refresh_process(self.pid); - self.system.refresh_specifics(self.specifics); - } - - pub fn metrics(&self) -> Result { - SystemMetrics::try_from(self) - } - - fn pid(&self) -> Pid { - self.pid - } -} - -/// Accumulated system status information. -#[derive(Debug, Default, Serialize)] -pub struct SystemMetrics { - /// Parent process of the application. - pub application: Process, - /// System memory information. - pub memory: SystemMemory, - /// Load averages - pub load_average: LoadAverage, - /// Host and operation system information. - pub host: Host, - /// Disk information and usage. - pub disk: HashMap, - /// CPU physical core count. - #[serde(serialize_with = "format_value")] - pub cpu_physical_core_count: usize, - /// CPU count. - #[serde(serialize_with = "format_value")] - pub cpu_count: usize, - /// CPU information. 
- pub cpu: HashMap, -} - -impl TryFrom<&System> for SystemMetrics { - type Error = Error; - - fn try_from(system: &System) -> Result { - // Get current pid. - let pid = system.pid(); - - let disk = system - .disks() - .iter() - .map(|v| { - let path = v.mount_point().to_path_buf(); - let disk = Disk::from(v); - (path, disk) - }) - .collect(); - - let cpu = system - .cpus() - .iter() - .enumerate() - .map(|(i, v)| (i, v.into())) - .collect::>(); - // Total number of CPUs (including CPU threads). - let cpu_count = cpu.len(); - - // Use cached number of CPU physical cores, if set. - let cpu_physical_core_count = system - .cpu_physical_core_count - .unwrap_or_else(|| system.physical_core_count().unwrap_or(1)); - - Ok(Self { - application: TryFrom::try_from((system.deref(), pid))?, - memory: system.deref().into(), - load_average: system.deref().into(), - host: system.deref().into(), - disk, - cpu_count, - cpu_physical_core_count, - cpu, - }) - } -} - -/// System memory usage information. -#[derive(Debug, Clone, Default)] -pub struct Memory { - /// Total memory. - size: u64, - /// Used memory. - free: Option, - /// Memory usage in percent. - usage: Decimal, -} - -impl serde::Serialize for Memory { - fn serialize( - &self, - serializer: S, - ) -> Result { - if let Some(free) = self.free { - let mut s = serializer.serialize_struct("Memory", 3)?; - s.serialize_field("size", &Format::Memory(self.size))?; - s.serialize_field("free", &Format::Memory(free))?; - s.serialize_field("usage", &Format::Memory(AsF64(self.usage)))?; - s.end() - } else { - let mut s = serializer.serialize_struct("Memory", 2)?; - s.serialize_field("size", &Format::Memory2(self.size))?; - s.serialize_field("usage", &Format::Memory2(AsF64(self.usage)))?; - s.end() - } - } -} - -/// System memory usage information. -#[derive(Debug, Default, Serialize)] -pub struct SystemMemory { - /// System memory. - system: Memory, - /// Swap memory. - swap: Memory, -} - -impl From<&sysinfo::System> for SystemMemory { - fn from(system: &sysinfo::System) -> Self { - let size = system.total_memory(); - let used = system.used_memory(); - let free = Some(size.saturating_sub(used)); - let usage = percent_usage(used, size); - - let swap_size = system.total_swap(); - let swap_used = system.used_swap(); - let swap_free = Some(swap_size.saturating_sub(swap_used)); - let swap_usage = percent_usage(swap_used, swap_size); - - Self { - system: Memory { size, free, usage }, - swap: Memory { - size: swap_size, - free: swap_free, - usage: swap_usage, - }, - } - } -} - -/// Process information and metrics. 
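// Semantics of the struct below: `memory.size` is the process RSS and
// `memory.usage` is that size as a percentage of total system memory
// (computed in the TryFrom<(&sysinfo::System, Pid)> impl); `free` stays
// None for a process, which selects the two-field Memory2 output format.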
-#[derive(Debug)] -pub struct Process { - pid: Pid, - name: String, - cpu_usage: Decimal, - memory: Memory, -} - -impl Default for Process { - fn default() -> Self { - Self { - pid: Pid::from(0), - name: Default::default(), - cpu_usage: Default::default(), - memory: Default::default(), - } - } -} - -impl serde::Serialize for Process { - fn serialize( - &self, - serializer: S, - ) -> Result { - let mut s = serializer.serialize_struct("Process", 4)?; - s.serialize_field( - "pid", - &Format::::Process(self.pid.as_u32() as i32), - )?; - s.serialize_field("name", &FormatKey(&self.name))?; - s.serialize_field( - "cpu_usage", - &Format::Process(AsF64(self.cpu_usage)), - )?; - s.serialize_field("memory", &self.memory)?; - s.end() - } -} - -impl TryFrom<(&sysinfo::System, Pid)> for Process { - type Error = Error; - - fn try_from( - (system, pid): (&sysinfo::System, Pid), - ) -> Result { - let process = system.process(pid).ok_or(Error::ProcessNotFound(pid))?; - - let total = system.total_memory(); - let size = process.memory(); - let usage = percent_usage(size, total); - - Ok(Self { - memory: Memory { - size, - free: None, - usage, - }, - ..Self::from(process) - }) - } -} - -impl From<&sysinfo::Process> for Process { - fn from(process: &sysinfo::Process) -> Self { - Self { - name: process.name().to_string(), - pid: process.pid(), - cpu_usage: decimal(process.cpu_usage()), - memory: Default::default(), - } - } -} - -/// Disk information and usage. -#[derive(Debug)] -pub struct Disk { - size: u64, - free: u64, - usage: Decimal, -} - -impl serde::Serialize for Disk { - fn serialize( - &self, - serializer: S, - ) -> Result { - let mut s = serializer.serialize_struct("Disk", 2)?; - s.serialize_field("size", &Format::Disk(self.size))?; - s.serialize_field("free", &Format::Disk(self.free))?; - s.serialize_field("usage", &Format::Disk(AsF64(self.usage)))?; - s.end() - } -} - -impl From<&sysinfo::Disk> for Disk { - fn from(disk: &sysinfo::Disk) -> Self { - let size = disk.total_space(); - let free = disk.available_space(); - let used = size.saturating_sub(free); - - // Calculate the disk usage in percent. - let usage = percent_usage(used, size); - - Self { size, free, usage } - } -} - -/// System memory usage information. -#[derive(Debug, Default)] -pub struct LoadAverage(f64, f64, f64); - -impl serde::Serialize for LoadAverage { - fn serialize( - &self, - serializer: S, - ) -> Result { - let mut s = serializer.serialize_struct("LoadAverage", 3)?; - s.serialize_field("1", &Format::LoadAverage(self.0))?; - s.serialize_field("5", &Format::LoadAverage(self.1))?; - s.serialize_field("15", &Format::LoadAverage(self.2))?; - s.end() - } -} - -impl From<&sysinfo::System> for LoadAverage { - fn from(system: &sysinfo::System) -> Self { - let load_avg = system.load_average(); - Self(load_avg.one, load_avg.five, load_avg.fifteen) - } -} - -/// System memory usage information. -#[derive(Debug, Default)] -pub struct Cpu { - #[allow(dead_code)] - name: String, - frequency: u64, - usage: Decimal, -} - -impl serde::Serialize for Cpu { - fn serialize( - &self, - serializer: S, - ) -> Result { - let mut s = serializer.serialize_struct("Cpu", 2)?; - s.serialize_field("frequency", &Format::Cpu(self.frequency))?; - s.serialize_field("usage", &Format::Cpu(AsF64(self.usage)))?; - s.end() - } -} - -impl From<&sysinfo::Cpu> for Cpu { - fn from(cpu: &sysinfo::Cpu) -> Self { - Self { - name: cpu.brand().to_string(), - frequency: cpu.frequency(), - usage: decimal(cpu.cpu_usage()), - } - } -} - -/// System memory usage information. 
-#[derive(Debug, Default)]
-pub struct Host {
-    os_version: String,
-    kernel_version: String,
-    uptime: u64,
-}
-
-impl serde::Serialize for Host {
-    fn serialize<S: Serializer>(
-        &self,
-        serializer: S,
-    ) -> Result<S::Ok, S::Error> {
-        let mut s = serializer.serialize_struct("Host", 3)?;
-        s.serialize_field("os_version", &FormatKey(&self.os_version))?;
-        s.serialize_field("kernel_version", &FormatKey(&self.kernel_version))?;
-        s.serialize_field("uptime", &Format::Host(self.uptime))?;
-        s.end()
-    }
-}
-
-impl From<&sysinfo::System> for Host {
-    fn from(system: &sysinfo::System) -> Self {
-        Self {
-            os_version: system.long_os_version().unwrap_or_default(),
-            kernel_version: system.kernel_version().unwrap_or_default(),
-            uptime: system.uptime(),
-        }
-    }
-}
-
-struct AsF64(Decimal);
-
-impl Serialize for AsF64 {
-    fn serialize<S: Serializer>(
-        &self,
-        serializer: S,
-    ) -> Result<S::Ok, S::Error> {
-        use serde::ser::Error;
-        let value = self.0.to_f64().ok_or_else(|| {
-            S::Error::custom(format!(
-                "Failed to convert a Decimal value into a f64: {:?}",
-                self.0
-            ))
-        })?;
-        value.serialize(serializer)
-    }
-}
-
-pub enum Format<T> {
-    Cpu(T),
-    Disk(T),
-    Host(T),
-    LoadAverage(T),
-    Memory(T),
-    Memory2(T),
-    Process(T),
-}
-
-impl<T: serde::Serialize> serde::Serialize for Format<T> {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        // https://en.wikipedia.org/wiki/Brainfuck light
-        let (code, v) = match &self {
-            Self::Host(v) => ("<<-|", v),
-            Self::Cpu(v) => ("<<|id=<", v),
-            Self::Disk(v) => ("<<|path=<", v),
-            Self::LoadAverage(v) => ("<<-|", v),
-            Self::Memory(v) => ("<<|type=<", v),
-            Self::Memory2(v) => ("<<|type=<", v),
-            Self::Process(v) => ("<<<|", v),
-        };
-
-        serializer.serialize_newtype_struct(code, v)
-    }
-}
-
-pub struct FormatKey<T>(T);
-
-impl<T: serde::Serialize + Eq + Hash> serde::Serialize for FormatKey<T> {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        let mut hashmap = HashMap::new();
-        hashmap.insert(&self.0, 1);
-        serializer.serialize_newtype_struct(".<<<|", &hashmap)
-    }
-}
-
-fn format_value<S>(value: &usize, serializer: S) -> Result<S::Ok, S::Error>
-where
-    S: Serializer,
-{
-    serializer.serialize_newtype_struct("<<-|", value)
-}
-
-const DECIMAL_PRECISION: u32 = 4;
-
-#[inline]
-fn percent_usage(current: u64, max: u64) -> Decimal {
-    Decimal::from(current)
-        .checked_div(Decimal::from(max))
-        .unwrap_or_default()
-        .checked_mul(100.into())
-        .unwrap_or_default()
-        .round_dp(DECIMAL_PRECISION)
-}
-
-#[inline]
-fn decimal(current: f32) -> Decimal {
-    Decimal::from_f32(current)
-        .unwrap_or_default()
-        .round_dp(DECIMAL_PRECISION)
-}
-
-#[cfg(test)]
-mod tests {
-    use std::path::PathBuf;
-
-    use rust_decimal::Decimal;
-    use serde::Serialize;
-
-    use super::*;
-
-    #[derive(Serialize)]
-    pub struct Metrics {
-        system: super::SystemMetrics,
-    }
-
-    impl From<&System> for Metrics {
-        fn from(system: &System) -> Self {
-            Self {
-                system: system.metrics().expect("metrics"),
-            }
-        }
-    }
-
-    #[tokio::test]
-    async fn test_metrics_system_values() {
-        let system = System::new().await;
-        let metrics = Metrics::from(&system);
-
-        // NOTE: This ::expect can never fail on Linux!
-        let pid = sysinfo::get_current_pid().expect("Unable to get PID");
-        assert_eq!(metrics.system.application.pid, pid);
-        assert!(metrics.system.host.uptime > 0);
-        assert!(!metrics.system.cpu.is_empty());
-        assert!(!metrics.system.disk.is_empty());
-    }
-
-    #[tokio::test]
-    async fn test_metrics_system_prometheus_full() {
-        let memory = Memory {
-            size: 1000,
-            free: Some(877),
-            usage: Decimal::new(1234, 2),
-        };
-
-        let metrics = Metrics {
-            system: SystemMetrics {
-                application: Process {
-                    pid: Pid::from(0),
-                    name: "process".to_string(),
-                    cpu_usage: Decimal::new(1234, 2),
-                    memory: memory.clone(),
-                },
-                memory: SystemMemory {
-                    system: memory.clone(),
-                    swap: memory,
-                },
-                load_average: LoadAverage(1.2, 2.3, 3.4),
-                host: Host {
-                    os_version: "os-version".to_string(),
-                    kernel_version: "kernel-version".to_string(),
-                    uptime: 123456,
-                },
-                disk: vec![(
-                    PathBuf::from("disk1"),
-                    Disk {
-                        size: 1000,
-                        free: 877,
-                        usage: Decimal::new(1234, 2),
-                    },
-                )]
-                .into_iter()
-                .collect(),
-                cpu_physical_core_count: 1,
-                cpu_count: 1,
-                cpu: vec![(
-                    1,
-                    Cpu {
-                        name: "cpu1".to_string(),
-                        frequency: 12345,
-                        usage: Decimal::new(1234, 2),
-                    },
-                )]
-                .into_iter()
-                .collect(),
-            },
-        };
-
-        let output = serde_prometheus::to_string(&metrics, None, &[])
-            .expect("prometheus");
-
-        assert_eq!(
-            output.trim_end().split('\n').collect::<Vec<&str>>(),
-            vec![
-                r#"system_application_pid 0"#,
-                r#"system_application_name{path = "process"} 1"#,
-                r#"system_application_cpu_usage 12.34"#,
-                r#"system_application_size{type = "memory"} 1000"#,
-                r#"system_application_free{type = "memory"} 877"#,
-                r#"system_application_usage{type = "memory"} 12.34"#,
-                r#"system_memory_size{type = "system"} 1000"#,
-                r#"system_memory_free{type = "system"} 877"#,
-                r#"system_memory_usage{type = "system"} 12.34"#,
-                r#"system_memory_size{type = "swap"} 1000"#,
-                r#"system_memory_free{type = "swap"} 877"#,
-                r#"system_memory_usage{type = "swap"} 12.34"#,
-                r#"system_load_average_1 1.2"#,
-                r#"system_load_average_5 2.3"#,
-                r#"system_load_average_15 3.4"#,
-                r#"system_host_os_version{path = "os-version"} 1"#,
-                r#"system_host_kernel_version{path = "kernel-version"} 1"#,
-                r#"system_host_uptime 123456"#,
-                r#"system_disk_size{path = "disk1"} 1000"#,
-                r#"system_disk_free{path = "disk1"} 877"#,
-                r#"system_disk_usage{path = "disk1"} 12.34"#,
-                r#"system_cpu_physical_core_count 1"#,
-                r#"system_cpu_count 1"#,
-                r#"system_cpu_frequency{id = "1"} 12345"#,
-                r#"system_cpu_usage{id = "1"} 12.34"#,
-            ]
-        )
-    }
-}
diff --git a/crates/fuel-streams-storage/Cargo.toml b/crates/fuel-streams-storage/Cargo.toml
index c8478563..2aefacfb 100644
--- a/crates/fuel-streams-storage/Cargo.toml
+++ b/crates/fuel-streams-storage/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "fuel-streams-storage"
-description = "strategies and adapters for storing fuel streams in transient and file storage systems (i.e. NATS and S3)"
+description = "Strategies and adapters for storing fuel streams in transient and file storage systems (i.e. 
NATS and S3)" authors = { workspace = true } keywords = { workspace = true } edition = { workspace = true } @@ -11,14 +11,11 @@ version = { workspace = true } rust-version = { workspace = true } [dependencies] -async-nats = { workspace = true } aws-config = { version = "1.5.10", features = ["behavior-version-latest"] } aws-sdk-s3 = "1.65.0" aws-smithy-runtime-api = "1.7.3" aws-smithy-types = "=1.2.9" -displaydoc = { workspace = true } dotenvy = { workspace = true } -fuel-networks = { workspace = true } rand = { workspace = true } thiserror = { workspace = true } tracing = { workspace = true } diff --git a/crates/fuel-streams-storage/src/lib.rs b/crates/fuel-streams-storage/src/lib.rs index 15f3b0ec..f5cf85aa 100644 --- a/crates/fuel-streams-storage/src/lib.rs +++ b/crates/fuel-streams-storage/src/lib.rs @@ -1,7 +1,3 @@ // TODO: Introduce Adapters for Transient and FileStorage (NATS and S3 clients would implement those) - -pub mod nats; pub mod s3; - -pub use nats::*; pub use s3::*; diff --git a/crates/fuel-streams-storage/src/nats/mod.rs b/crates/fuel-streams-storage/src/nats/mod.rs deleted file mode 100644 index 63359fd2..00000000 --- a/crates/fuel-streams-storage/src/nats/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -/// Houses shared APIs for interacting with NATS for fuel-streams-publisher and fuel-streams crates -/// As much as possible, the public interface/APIS should be agnostic of NATS. These can then be extended -/// in the fuel-streams-publisher and fuel-streams crates to provide a more opinionated API towards -/// their specific use-cases. -mod error; -mod nats_client; -mod nats_client_opts; -mod nats_namespace; - -pub mod types; - -pub use error::*; -pub use nats_client::*; -pub use nats_client_opts::*; -pub use nats_namespace::*; -pub use types::*; diff --git a/crates/fuel-streams-storage/src/nats/nats_client_opts.rs b/crates/fuel-streams-storage/src/nats/nats_client_opts.rs deleted file mode 100644 index c43833d1..00000000 --- a/crates/fuel-streams-storage/src/nats/nats_client_opts.rs +++ /dev/null @@ -1,147 +0,0 @@ -use std::time::Duration; - -use async_nats::ConnectOptions; -use fuel_networks::{FuelNetwork, FuelNetworkUserRole}; - -use super::NatsNamespace; - -/// Represents options for configuring a NATS client. -/// -/// # Examples -/// -/// Creating a new `NatsClientOpts` instance: -/// -/// ``` -/// use fuel_streams_storage::nats::NatsClientOpts; -/// use fuel_networks::FuelNetwork; -/// -/// let opts = NatsClientOpts::new(FuelNetwork::Local); -/// ``` -/// -/// Creating a public `NatsClientOpts`: -/// -/// ``` -/// use fuel_streams_storage::nats::NatsClientOpts; -/// use fuel_networks::FuelNetwork; -/// -/// let opts = NatsClientOpts::new(FuelNetwork::Local); -/// ``` -/// -/// Modifying `NatsClientOpts`: -/// -/// ``` -/// use fuel_streams_storage::nats::NatsClientOpts; -/// use fuel_networks::{FuelNetwork, FuelNetworkUserRole}; -/// -/// let opts = NatsClientOpts::new(FuelNetwork::Local) -/// .with_role(FuelNetworkUserRole::Admin) -/// .with_timeout(10); -/// ``` -#[derive(Debug, Clone)] -pub struct NatsClientOpts { - pub network: FuelNetwork, - /// The role of the user connecting to the NATS server (Admin or Public). - pub(crate) role: FuelNetworkUserRole, - /// The namespace used as a prefix for NATS streams, consumers, and subject names. - pub(crate) namespace: NatsNamespace, - /// The timeout in seconds for NATS operations. - pub(crate) timeout_secs: u64, - /// URL of the NATS server. 
- pub(crate) url: Option, -} - -impl NatsClientOpts { - pub fn new(network: FuelNetwork) -> Self { - Self { - network, - role: FuelNetworkUserRole::default(), - namespace: NatsNamespace::default(), - timeout_secs: 5, - url: None, - } - } - - pub fn admin_opts() -> Self { - Self::new(FuelNetwork::load_from_env()) - .with_role(FuelNetworkUserRole::Admin) - } - - pub fn with_role(self, role: FuelNetworkUserRole) -> Self { - Self { role, ..self } - } - - pub fn with_url(self, url: String) -> Self { - Self { - url: Some(url), - ..self - } - } - - pub fn get_url(&self) -> String { - match self.url.clone() { - Some(url) => url, - None => match self.role { - FuelNetworkUserRole::Admin => dotenvy::var("NATS_URL") - .expect("NATS_URL must be set for admin role"), - FuelNetworkUserRole::Default => self.network.to_nats_url(), - }, - } - } - - #[cfg(any(test, feature = "test-helpers"))] - pub fn with_rdn_namespace(self) -> Self { - let namespace = format!(r"namespace-{}", Self::random_int()); - self.with_namespace(&namespace) - } - - #[cfg(any(test, feature = "test-helpers"))] - pub fn with_namespace(self, namespace: &str) -> Self { - let namespace = NatsNamespace::Custom(namespace.to_string()); - Self { namespace, ..self } - } - - pub fn with_timeout(self, secs: u64) -> Self { - Self { - timeout_secs: secs, - ..self - } - } - - pub(super) fn connect_opts(&self) -> ConnectOptions { - let (user, pass) = match self.role { - FuelNetworkUserRole::Admin => ( - Some("admin".to_string()), - Some( - dotenvy::var("NATS_ADMIN_PASS") - .expect("`NATS_ADMIN_PASS` env must be set"), - ), - ), - FuelNetworkUserRole::Default => { - (Some("default_user".to_string()), Some("".to_string())) - } - }; - - match (user, pass) { - (Some(user), Some(pass)) => { - ConnectOptions::with_user_and_password(user, pass) - .connection_timeout(Duration::from_secs(self.timeout_secs)) - .max_reconnects(1) - .name(Self::conn_id()) - } - _ => ConnectOptions::new() - .connection_timeout(Duration::from_secs(self.timeout_secs)) - .max_reconnects(1) - .name(Self::conn_id()), - } - } - - // This will be useful for debugging and monitoring connections - fn conn_id() -> String { - format!(r"connection-{}", Self::random_int()) - } - - fn random_int() -> u32 { - use rand::Rng; - rand::thread_rng().gen() - } -} diff --git a/crates/fuel-streams-storage/src/s3/s3_client.rs b/crates/fuel-streams-storage/src/s3/s3_client.rs index ea07be30..dad2ec35 100644 --- a/crates/fuel-streams-storage/src/s3/s3_client.rs +++ b/crates/fuel-streams-storage/src/s3/s3_client.rs @@ -1,4 +1,4 @@ -use aws_config::{meta::region::RegionProviderChain, Region}; +use aws_config::{BehaviorVersion, Region}; use aws_sdk_s3::{ config::http::HttpResponse, operation::{ @@ -6,7 +6,9 @@ use aws_sdk_s3::{ delete_bucket::DeleteBucketError, delete_object::DeleteObjectError, get_object::GetObjectError, + put_bucket_policy::PutBucketPolicyError, put_object::PutObjectError, + put_public_access_block::PutPublicAccessBlockError, }, Client, }; @@ -34,6 +36,12 @@ pub enum S3ClientError { MissingEnvVar(String), #[error("Failed to stream objects because: {0}")] StreamingError(String), + #[error("Failed to put bucket policy: {0}")] + PutBucketPolicyError(#[from] SdkError), + #[error("Failed to put public access block: {0}")] + PutPublicAccessBlockError( + #[from] SdkError, + ), #[error("IO Error: {0}")] IoError(#[from] std::io::Error), } @@ -46,52 +54,121 @@ pub struct S3Client { impl S3Client { pub async fn new(opts: &S3ClientOpts) -> Result { - // Load AWS configuration - let mut aws_config = 
aws_config::from_env(); - - if let Some(endpoint_url) = opts.endpoint_url() { - aws_config = aws_config.endpoint_url(endpoint_url); - } - - if let Some(region) = opts.region() { - let region_provider = - RegionProviderChain::first_try(Region::new(region)); - let region = region_provider.region().await.unwrap(); - - aws_config = aws_config.region(region); - } - - let s3_config = - aws_sdk_s3::config::Builder::from(&aws_config.load().await) - .force_path_style(true) - .build(); + let config = aws_config::defaults(BehaviorVersion::latest()) + .endpoint_url(opts.endpoint_url().to_string()) + .region(Region::new(opts.region().to_string())) + // TODO: Remove this once we have a proper S3 bucket created + // for now this is a workaround to avoid signing requests + .no_credentials() + .load() + .await; + + // Create S3 config without signing + let s3_config = aws_sdk_s3::config::Builder::from(&config) + .force_path_style(true) + .disable_s3_express_session_auth(true) + .build(); let client = aws_sdk_s3::Client::from_conf(s3_config); - - Ok(Self { + let s3_client = Self { client, bucket: opts.bucket(), - }) + }; + + Ok(s3_client) } pub fn arc(self) -> std::sync::Arc { std::sync::Arc::new(self) } + pub fn client(&self) -> &Client { + &self.client + } + + pub fn bucket(&self) -> &str { + &self.bucket + } + pub async fn put_object( &self, key: &str, object: Vec, ) -> Result<(), S3ClientError> { - self.client + match self + .client .put_object() .bucket(&self.bucket) .key(key) .body(object.into()) .send() - .await?; - - Ok(()) + .await + { + Ok(_) => Ok(()), + Err(error) => match error { + SdkError::ServiceError(error) => { + tracing::error!( + "Failed to put object in S3 bucket={} key={}: {}", + self.bucket, + key, + error.err() + ); + Err(S3ClientError::PutObjectError(SdkError::ServiceError( + error, + ))) + } + SdkError::ConstructionFailure(error) => { + tracing::error!( + "Failed to construct S3 request for bucket={} key={}", + self.bucket, + key, + ); + Err(S3ClientError::PutObjectError( + SdkError::ConstructionFailure(error), + )) + } + SdkError::TimeoutError(error) => { + tracing::error!( + "Timeout putting object in S3 bucket={} key={}", + self.bucket, + key, + ); + Err(S3ClientError::PutObjectError(SdkError::TimeoutError( + error, + ))) + } + SdkError::DispatchFailure(error) => { + tracing::error!( + "Failed to dispatch S3 request for bucket={} key={}: {}", + self.bucket, + key, + error.as_connector_error().unwrap() + ); + Err(S3ClientError::PutObjectError( + SdkError::DispatchFailure(error), + )) + } + SdkError::ResponseError(error) => { + tracing::error!( + "Invalid response from S3 for bucket={} key={}", + self.bucket, + key, + ); + Err(S3ClientError::PutObjectError(SdkError::ResponseError( + error, + ))) + } + _ => { + tracing::error!( + "Failed to put object in S3 bucket={} key={}: {:?}", + self.bucket, + key, + error + ); + Err(S3ClientError::PutObjectError(error)) + } + }, + } } pub async fn get_object( @@ -135,13 +212,12 @@ impl S3Client { #[cfg(any(test, feature = "test-helpers"))] pub async fn new_for_testing() -> Self { - use fuel_networks::FuelNetwork; - dotenvy::dotenv().expect(".env file not found"); - let s3_client = Self::new( - &S3ClientOpts::new(FuelNetwork::Local).with_random_namespace(), - ) + let s3_client = Self::new(&S3ClientOpts::new( + crate::S3Env::Local, + crate::S3Role::Admin, + )) .await .expect( "S3Client creation failed. 
Check AWS Env vars and Localstack setup",
diff --git a/crates/fuel-streams-storage/src/s3/s3_client_opts.rs b/crates/fuel-streams-storage/src/s3/s3_client_opts.rs
index a0d377a8..468efa30 100644
--- a/crates/fuel-streams-storage/src/s3/s3_client_opts.rs
+++ b/crates/fuel-streams-storage/src/s3/s3_client_opts.rs
@@ -1,49 +1,94 @@
-use fuel_networks::{FuelNetwork, FuelNetworkUserRole};
+use std::str::FromStr;
+
+#[derive(Debug, Clone, Default)]
+pub enum S3Role {
+    Admin,
+    #[default]
+    Public,
+}
+
+#[derive(Debug, Clone, Default)]
+pub enum S3Env {
+    #[default]
+    Local,
+    Testnet,
+    Mainnet,
+}
+
+impl FromStr for S3Env {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "local" => Ok(S3Env::Local),
+            "testnet" => Ok(S3Env::Testnet),
+            "mainnet" => Ok(S3Env::Mainnet),
+            _ => Err(format!("unknown S3 type: {}", s)),
+        }
+    }
+}
 
-// Introduced for consistency.
-// TODO: make it more ergonomic by probably using FuelNetwork in S3Client directly
 #[derive(Debug, Clone, Default)]
 pub struct S3ClientOpts {
-    pub fuel_network: FuelNetwork,
-    pub role: FuelNetworkUserRole,
+    pub s3_env: S3Env,
+    pub role: S3Role,
     pub namespace: Option<String>,
 }
 
 impl S3ClientOpts {
-    pub fn new(fuel_network: FuelNetwork) -> Self {
+    pub fn new(s3_env: S3Env, role: S3Role) -> Self {
+        Self {
+            s3_env,
+            role,
+            namespace: None,
+        }
+    }
+
+    pub fn from_env(role: Option<S3Role>) -> Self {
+        let s3_env = std::env::var("NETWORK")
+            .map(|s| S3Env::from_str(&s).unwrap_or_default())
+            .unwrap_or_default();
+
         Self {
-            fuel_network,
-            role: FuelNetworkUserRole::default(),
+            s3_env,
+            role: role.unwrap_or_default(),
             namespace: None,
         }
     }
 
     pub fn admin_opts() -> Self {
-        Self::new(FuelNetwork::load_from_env())
-            .with_role(FuelNetworkUserRole::Admin)
+        Self::from_env(Some(S3Role::Admin))
     }
 
-    pub fn with_role(self, role: FuelNetworkUserRole) -> Self {
-        Self { role, ..self }
+    pub fn public_opts() -> Self {
+        Self::from_env(Some(S3Role::Public))
     }
 
-    pub fn endpoint_url(&self) -> Option<String> {
+    pub fn endpoint_url(&self) -> String {
         match self.role {
-            FuelNetworkUserRole::Admin => dotenvy::var("AWS_ENDPOINT_URL").ok(),
-            FuelNetworkUserRole::Default => Some(self.fuel_network.to_s3_url()),
+            S3Role::Admin => dotenvy::var("AWS_ENDPOINT_URL")
+                .expect("AWS_ENDPOINT_URL must be set for admin role"),
+            S3Role::Public => {
+                match self.s3_env {
+                    S3Env::Local => "http://localhost:4566".to_string(),
+                    S3Env::Testnet | S3Env::Mainnet => {
+                        let bucket = self.bucket();
+                        let region = self.region();
+                        format!("https://{bucket}.s3-website-{region}.amazonaws.com")
+                    }
+                }
+            }
         }
     }
 
-    pub fn region(&self) -> Option<String> {
-        match self.role {
-            FuelNetworkUserRole::Admin => dotenvy::var("AWS_S3_REGION").ok(),
-            FuelNetworkUserRole::Default => {
-                Some(self.fuel_network.to_s3_region())
-            }
+    pub fn region(&self) -> String {
+        match &self.role {
+            S3Role::Admin => dotenvy::var("AWS_REGION")
+                .expect("AWS_REGION must be set for admin role"),
+            S3Role::Public => "us-east-1".to_string(),
         }
     }
 
-    // TODO: Consider revamping and reusing NATs' Namespace here
     #[cfg(any(test, feature = "test-helpers"))]
     pub fn with_random_namespace(mut self) -> Self {
         let random_namespace = {
@@ -56,14 +101,20 @@ impl S3ClientOpts {
     }
 
     pub fn bucket(&self) -> String {
-        match self.role {
-            FuelNetworkUserRole::Admin => dotenvy::var("AWS_S3_BUCKET_NAME")
-                .expect("AWS_S3_BUCKET_NAME must be set for admin role"),
-            FuelNetworkUserRole::Default => format!(
-                "{}-{}",
-                self.fuel_network.to_s3_bucket(),
-                self.namespace.to_owned().unwrap_or_default()
-            ),
+        if matches!(self.role, 
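        // Bucket resolution (example values are illustrative): Admin reads
        // AWS_S3_BUCKET_NAME from the environment, while Public derives the
        // name from the network and optional namespace, e.g. S3Env::Testnet
        // with namespace "abc123" yields "fuel-streams-testnet-abc123".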
S3Role::Admin) { + return dotenvy::var("AWS_S3_BUCKET_NAME") + .expect("AWS_S3_BUCKET_NAME must be set for admin role"); } + + let base_bucket = match self.s3_env { + S3Env::Local => "fuel-streams-local", + S3Env::Testnet => "fuel-streams-testnet", + S3Env::Mainnet => "fuel-streams", + }; + + self.namespace + .as_ref() + .map(|ns| format!("{base_bucket}-{ns}")) + .unwrap_or(base_bucket.to_string()) } } diff --git a/crates/fuel-streams-ws/src/server/ws/socket.rs b/crates/fuel-streams-ws/src/server/ws/socket.rs deleted file mode 100644 index a705b2cf..00000000 --- a/crates/fuel-streams-ws/src/server/ws/socket.rs +++ /dev/null @@ -1,397 +0,0 @@ -use std::sync::{atomic::AtomicUsize, Arc}; - -use actix_web::{ - web::{self, Bytes}, - HttpMessage, - HttpRequest, - Responder, -}; -use actix_ws::{Message, Session}; -use fuel_streams::{ - logs::Log, - types::{Block, Input, Output, Receipt, Transaction}, - utxos::Utxo, - StreamEncoder, - Streamable, -}; -use fuel_streams_core::SubscriptionConfig; -use fuel_streams_storage::DeliverPolicy; -use futures::StreamExt; -use uuid::Uuid; - -use super::{ - errors::WsSubscriptionError, - fuel_streams::FuelStreams, - models::ClientMessage, -}; -use crate::{ - server::{ - state::ServerState, - ws::{ - fuel_streams::FuelStreamsExt, - models::{ServerMessage, SubscriptionPayload}, - }, - }, - telemetry::Telemetry, -}; - -static _NEXT_USER_ID: AtomicUsize = AtomicUsize::new(1); - -pub async fn get_ws( - req: HttpRequest, - body: web::Payload, - state: web::Data, -) -> actix_web::Result { - // extract user id - let user_id = match req.extensions().get::() { - Some(user_id) => { - tracing::info!( - "Authenticated WebSocket connection for user: {:?}", - user_id.to_string() - ); - user_id.to_owned() - } - None => { - tracing::info!("Unauthenticated WebSocket connection"); - return Err(actix_web::error::ErrorUnauthorized( - "Missing or invalid JWT", - )); - } - }; - - // split the request into response, session, and message stream - let (response, session, mut msg_stream) = actix_ws::handle(&req, body)?; - - // record the new subscription - state.context.telemetry.increment_subscriptions_count(); - - // spawm an actor handling the ws connection - let streams = state.context.fuel_streams.clone(); - let telemetry = state.context.telemetry.clone(); - actix_web::rt::spawn(async move { - tracing::info!("Ws opened for user id {:?}", user_id.to_string()); - while let Some(Ok(msg)) = msg_stream.recv().await { - let mut session = session.clone(); - match msg { - Message::Ping(bytes) => { - tracing::info!("Received ping, {:?}", bytes); - if session.pong(&bytes).await.is_err() { - tracing::error!("Error sending pong, {:?}", bytes); - } - } - Message::Pong(bytes) => { - tracing::info!("Received pong, {:?}", bytes); - } - Message::Text(string) => { - tracing::info!("Received text, {string}"); - } - Message::Binary(bytes) => { - tracing::info!("Received binary {:?}", bytes); - let client_message = match parse_client_message(bytes) { - Ok(msg) => msg, - Err(e) => { - close_socket_with_error( - e, user_id, session, None, telemetry, - ) - .await; - return; - } - }; - - // handle the client message - match client_message { - ClientMessage::Subscribe(payload) => { - tracing::info!( - "Received subscribe message: {:?}", - payload - ); - let subject_wildcard = payload.wildcard; - let deliver_policy = payload.deliver_policy; - - // verify the subject name - let sub_subject = - match verify_and_extract_subject_name( - &subject_wildcard, - ) { - Ok(res) => res, - Err(e) => { - 
close_socket_with_error( - e, - user_id, - session, - Some(subject_wildcard.clone()), - telemetry, - ) - .await; - return; - } - }; - - // start the streamer async - let mut stream_session = session.clone(); - - // reply to socket with subscription - send_message_to_socket( - &mut session, - ServerMessage::Subscribed( - SubscriptionPayload { - wildcard: subject_wildcard.clone(), - deliver_policy, - }, - ), - ) - .await; - - // receive streaming in a background thread - let streams = streams.clone(); - let telemetry = telemetry.clone(); - actix_web::rt::spawn(async move { - // update metrics - telemetry.update_user_subscription_metrics( - user_id, - &subject_wildcard, - ); - - // subscribe to the stream - let config = SubscriptionConfig { - deliver_policy: DeliverPolicy::All, - filter_subjects: vec![ - subject_wildcard.clone() - ], - }; - let mut sub = match streams - .subscribe(&sub_subject, Some(config)) - .await - { - Ok(sub) => sub, - Err(e) => { - close_socket_with_error( - WsSubscriptionError::Stream(e), - user_id, - session, - Some(subject_wildcard.clone()), - telemetry, - ) - .await; - return; - } - }; - - // consume and forward to the ws - while let Some(s3_serialized_payload) = - sub.next().await - { - // decode and serialize back to ws payload - let serialized_ws_payload = match decode( - &subject_wildcard, - s3_serialized_payload, - ) - .await - { - Ok(res) => res, - Err(e) => { - telemetry.update_error_metrics( - &subject_wildcard, - &e.to_string(), - ); - tracing::error!("Error serializing received stream message: {:?}", e); - continue; - } - }; - - // send the payload over the stream - let _ = stream_session - .binary(serialized_ws_payload) - .await; - } - }); - } - ClientMessage::Unsubscribe(payload) => { - tracing::info!( - "Received unsubscribe message: {:?}", - payload - ); - let subject_wildcard = payload.wildcard; - - let deliver_policy = payload.deliver_policy; - - if let Err(e) = verify_and_extract_subject_name( - &subject_wildcard, - ) { - close_socket_with_error( - e, - user_id, - session, - Some(subject_wildcard.clone()), - telemetry, - ) - .await; - return; - } - - // TODO: implement session management for the same user_id - - // send a message to the client to confirm unsubscribing - send_message_to_socket( - &mut session, - ServerMessage::Unsubscribed( - SubscriptionPayload { - wildcard: subject_wildcard, - deliver_policy, - }, - ), - ) - .await; - return; - } - } - } - Message::Close(reason) => { - tracing::info!( - "Got close event, terminating session with reason {:?}", - reason - ); - let reason_str = - reason.and_then(|r| r.description).unwrap_or_default(); - close_socket_with_error( - WsSubscriptionError::ClosedWithReason( - reason_str.to_string(), - ), - user_id, - session, - None, - telemetry, - ) - .await; - return; - } - _ => { - tracing::error!("Received unknown message type"); - close_socket_with_error( - WsSubscriptionError::ClosedWithReason( - "Unknown message type".to_string(), - ), - user_id, - session, - None, - telemetry, - ) - .await; - return; - } - }; - } - }); - - Ok(response) -} - -fn parse_client_message( - msg: Bytes, -) -> Result { - let msg = serde_json::from_slice::(&msg) - .map_err(WsSubscriptionError::UnparsablePayload)?; - Ok(msg) -} - -fn stream_to_server_message( - msg: Vec, -) -> Result, WsSubscriptionError> { - let server_message = serde_json::to_vec(&ServerMessage::Update(msg)) - .map_err(WsSubscriptionError::UnserializableMessagePayload)?; - Ok(server_message) -} - -pub fn verify_and_extract_subject_name( - subject_wildcard: 
&str, -) -> Result { - let mut subject_parts = subject_wildcard.split('.'); - // TODO: more advanced checks here with Regex - if subject_parts.clone().count() == 1 { - return Err(WsSubscriptionError::UnsupportedWildcardPattern( - subject_wildcard.to_string(), - )); - } - let subject_name = subject_parts.next().unwrap_or_default(); - if !FuelStreams::is_within_subject_names(subject_name) { - return Err(WsSubscriptionError::UnknownSubjectName( - subject_wildcard.to_string(), - )); - } - Ok(subject_name.to_string()) -} - -async fn close_socket_with_error( - e: WsSubscriptionError, - user_id: uuid::Uuid, - mut session: Session, - subject_wildcard: Option, - telemetry: Arc, -) { - tracing::error!("ws subscription error: {:?}", e.to_string()); - if let Some(subject_wildcard) = subject_wildcard { - telemetry.update_error_metrics(&subject_wildcard, &e.to_string()); - telemetry.update_unsubscribed(user_id, &subject_wildcard); - } - telemetry.decrement_subscriptions_count(); - send_message_to_socket(&mut session, ServerMessage::Error(e.to_string())) - .await; - let _ = session.close(None).await; -} - -async fn send_message_to_socket(session: &mut Session, message: ServerMessage) { - let data = serde_json::to_vec(&message).ok().unwrap_or_default(); - let _ = session.binary(data).await; -} - -async fn decode( - subject_wildcard: &str, - s3_payload: Vec, -) -> Result, WsSubscriptionError> { - let subject = verify_and_extract_subject_name(subject_wildcard)?; - match subject.as_str() { - Transaction::NAME => { - let entity = Transaction::decode_or_panic(s3_payload); - let serialized_data = serde_json::to_vec(&entity) - .map_err(WsSubscriptionError::UnparsablePayload)?; - stream_to_server_message(serialized_data) - } - Block::NAME => { - let entity = Block::decode_or_panic(s3_payload); - let serialized_data = serde_json::to_vec(&entity) - .map_err(WsSubscriptionError::UnparsablePayload)?; - stream_to_server_message(serialized_data) - } - Input::NAME => { - let entity = Input::decode_or_panic(s3_payload); - let serialized_data = serde_json::to_vec(&entity) - .map_err(WsSubscriptionError::UnparsablePayload)?; - stream_to_server_message(serialized_data) - } - Output::NAME => { - let entity = Output::decode_or_panic(s3_payload); - let serialized_data = serde_json::to_vec(&entity) - .map_err(WsSubscriptionError::UnparsablePayload)?; - stream_to_server_message(serialized_data) - } - Receipt::NAME => { - let entity = Receipt::decode_or_panic(s3_payload); - let serialized_data = serde_json::to_vec(&entity) - .map_err(WsSubscriptionError::UnparsablePayload)?; - stream_to_server_message(serialized_data) - } - Utxo::NAME => { - let entity = Utxo::decode_or_panic(s3_payload); - let serialized_data = serde_json::to_vec(&entity) - .map_err(WsSubscriptionError::UnparsablePayload)?; - stream_to_server_message(serialized_data) - } - Log::NAME => { - let entity = Log::decode_or_panic(s3_payload); - let serialized_data = serde_json::to_vec(&entity) - .map_err(WsSubscriptionError::UnparsablePayload)?; - stream_to_server_message(serialized_data) - } - _ => Err(WsSubscriptionError::UnknownSubjectName(subject.to_string())), - } -} diff --git a/crates/fuel-streams-ws/src/telemetry/elastic_search.rs b/crates/fuel-streams-ws/src/telemetry/elastic_search.rs deleted file mode 100755 index 905ef088..00000000 --- a/crates/fuel-streams-ws/src/telemetry/elastic_search.rs +++ /dev/null @@ -1,323 +0,0 @@ -// TODO: Consider using external lib for elasticsearch -// TODO: Consider modularizing this module further - -use std::{fs, io, 
path::PathBuf, sync::Arc}; - -use anyhow::Context; -use chrono::Utc; -use displaydoc::Display; -pub use elasticsearch::params::Refresh; -use elasticsearch::{ - self, - auth::{ClientCertificate, Credentials}, - cert::{Certificate, CertificateValidation}, - http::transport::{SingleNodeConnectionPool, Transport, TransportBuilder}, - params, - Elasticsearch, - IndexParts, -}; -use serde::{Deserialize, Serialize}; -use thiserror::Error; -use url::{self, Url}; - -pub const ELASTICSEARCH_PATH: &str = "fuel-data-systems"; - -/// LogEntry represents a log entry that will be stored in Elastic Search -/// for monitoring purposes. -/// TODO: Consider adding more useful optional fields to this struct -#[derive(Serialize, Deserialize)] -pub struct LogEntry { - timestamp: chrono::DateTime, - level: String, - message: String, -} - -impl LogEntry { - pub fn new(level: &str, message: &str) -> Self { - Self { - timestamp: Utc::now(), - level: level.to_string(), - message: message.to_string(), - } - } -} - -pub async fn log(elastic_search: Arc, log_entry: LogEntry) { - if let Err(err) = elastic_search - .get_conn() - .index( - ELASTICSEARCH_PATH, - Some("publisher-logs"), - &log_entry, - Some(Refresh::WaitFor), - ) - .await - { - tracing::error!("Failed to log to ElasticSearch: {}", err); - } -} - -pub fn should_use_elasticsearch() -> bool { - dotenvy::var("USE_ELASTIC_LOGGING").is_ok_and(|val| val == "true") -} - -pub async fn new_elastic_search() -> anyhow::Result { - let elasticsearch_url = dotenvy::var("ELASTICSEARCH_URL") - .expect("`ELASTICSEARCH_URL` env must be set"); - let elsaticsearch_username = dotenvy::var("ELASTICSEARCH_USERNAME") - .expect("`ELASTICSEARCH_USERNAME` env must be set"); - let elsaticsearch_password = dotenvy::var("ELASTICSEARCH_PASSWORD") - .expect("`ELASTICSEARCH_PASSWORD` env must be set"); - - let config = Config { - url: elasticsearch_url, - enabled: true, - pool_max_size: Some(2), - username: Some(elsaticsearch_username), - password: Some(elsaticsearch_password), - ..Default::default() - }; - let client = ElasticSearch::new(&config) - .await - .context("Failed to configure Elasticsearch connection")?; - Ok(client) -} - -/// Elasticsearch errors -#[derive(Debug, Display, Error)] -pub enum ElasticSearchError { - /// ElasticSearchConfigError: `{0}` - Config(#[from] elasticsearch::http::transport::BuildError), - /// ElasticSearchDisabled - Disabled, - /// ElasticSearchError: `{0}` - Generic(#[from] elasticsearch::Error), - /// IoError: `{0}` - Io(#[from] io::Error), - /// UrlParseError: `{0}` - UrlParse(#[from] url::ParseError), - /// CertificateError: `{0}`: `{0}` - Certificate(PathBuf, io::Error), - /// SerdeJsonError: `{0}` - SerdeJson(#[from] serde_json::Error), -} - -#[derive(Debug, Deserialize, Clone, Default)] -#[serde(rename_all = "kebab-case")] -#[serde(default)] -pub struct Config { - pub url: String, - pub enabled: bool, - pub username: Option, - pub password: Option, - pub api_key_id: Option, - pub api_key_value: Option, - pub pool_max_size: Option, - pub pool_min_size: Option, - pub tls: Option, -} - -/// TLS acceptor configuration. -#[derive(Debug, Deserialize, Clone, Default)] -#[serde(rename_all = "kebab-case")] -#[serde(default)] -pub struct TlsConfig { - /// Filename of CA certificates in PEM format. - pub ca: Option, - /// Filename of combined TLS client certificate and key in PKCS#12 format. - pub certificate: Option, - /// Optional passphrase to decode the TLS private key. 
- pub key_passphrase: Option, -} - -#[derive(Debug, Clone)] -pub struct ElasticSearch(ElasticConnection); - -impl ElasticSearch { - pub async fn new(config: &Config) -> Result { - if !config.enabled { - return Err(ElasticSearchError::Disabled); - } - let conn_info = ConnectionInfo::new(config)?; - let conn = conn_info - .get_connection() - .expect("connection must be created"); - Ok(Self(conn)) - } - - pub fn get_conn(&self) -> &ElasticConnection { - &self.0 - } -} - -#[derive(Clone, Debug, PartialEq, Deserialize)] -pub struct BulkResults { - pub errors: bool, - #[serde(rename = "items")] - pub results: Vec>, -} - -#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)] -#[serde(rename_all = "lowercase")] -pub enum Operation { - Create(T), - Delete(T), - Index(T), - Update(T), -} - -#[derive(Clone, Debug, Default, PartialEq, Serialize)] -pub struct OperationParams { - #[serde(rename = "_id", skip_serializing_if = "Option::is_none")] - id: Option, - #[serde(rename = "_index", skip_serializing_if = "Option::is_none")] - index: Option, - #[serde(skip_serializing_if = "Option::is_none")] - version: Option, - #[serde(skip_serializing_if = "Option::is_none")] - version_type: Option, -} - -#[derive(Clone, Debug, PartialEq, Deserialize)] -pub struct OperationStatus { - #[serde(rename = "_id")] - pub id: Option, - #[serde(rename = "_index")] - pub index: Option, - #[serde(rename = "status")] - pub http_code: u32, - #[serde(flatten)] - pub result: OperationResult, -} - -#[derive(Clone, Debug, PartialEq, Deserialize)] -pub enum OperationResult { - #[serde(rename = "result")] - Ok(String), - #[serde(rename = "error")] - Error { - #[serde(rename = "type")] - kind: String, - reason: String, - }, -} - -#[derive(Clone, Debug)] -pub struct ConnectionInfo(Transport); - -impl ConnectionInfo { - pub fn new(config: &Config) -> Result { - let url = Url::parse(&config.url)?; - let pool = SingleNodeConnectionPool::new(url); - let transport = TransportBuilder::new(pool); - let tls = config.tls.clone().unwrap_or_default(); - let credentials = match ( - config.api_key_id.as_ref(), - config.api_key_value.as_ref(), - tls.certificate, - ) { - (Some(api_key_id), Some(api_key_value), _) => Some( - Credentials::ApiKey(api_key_id.into(), api_key_value.into()), - ), - (_, _, Some(certificate)) => { - Some(Credentials::Certificate(ClientCertificate::Pkcs12( - fs::read(&certificate).map_err(|err| { - ElasticSearchError::Certificate(certificate, err) - })?, - tls.key_passphrase, - ))) - } - _ => config.username.as_ref().map(|username| { - Credentials::Basic( - username.into(), - config.password.clone().unwrap_or_default(), - ) - }), - }; - let transport = if let Some(ca) = tls.ca { - transport.cert_validation(CertificateValidation::Full( - Certificate::from_pem(&fs::read(&ca).map_err(|err| { - ElasticSearchError::Certificate(ca.clone(), err) - })?) 
- .map_err(|err| { - ElasticSearchError::Certificate( - ca, - io::Error::new(io::ErrorKind::Other, err), - ) - })?, - )) - } else { - transport - }; - let transport = if let Some(credentials) = credentials { - transport.auth(credentials) - } else { - transport - }; - let inner = transport.build()?; - Ok(Self(inner)) - } - - pub fn get_connection( - &self, - ) -> Result { - let conn = Elasticsearch::new(self.0.clone()); - Ok(ElasticConnection(Some(conn))) - } -} - -#[derive(Debug, Clone)] -pub struct ElasticConnection(Option); - -impl ElasticConnection { - pub fn check_alive(&self) -> Option { - Some(self.0.is_some()) - } - - pub async fn ping(&self) -> Result<(), ElasticSearchError> { - let conn = self.0.as_ref().ok_or_else(|| { - io::Error::new( - io::ErrorKind::ConnectionAborted, - "Connection to Elasticsearch is already closed", - ) - })?; - - let response = conn.ping().send().await?; - let _ = response.error_for_status_code()?; - Ok(()) - } -} - -impl ElasticConnection { - pub async fn index( - &self, - path: &str, - id: Option<&str>, - doc: B, - refresh: Option, - ) -> Result<(), ElasticSearchError> - where - B: Serialize, - { - let conn = self.0.as_ref().ok_or_else(|| { - io::Error::new( - io::ErrorKind::ConnectionAborted, - "Connection to Elasticsearch is already closed", - ) - })?; - let index_parts = id - .map(|id| IndexParts::IndexId(path, id)) - .unwrap_or(IndexParts::Index(path)); - - let response = conn - .index(index_parts) - .body(doc) - .refresh(refresh.unwrap_or(Refresh::False)) - .send() - .await?; - response - .error_for_status_code() - .map(|_| ()) - .map_err(Into::into) - } -} diff --git a/crates/fuel-streams-ws/src/telemetry/runtime.rs b/crates/fuel-streams-ws/src/telemetry/runtime.rs deleted file mode 100644 index 6c3fc637..00000000 --- a/crates/fuel-streams-ws/src/telemetry/runtime.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::{ - collections::VecDeque, - pin::Pin, - sync::{Arc, Mutex}, -}; - -use futures::Future; -use tokio::time::{self, Duration}; - -// Task type: Each task is represented by a Boxed, pinned Future -type Task = Pin + Send + 'static>>; - -#[derive(Clone)] -pub struct Runtime { - task_queue: Arc>>, - max_capacity: usize, - interval: Duration, -} - -impl Runtime { - pub fn new(capacity: usize, interval: Duration) -> Self { - Self { - task_queue: Arc::new(Mutex::new(VecDeque::with_capacity(capacity))), - max_capacity: capacity, - interval, - } - } - - pub fn spawn(&self, task: F) - where - F: Future + Send + 'static, - { - let mut queue = self.task_queue.lock().unwrap(); - - // If the queue is at capacity, discard the oldest task - if queue.len() >= self.max_capacity { - queue.pop_front(); - } - - queue.push_back(Box::pin(task)); - } - - pub fn start(&self, blocking_task_executor: F) - where - F: FnOnce() + Send + 'static + Clone, - { - let interval = self.interval; - let task_queue = Arc::clone(&self.task_queue); - - tokio::spawn(async move { - let mut ticker = time::interval(interval); - - loop { - // Wait for the interval - ticker.tick().await; - - tokio::task::spawn_blocking(blocking_task_executor.clone()); - - // Lock the queue, drain tasks, and run them sequentially - let tasks: Vec<_> = { - let mut queue = task_queue.lock().unwrap(); - queue.drain(..).collect() - }; - - // Run each task sequentially - for task in tasks { - task.await; - } - } - }); - } -} diff --git a/crates/fuel-streams/src/client/client_impl.rs b/crates/fuel-streams/src/client/client_impl.rs index 620d1a7d..b2d9deb1 100644 --- a/crates/fuel-streams/src/client/client_impl.rs +++ 
b/crates/fuel-streams/src/client/client_impl.rs
@@ -36,12 +36,24 @@ impl Client {
     /// # }
     /// ```
     pub async fn connect(network: FuelNetwork) -> Result<Self, ClientError> {
-        let nats_opts = NatsClientOpts::new(network);
+        let nats_opts =
+            NatsClientOpts::public_opts().with_url(network.to_nats_url());
         let nats_client = NatsClient::connect(&nats_opts)
             .await
             .map_err(ClientError::NatsConnectionFailed)?;

-        let s3_client_opts = S3ClientOpts::new(network);
+        let s3_client_opts = match network {
+            FuelNetwork::Local => {
+                S3ClientOpts::new(S3Env::Local, S3Role::Admin)
+            }
+            FuelNetwork::Testnet => {
+                S3ClientOpts::new(S3Env::Testnet, S3Role::Public)
+            }
+            FuelNetwork::Mainnet => {
+                S3ClientOpts::new(S3Env::Mainnet, S3Role::Public)
+            }
+        };
+
         let s3_client = S3Client::new(&s3_client_opts)
             .await
             .map_err(ClientError::S3ConnectionFailed)?;
@@ -67,11 +79,11 @@ impl Client {
     /// ```no_run
     /// use fuel_streams::client::{Client, FuelNetwork};
     /// use fuel_streams_core::nats::NatsClientOpts;
-    /// use fuel_streams_core::s3::S3ClientOpts;
+    /// use fuel_streams_core::s3::{S3ClientOpts, S3Env, S3Role};
     ///
     /// # async fn example() -> Result<(), fuel_streams::Error> {
-    /// let nats_opts = NatsClientOpts::new(FuelNetwork::Local);
-    /// let s3_opts = S3ClientOpts::new(FuelNetwork::Local);
+    /// let nats_opts = NatsClientOpts::public_opts().with_url("nats://localhost:4222");
+    /// let s3_opts = S3ClientOpts::new(S3Env::Local, S3Role::Admin);
     ///
     /// let client = Client::with_opts(&nats_opts, &s3_opts).await?;
     /// # Ok(())
diff --git a/crates/sv-consumer/Cargo.toml b/crates/sv-consumer/Cargo.toml
new file mode 100644
index 00000000..50d1e17e
--- /dev/null
+++ b/crates/sv-consumer/Cargo.toml
@@ -0,0 +1,50 @@
+[package]
+name = "sv-consumer"
+description = "Service that consumes new blocks from the emitter"
+authors = { workspace = true }
+keywords = { workspace = true }
+edition = { workspace = true }
+homepage = { workspace = true }
+license = { workspace = true }
+repository = { workspace = true }
+version = { workspace = true }
+rust-version = { workspace = true }
+publish = false
+
+[[bin]]
+name = "sv-consumer"
+path = "src/main.rs"
+
+[dependencies]
+anyhow = { workspace = true }
+async-nats = { workspace = true }
+clap = { workspace = true }
+dotenvy = { workspace = true }
+fuel-core = { workspace = true, default-features = false, features = ["p2p", "relayer", "rocksdb"] }
+fuel-streams-core = { workspace = true, features = ["test-helpers"] }
+fuel-streams-executors = { workspace = true, features = ["test-helpers"] }
+futures = { workspace = true }
+num_cpus = { workspace = true }
+serde_json = { workspace = true }
+sv-publisher = { workspace = true }
+thiserror = { workspace = true }
+tokio = { workspace = true }
+tokio-util = "0.7.13"
+tracing = { workspace = true }
+tracing-subscriber = { workspace = true, features = ["local-time"] }
+
+[features]
+default = []
+test-helpers = []
+
+[target.x86_64-unknown-linux-gnu.dependencies]
+openssl = { version = "0.10.68", features = ["vendored"] }
+
+[target.x86_64-unknown-linux-musl.dependencies]
+openssl = { version = "0.10.68", features = ["vendored"] }
+
+[target.aarch64-unknown-linux-gnu.dependencies]
+openssl = { version = "0.10.68", features = ["vendored"] }
+
+[target.aarch64-unknown-linux-musl.dependencies]
+openssl = { version = "0.10.68", features = ["vendored"] }
diff --git a/crates/sv-consumer/src/cli.rs b/crates/sv-consumer/src/cli.rs
new file mode 100644
index 00000000..6b51f6bf
--- /dev/null
+++ b/crates/sv-consumer/src/cli.rs
@@ -0,0 +1,22 @@
+use 
clap::Parser;
+
+#[derive(Clone, Parser)]
+pub struct Cli {
+    /// NATS URL to connect to.
+    #[arg(
+        long,
+        value_name = "NATS_URL",
+        env = "NATS_URL",
+        default_value = "localhost:4222",
+        help = "NATS URL to connect to."
+    )]
+    pub nats_url: String,
+    #[arg(
+        long,
+        value_name = "NATS_PUBLISHER_URL",
+        env = "NATS_PUBLISHER_URL",
+        default_value = "localhost:4333",
+        help = "NATS Publisher URL to connect to."
+    )]
+    pub nats_publisher_url: String,
+}
diff --git a/crates/sv-consumer/src/lib.rs b/crates/sv-consumer/src/lib.rs
new file mode 100644
index 00000000..1cddb9ed
--- /dev/null
+++ b/crates/sv-consumer/src/lib.rs
@@ -0,0 +1,33 @@
+use std::sync::Arc;
+
+use fuel_streams_core::prelude::*;
+
+pub mod cli;
+
+#[derive(Debug, Clone, Default)]
+pub enum Client {
+    #[default]
+    Core,
+    Publisher,
+}
+
+impl Client {
+    pub fn url(&self, cli: &cli::Cli) -> String {
+        match self {
+            Client::Core => cli.nats_url.clone(),
+            Client::Publisher => cli.nats_publisher_url.clone(),
+        }
+    }
+    pub async fn create(
+        &self,
+        cli: &cli::Cli,
+    ) -> Result<Arc<NatsClient>, NatsError> {
+        let url = self.url(cli);
+        let opts = NatsClientOpts::admin_opts()
+            .with_url(url)
+            .with_domain("CORE".to_string())
+            .with_user("admin".to_string())
+            .with_password("admin".to_string());
+        Ok(Arc::new(NatsClient::connect(&opts).await?))
+    }
+}
diff --git a/crates/sv-consumer/src/main.rs b/crates/sv-consumer/src/main.rs
new file mode 100644
index 00000000..3e4fb6b2
--- /dev/null
+++ b/crates/sv-consumer/src/main.rs
@@ -0,0 +1,220 @@
+use std::{
+    env,
+    sync::{Arc, LazyLock},
+    time::Duration,
+};
+
+use async_nats::jetstream::{
+    consumer::{
+        pull::{BatchErrorKind, Config as ConsumerConfig},
+        Consumer,
+    },
+    context::CreateStreamErrorKind,
+    stream::{ConsumerErrorKind, RetentionPolicy},
+};
+use clap::Parser;
+use fuel_streams_core::prelude::*;
+use fuel_streams_executors::*;
+use futures::{future::try_join_all, stream::FuturesUnordered, StreamExt};
+use sv_consumer::{cli::Cli, Client};
+use sv_publisher::shutdown::ShutdownController;
+use tokio_util::sync::CancellationToken;
+use tracing::level_filters::LevelFilter;
+use tracing_subscriber::fmt::time;
+
+#[derive(thiserror::Error, Debug)]
+pub enum ConsumerError {
+    #[error("Failed to receive batch of messages from NATS: {0}")]
+    BatchStream(#[from] async_nats::error::Error<BatchErrorKind>),
+
+    #[error("Failed to create stream: {0}")]
+    CreateStream(#[from] async_nats::error::Error<CreateStreamErrorKind>),
+
+    #[error("Failed to create consumer: {0}")]
+    CreateConsumer(#[from] async_nats::error::Error<ConsumerErrorKind>),
+
+    #[error("Failed to connect to NATS client: {0}")]
+    NatsClient(#[from] NatsError),
+
+    #[error("Failed to communicate with NATS server: {0}")]
+    Nats(#[from] async_nats::Error),
+
+    #[error("Failed to deserialize block payload from message: {0}")]
+    Deserialization(#[from] serde_json::Error),
+
+    #[error("Failed to decode UTF-8: {0}")]
+    Utf8(#[from] std::str::Utf8Error),
+
+    #[error("Failed to execute executor tasks: {0}")]
+    Executor(#[from] ExecutorError),
+
+    #[error("Failed to join tasks: {0}")]
+    JoinTasks(#[from] tokio::task::JoinError),
+
+    #[error("Failed to acquire semaphore: {0}")]
+    Semaphore(#[from] tokio::sync::AcquireError),
+
+    #[error("Failed to setup S3 client: {0}")]
+    S3(#[from] S3ClientError),
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    // Initialize tracing subscriber
+    tracing_subscriber::fmt()
+        .with_env_filter(
+            tracing_subscriber::EnvFilter::builder()
+                .with_default_directive(LevelFilter::INFO.into())
+                .from_env_lossy(),
+        )
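+        // Render log timestamps in local time, RFC 3339 format.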
+        .with_timer(time::LocalTime::rfc_3339())
+        .with_target(false)
+        .with_thread_ids(false)
+        .with_file(true)
+        .with_line_number(true)
+        .with_level(true)
+        .init();
+
+    if let Err(err) = dotenvy::dotenv() {
+        tracing::warn!("File .env not found: {:?}", err);
+    }
+
+    let cli = Cli::parse();
+    let shutdown = Arc::new(ShutdownController::new());
+    shutdown.clone().spawn_signal_handler();
+
+    tracing::info!("Consumer started. Waiting for messages...");
+    tokio::select! {
+        result = async {
+            process_messages(&cli, shutdown.token())
+                .await
+        } => {
+            result?;
+            tracing::info!("Processing complete");
+        }
+        _ = shutdown.wait_for_shutdown() => {
+            tracing::info!("Shutdown signal received");
+        }
+    };
+
+    tracing::info!("Shutdown complete");
+    Ok(())
+}
+
+async fn setup_s3() -> Result<Arc<S3Client>, ConsumerError> {
+    let s3_client_opts = S3ClientOpts::admin_opts();
+    let s3_client = S3Client::new(&s3_client_opts).await?;
+    Ok(Arc::new(s3_client))
+}
+
+async fn setup_nats(
+    cli: &Cli,
+) -> Result<
+    (Arc<NatsClient>, Arc<NatsClient>, Consumer<ConsumerConfig>),
+    ConsumerError,
+> {
+    let core_client = Client::Core.create(cli).await?;
+    let publisher_client = Client::Publisher.create(cli).await?;
+    let stream_name = publisher_client.namespace.stream_name("block_importer");
+    let stream = publisher_client
+        .jetstream
+        .get_or_create_stream(async_nats::jetstream::stream::Config {
+            name: stream_name,
+            subjects: vec!["block_submitted.>".to_string()],
+            retention: RetentionPolicy::WorkQueue,
+            duplicate_window: Duration::from_secs(1),
+            allow_rollup: true,
+            ..Default::default()
+        })
+        .await?;
+
+    let consumer = stream
+        .get_or_create_consumer("block_importer", ConsumerConfig {
+            durable_name: Some("block_importer".to_string()),
+            ack_policy: AckPolicy::Explicit,
+            ..Default::default()
+        })
+        .await?;
+
+    Ok((core_client, publisher_client, consumer))
+}
+
+pub static CONSUMER_MAX_THREADS: LazyLock<usize> = LazyLock::new(|| {
+    let available_cpus = num_cpus::get();
+    env::var("CONSUMER_MAX_THREADS")
+        .ok()
+        .and_then(|val| val.parse().ok())
+        .unwrap_or(available_cpus)
+});
+
+async fn process_messages(
+    cli: &Cli,
+    token: &CancellationToken,
+) -> Result<(), ConsumerError> {
+    let (core_client, publisher_client, consumer) = setup_nats(cli).await?;
+    let s3_client = setup_s3().await?;
+    let (_, publisher_stream) =
+        FuelStreams::setup_all(&core_client, &publisher_client, &s3_client)
+            .await;
+
+    let fuel_streams: Arc<FuelStreams> = publisher_stream.arc();
+    let semaphore = Arc::new(tokio::sync::Semaphore::new(64));
+    while !token.is_cancelled() {
+        let mut messages =
+            consumer.fetch().max_messages(100).messages().await?.fuse();
+        let mut futs = FuturesUnordered::new();
+        while let Some(msg) = messages.next().await {
+            let msg = msg?;
+            let fuel_streams = fuel_streams.clone();
+            let semaphore = semaphore.clone();
+            let future = async move {
+                let msg_str = std::str::from_utf8(&msg.payload)?;
+                let payload = Arc::new(BlockPayload::decode(msg_str)?);
+                let start_time = std::time::Instant::now();
+                let futures = Executor::<Block>::process_all(
+                    payload.clone(),
+                    &fuel_streams,
+                    &semaphore,
+                );
+                let results = try_join_all(futures).await?;
+                let end_time = std::time::Instant::now();
+                msg.ack().await?;
+                Ok::<_, ConsumerError>((results, start_time, end_time, payload))
+            };
+            futs.push(future);
+        }
+        while let Some(result) = futs.next().await {
+            let (results, start_time, end_time, payload) = result?;
+            log_task(results, start_time, end_time, payload);
+        }
+    }
+    Ok(())
+}
+
+fn log_task(
+    res: Vec<Result<(), ExecutorError>>,
+    start_time: std::time::Instant,
+    end_time: std::time::Instant,
+    payload: Arc<BlockPayload>,
+) {
+    let height = payload.metadata().clone().block_height;
+    let has_error = res.iter().any(|r| r.is_err());
+    let errors = res
+        .iter()
+        .filter_map(|r| r.as_ref().err())
+        .collect::<Vec<_>>();
+
+    let elapsed = end_time.duration_since(start_time);
+    if has_error {
+        tracing::error!(
+            "Block {height} published with errors in {:?}",
+            elapsed
+        );
+        tracing::debug!("Errors: {:?}", errors);
+    } else {
+        tracing::info!(
+            "Block {height} published successfully in {:?}",
+            elapsed
+        );
+    }
+}
diff --git a/crates/sv-publisher/Cargo.toml b/crates/sv-publisher/Cargo.toml
new file mode 100644
index 00000000..fdf306e6
--- /dev/null
+++ b/crates/sv-publisher/Cargo.toml
@@ -0,0 +1,51 @@
+[package]
+name = "sv-publisher"
+description = "Service that emits new blocks using fuel-core block subscription"
+authors = { workspace = true }
+keywords = { workspace = true }
+edition = { workspace = true }
+homepage = { workspace = true }
+license = { workspace = true }
+repository = { workspace = true }
+version = { workspace = true }
+rust-version = { workspace = true }
+publish = false
+
+[[bin]]
+name = "sv-publisher"
+path = "src/main.rs"
+
+[dependencies]
+anyhow = { workspace = true }
+async-nats = { workspace = true }
+clap = { workspace = true }
+fuel-core = { workspace = true, default-features = false, features = ["p2p", "relayer", "rocksdb"] }
+fuel-core-bin = { workspace = true, default-features = false, features = [
+    "p2p",
+    "relayer",
+    "rocksdb",
+] }
+fuel-core-types = { workspace = true, default-features = false, features = ["std", "serde"] }
+fuel-streams-core = { workspace = true, features = ["test-helpers"] }
+fuel-streams-executors = { workspace = true, features = ["test-helpers"] }
+futures = { workspace = true }
+thiserror = { workspace = true }
+tokio = { workspace = true }
+tokio-util = "0.7.13"
+tracing = { workspace = true }
+
+[features]
+default = []
+test-helpers = []
+
+[target.x86_64-unknown-linux-gnu.dependencies]
+openssl = { version = "0.10.68", features = ["vendored"] }
+
+[target.x86_64-unknown-linux-musl.dependencies]
+openssl = { version = "0.10.68", features = ["vendored"] }
+
+[target.aarch64-unknown-linux-gnu.dependencies]
+openssl = { version = "0.10.68", features = ["vendored"] }
+
+[target.aarch64-unknown-linux-musl.dependencies]
+openssl = { version = "0.10.68", features = ["vendored"] }
diff --git a/crates/fuel-streams-publisher/src/cli.rs b/crates/sv-publisher/src/cli.rs
similarity index 58%
rename from crates/fuel-streams-publisher/src/cli.rs
rename to crates/sv-publisher/src/cli.rs
index 7363eb81..4573fedd 100644
--- a/crates/fuel-streams-publisher/src/cli.rs
+++ b/crates/sv-publisher/src/cli.rs
@@ -12,21 +12,13 @@ pub struct Cli {
     /// Flattened command structure for Fuel Core configuration.
     #[command(flatten)]
     pub fuel_core_config: fuel_core_bin::cli::run::Command,
-    /// Http server address
+    /// NATS URL to connect to.
     #[arg(
         long,
-        value_name = "TPORT",
-        env = "TELEMETRY_PORT",
-        default_value = "8080",
-        help = "Port for the Actix Web server to bind telemetry to."
+        value_name = "NATS_URL",
+        env = "NATS_URL",
+        default_value = "localhost:4222",
+        help = "NATS URL to connect to."
    )]
-    pub telemetry_port: u16,
-    #[arg(
-        long,
-        value_name = "HISTORICAL",
-        env = "HISTORICAL",
-        default_value = "false",
-        help = "Whether to publish historical data to NATS"
-    )]
-    pub historical: bool,
+    pub nats_url: String,
 }
diff --git a/crates/sv-publisher/src/lib.rs b/crates/sv-publisher/src/lib.rs
new file mode 100644
index 00000000..5bf4a4b0
--- /dev/null
+++ b/crates/sv-publisher/src/lib.rs
@@ -0,0 +1,2 @@
+pub mod cli;
+pub mod shutdown;
diff --git a/crates/sv-publisher/src/main.rs b/crates/sv-publisher/src/main.rs
new file mode 100644
index 00000000..38b1d491
--- /dev/null
+++ b/crates/sv-publisher/src/main.rs
@@ -0,0 +1,213 @@
+use std::{sync::Arc, time::Duration};
+
+use async_nats::jetstream::{
+    context::PublishErrorKind,
+    stream::RetentionPolicy,
+    Context,
+};
+use clap::Parser;
+use fuel_core_types::blockchain::SealedBlock;
+use fuel_streams_core::prelude::*;
+use fuel_streams_executors::*;
+use futures::StreamExt;
+use sv_publisher::{cli::Cli, shutdown::ShutdownController};
+use thiserror::Error;
+use tokio_util::sync::CancellationToken;
+
+#[derive(Error, Debug)]
+pub enum LiveBlockProcessingError {
+    #[error("Failed to publish block: {0}")]
+    PublishError(#[from] PublishError),
+
+    #[error("Processing was cancelled")]
+    Cancelled,
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    let cli = Cli::parse();
+    let config = cli.fuel_core_config;
+    let fuel_core: Arc<dyn FuelCoreLike> = FuelCore::new(config).await?;
+    fuel_core.start().await?;
+
+    let s3_client = setup_s3().await?;
+    let nats_client = setup_nats(&cli.nats_url).await?;
+    let last_block_height = Arc::new(fuel_core.get_latest_block_height()?);
+    let last_published =
+        Arc::new(find_last_published_height(&nats_client, &s3_client).await?);
+
+    let shutdown = Arc::new(ShutdownController::new());
+    shutdown.clone().spawn_signal_handler();
+
+    tracing::info!("Last published height: {}", last_published);
+    tracing::info!("Last block height: {}", last_block_height);
+    tokio::select!
{
+        result = async {
+            let historical = process_historical_blocks(
+                &nats_client,
+                fuel_core.clone(),
+                last_block_height,
+                last_published,
+                shutdown.token().clone(),
+            );
+
+            let live = process_live_blocks(
+                &nats_client.jetstream,
+                fuel_core.clone(),
+                shutdown.token().clone(),
+            );
+
+            tokio::join!(historical, live)
+        } => {
+            result.0?;
+            result.1?;
+        }
+        _ = shutdown.wait_for_shutdown() => {
+            tracing::info!("Shutdown signal received, waiting for processing to complete...");
+            fuel_core.stop().await
+        }
+    }
+
+    tracing::info!("Shutdown complete");
+    Ok(())
+}
+
+async fn setup_s3() -> anyhow::Result<Arc<S3Client>> {
+    let s3_client_opts = S3ClientOpts::admin_opts();
+    let s3_client = S3Client::new(&s3_client_opts).await?;
+    Ok(Arc::new(s3_client))
+}
+
+async fn setup_nats(nats_url: &str) -> anyhow::Result<NatsClient> {
+    let opts = NatsClientOpts::admin_opts()
+        .with_url(nats_url.to_string())
+        .with_domain("CORE".to_string());
+    let nats_client = NatsClient::connect(&opts).await?;
+    let stream_name = nats_client.namespace.stream_name("block_importer");
+    nats_client
+        .jetstream
+        .get_or_create_stream(async_nats::jetstream::stream::Config {
+            name: stream_name,
+            subjects: vec!["block_submitted.>".to_string()],
+            retention: RetentionPolicy::WorkQueue,
+            duplicate_window: Duration::from_secs(1),
+            ..Default::default()
+        })
+        .await?;
+
+    Ok(nats_client)
+}
+
+async fn find_last_published_height(
+    nats_client: &NatsClient,
+    s3_client: &Arc<S3Client>,
+) -> anyhow::Result<u64> {
+    let block_stream =
+        Stream::<Block>::get_or_init(nats_client, s3_client).await;
+    let last_publish_height = block_stream
+        .get_last_published(BlocksSubject::WILDCARD)
+        .await?;
+    match last_publish_height {
+        Some(block) => Ok(block.height),
+        None => Ok(0),
+    }
+}
+
+fn get_historical_block_range(
+    last_published_height: Arc<u64>,
+    last_block_height: Arc<u64>,
+) -> Option<Vec<u64>> {
+    let last_published_height = *last_published_height;
+    let last_block_height = *last_block_height;
+    let start_height = last_published_height + 1;
+    let end_height = last_block_height;
+    if start_height > end_height {
+        tracing::info!("No historical blocks to process");
+        return None;
+    }
+    let block_count = end_height - start_height + 1;
+    let heights: Vec<u64> = (start_height..=end_height).collect();
+    tracing::info!(
+        "Processing {block_count} historical blocks from height {start_height} to {end_height}"
+    );
+    Some(heights)
+}
+
+fn process_historical_blocks(
+    nats_client: &NatsClient,
+    fuel_core: Arc<dyn FuelCoreLike>,
+    last_block_height: Arc<u64>,
+    last_published_height: Arc<u64>,
+    token: CancellationToken,
+) -> tokio::task::JoinHandle<()> {
+    let jetstream = nats_client.jetstream.clone();
+    tokio::spawn(async move {
+        let Some(heights) = get_historical_block_range(
+            last_published_height,
+            last_block_height,
+        ) else {
+            return;
+        };
+        futures::stream::iter(heights)
+            .map(|height| {
+                let jetstream = jetstream.clone();
+                let fuel_core = fuel_core.clone();
+                let sealed_block = fuel_core.get_sealed_block_by_height(height);
+                let sealed_block = Arc::new(sealed_block);
+                async move {
+                    publish_block(&jetstream, &fuel_core, &sealed_block).await
+                }
+            })
+            .buffer_unordered(100)
+            .take_until(token.cancelled())
+            .collect::<Vec<_>>()
+            .await;
+    })
+}
+
+async fn process_live_blocks(
+    jetstream: &Context,
+    fuel_core: Arc<dyn FuelCoreLike>,
+    token: CancellationToken,
+) -> Result<(), LiveBlockProcessingError> {
+    let mut subscription = fuel_core.blocks_subscription();
+    while let Ok(data) = subscription.recv().await {
+        if token.is_cancelled() {
+            break;
+        }
+        let sealed_block = Arc::new(data.sealed_block.clone());
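+        // Awaiting the JetStream ack for each block before pulling the next
+        // gives live publishing natural backpressure.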
+        publish_block(jetstream, &fuel_core, &sealed_block).await?;
+    }
+    Ok(())
+}
+
+#[derive(Error, Debug)]
+pub enum PublishError {
+    #[error("Failed to publish block to NATS server: {0}")]
+    NatsPublish(#[from] async_nats::error::Error<PublishErrorKind>),
+
+    #[error("Failed to create block payload due to: {0}")]
+    BlockPayload(#[from] ExecutorError),
+
+    #[error("Failed to access offchain database: {0}")]
+    OffchainDatabase(String),
+}
+
+async fn publish_block(
+    jetstream: &Context,
+    fuel_core: &Arc<dyn FuelCoreLike>,
+    sealed_block: &Arc<SealedBlock>,
+) -> Result<(), PublishError> {
+    let metadata = Metadata::new(fuel_core, sealed_block);
+    let fuel_core = Arc::clone(fuel_core);
+    let payload = BlockPayload::new(fuel_core, sealed_block, &metadata)?;
+    jetstream
+        .send_publish(payload.subject(), payload.to_owned().try_into()?)
+        .await
+        .map_err(PublishError::NatsPublish)?
+        .await
+        .map_err(PublishError::NatsPublish)?;
+
+    tracing::info!("New block submitted: {}", payload.block_height());
+    Ok(())
+}
diff --git a/crates/sv-publisher/src/shutdown.rs b/crates/sv-publisher/src/shutdown.rs
new file mode 100644
index 00000000..6d66e7b1
--- /dev/null
+++ b/crates/sv-publisher/src/shutdown.rs
@@ -0,0 +1,104 @@
+use std::sync::Arc;
+
+use tokio_util::sync::CancellationToken;
+
+#[derive(Clone)]
+pub struct ShutdownController {
+    token: CancellationToken,
+}
+
+impl Default for ShutdownController {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl ShutdownController {
+    pub fn new() -> Self {
+        Self {
+            token: CancellationToken::new(),
+        }
+    }
+
+    pub fn token(&self) -> &CancellationToken {
+        &self.token
+    }
+
+    pub fn spawn_signal_handler(self: Arc<Self>) -> Arc<Self> {
+        tokio::spawn({
+            let shutdown = self.clone();
+            async move {
+                tokio::signal::ctrl_c()
+                    .await
+                    .expect("Failed to listen for ctrl+c");
+                tracing::info!("Received shutdown signal");
+                shutdown.initiate_shutdown();
+            }
+        });
+        self
+    }
+
+    pub fn initiate_shutdown(&self) {
+        tracing::info!("Initiating graceful shutdown...");
+        self.token.cancel();
+    }
+
+    pub fn is_shutdown_initiated(&self) -> bool {
+        self.token.is_cancelled()
+    }
+
+    pub async fn wait_for_shutdown(&self) {
+        self.token.cancelled().await;
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::time::Duration;
+
+    use super::*;
+
+    #[tokio::test]
+    async fn test_manual_shutdown() {
+        let controller = ShutdownController::new();
+        assert!(
+            !controller.is_shutdown_initiated(),
+            "Controller should not be shutdown initially"
+        );
+
+        controller.initiate_shutdown();
+        assert!(
+            controller.is_shutdown_initiated(),
+            "Controller should be shutdown after initiation"
+        );
+    }
+
+    #[tokio::test]
+    async fn test_wait_for_shutdown_timeout() {
+        let controller = ShutdownController::new();
+
+        let timeout = Duration::from_millis(50);
+        let result =
+            tokio::time::timeout(timeout, controller.wait_for_shutdown()).await;
+
+        assert!(
+            result.is_err(),
+            "wait_for_shutdown should not complete without initiation"
+        );
+    }
+
+    #[tokio::test]
+    async fn test_clone_behavior() {
+        let controller = ShutdownController::new();
+        let cloned = controller.clone();
+
+        // Initiate shutdown from clone
+        cloned.initiate_shutdown();
+
+        assert!(
+            controller.is_shutdown_initiated(),
+            "Original should be shutdown"
+        );
+        assert!(cloned.is_shutdown_initiated(), "Clone should be shutdown");
+    }
+}
diff --git a/crates/fuel-streams-ws/Cargo.toml b/crates/sv-webserver/Cargo.toml
similarity index 95%
rename from crates/fuel-streams-ws/Cargo.toml
rename to crates/sv-webserver/Cargo.toml
index 86c66c2d..e1f77ddd 100644
---
a/crates/fuel-streams-ws/Cargo.toml +++ b/crates/sv-webserver/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "fuel-streams-ws" +name = "sv-webserver" description = "Fuel library for streaming data from nats and storage" authors = { workspace = true } keywords = { workspace = true } @@ -19,22 +19,21 @@ actix-web = { workspace = true } actix-ws = "0.3.0" anyhow = { workspace = true } async-nats = { workspace = true } -async-trait = { workspace = true } bytestring = "1.4.0" chrono = { workspace = true } clap = { workspace = true } -confy = "0.6" derive_more = { version = "1.0", features = ["full"] } displaydoc = { workspace = true } dotenvy = { workspace = true } elasticsearch = "8.15.0-alpha.1" fuel-streams = { workspace = true, features = ["test-helpers"] } fuel-streams-core = { workspace = true, features = ["test-helpers"] } +fuel-streams-nats = { workspace = true, features = ["test-helpers"] } fuel-streams-storage = { workspace = true, features = ["test-helpers"] } futures = { workspace = true } futures-util = { workspace = true } jsonwebtoken = "9.3.0" -num_cpus = "1.16" +num_cpus = { workspace = true } parking_lot = { version = "0.12", features = ["serde"] } prometheus = { version = "0.13", features = ["process"] } rand = { workspace = true } @@ -48,7 +47,6 @@ thiserror = "2.0" time = { version = "0.3", features = ["serde"] } tokio = { workspace = true } tokio-tungstenite = "0.24.0" -toml = "0.8.19" tracing = { workspace = true } tracing-actix-web = { workspace = true } tracing-subscriber = { version = "0.3.19", features = ["env-filter"] } diff --git a/crates/fuel-streams-ws/README.md b/crates/sv-webserver/README.md similarity index 97% rename from crates/fuel-streams-ws/README.md rename to crates/sv-webserver/README.md index 5c60af56..cd717d7c 100644 --- a/crates/fuel-streams-ws/README.md +++ b/crates/sv-webserver/README.md @@ -16,7 +16,7 @@

- 📚 Documentation + 📚 Documentation   🐛 Report Bug   diff --git a/crates/fuel-streams-ws/src/cli.rs b/crates/sv-webserver/src/cli.rs similarity index 95% rename from crates/fuel-streams-ws/src/cli.rs rename to crates/sv-webserver/src/cli.rs index 4aed7400..37cad8bd 100644 --- a/crates/fuel-streams-ws/src/cli.rs +++ b/crates/sv-webserver/src/cli.rs @@ -7,11 +7,11 @@ pub struct Cli { #[arg( long, value_name = "PORT", - env = "API_PORT", + env = "PORT", default_value = "9003", help = "Port number for the API server" )] - pub api_port: u16, + pub port: u16, /// NATS URL #[arg( diff --git a/crates/fuel-streams-ws/src/client/mod.rs b/crates/sv-webserver/src/client/mod.rs similarity index 100% rename from crates/fuel-streams-ws/src/client/mod.rs rename to crates/sv-webserver/src/client/mod.rs diff --git a/crates/fuel-streams-ws/src/config.rs b/crates/sv-webserver/src/config.rs similarity index 97% rename from crates/fuel-streams-ws/src/config.rs rename to crates/sv-webserver/src/config.rs index eeb01ce0..eeaada1b 100644 --- a/crates/fuel-streams-ws/src/config.rs +++ b/crates/sv-webserver/src/config.rs @@ -54,7 +54,7 @@ impl Config { fn from_cli(cli: &crate::cli::Cli) -> Result { Ok(Config { api: ApiConfig { - port: cli.api_port, + port: cli.port, tls: None, }, auth: AuthConfig { diff --git a/crates/fuel-streams-ws/src/lib.rs b/crates/sv-webserver/src/lib.rs similarity index 100% rename from crates/fuel-streams-ws/src/lib.rs rename to crates/sv-webserver/src/lib.rs diff --git a/crates/fuel-streams-ws/src/main.rs b/crates/sv-webserver/src/main.rs similarity index 93% rename from crates/fuel-streams-ws/src/main.rs rename to crates/sv-webserver/src/main.rs index e629bd31..253f6fb3 100644 --- a/crates/fuel-streams-ws/src/main.rs +++ b/crates/sv-webserver/src/main.rs @@ -1,4 +1,4 @@ -use fuel_streams_ws::{ +use sv_webserver::{ config::Config, server::{api::create_api, context::Context, state::ServerState}, }; @@ -18,7 +18,7 @@ async fn main() -> anyhow::Result<()> { .init(); if let Err(err) = dotenvy::dotenv() { - tracing::error!("File .env not found: {:?}", err); + tracing::warn!("File .env not found: {:?}", err); } let config = Config::load()?; diff --git a/crates/fuel-streams-ws/src/server/api.rs b/crates/sv-webserver/src/server/api.rs similarity index 100% rename from crates/fuel-streams-ws/src/server/api.rs rename to crates/sv-webserver/src/server/api.rs diff --git a/crates/fuel-streams-ws/src/server/auth.rs b/crates/sv-webserver/src/server/auth.rs similarity index 100% rename from crates/fuel-streams-ws/src/server/auth.rs rename to crates/sv-webserver/src/server/auth.rs diff --git a/crates/fuel-streams-ws/src/server/context.rs b/crates/sv-webserver/src/server/context.rs similarity index 66% rename from crates/fuel-streams-ws/src/server/context.rs rename to crates/sv-webserver/src/server/context.rs index 7bda3220..c0af7476 100644 --- a/crates/fuel-streams-ws/src/server/context.rs +++ b/crates/sv-webserver/src/server/context.rs @@ -3,11 +3,7 @@ use std::{sync::Arc, time::Duration}; use fuel_streams_core::prelude::*; use fuel_streams_storage::S3Client; -use crate::{ - config::Config, - server::ws::fuel_streams::FuelStreams, - telemetry::Telemetry, -}; +use crate::{config::Config, telemetry::Telemetry}; pub const GRACEFUL_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(90); @@ -23,8 +19,9 @@ pub struct Context { impl Context { pub async fn new(config: &Config) -> anyhow::Result { - let nats_client_opts = - NatsClientOpts::admin_opts().with_url(config.nats.url.clone()); + let nats_client_opts = 
NatsClientOpts::admin_opts() + .with_url(config.nats.url.clone()) + .with_domain("CORE"); let nats_client = NatsClient::connect(&nats_client_opts).await?; let s3_client_opts = S3ClientOpts::admin_opts(); let s3_client = Arc::new(S3Client::new(&s3_client_opts).await?); @@ -47,28 +44,6 @@ impl Context { }) } - pub async fn new_for_testing( - fuel_network: FuelNetwork, - ) -> anyhow::Result { - let nats_client_opts = NatsClientOpts::new(fuel_network); - let nats_client = NatsClient::connect(&nats_client_opts).await?; - let s3_client_opts = S3ClientOpts::admin_opts(); - let s3_client = Arc::new(S3Client::new(&s3_client_opts).await?); - Ok(Context { - fuel_streams: Arc::new( - FuelStreams::new(&nats_client, &s3_client).await, - ), - nats_client: nats_client.clone(), - telemetry: Telemetry::new(None).await?, - s3_client: None, - jwt_secret: String::new(), - }) - } - - pub fn get_streams(&self) -> &FuelStreams { - &self.fuel_streams - } - #[allow(dead_code)] async fn shutdown_services_with_timeout(&self) -> anyhow::Result<()> { tokio::time::timeout(GRACEFUL_SHUTDOWN_TIMEOUT, async { diff --git a/crates/fuel-streams-ws/src/server/http/handlers.rs b/crates/sv-webserver/src/server/http/handlers.rs similarity index 100% rename from crates/fuel-streams-ws/src/server/http/handlers.rs rename to crates/sv-webserver/src/server/http/handlers.rs diff --git a/crates/fuel-streams-ws/src/server/http/mod.rs b/crates/sv-webserver/src/server/http/mod.rs similarity index 100% rename from crates/fuel-streams-ws/src/server/http/mod.rs rename to crates/sv-webserver/src/server/http/mod.rs diff --git a/crates/fuel-streams-ws/src/server/http/models.rs b/crates/sv-webserver/src/server/http/models.rs similarity index 100% rename from crates/fuel-streams-ws/src/server/http/models.rs rename to crates/sv-webserver/src/server/http/models.rs diff --git a/crates/fuel-streams-ws/src/server/middlewares/auth.rs b/crates/sv-webserver/src/server/middlewares/auth.rs similarity index 100% rename from crates/fuel-streams-ws/src/server/middlewares/auth.rs rename to crates/sv-webserver/src/server/middlewares/auth.rs diff --git a/crates/fuel-streams-ws/src/server/middlewares/mod.rs b/crates/sv-webserver/src/server/middlewares/mod.rs similarity index 100% rename from crates/fuel-streams-ws/src/server/middlewares/mod.rs rename to crates/sv-webserver/src/server/middlewares/mod.rs diff --git a/crates/fuel-streams-ws/src/server/mod.rs b/crates/sv-webserver/src/server/mod.rs similarity index 100% rename from crates/fuel-streams-ws/src/server/mod.rs rename to crates/sv-webserver/src/server/mod.rs diff --git a/crates/fuel-streams-ws/src/server/state.rs b/crates/sv-webserver/src/server/state.rs similarity index 98% rename from crates/fuel-streams-ws/src/server/state.rs rename to crates/sv-webserver/src/server/state.rs index 8a2a3609..19e379d2 100644 --- a/crates/fuel-streams-ws/src/server/state.rs +++ b/crates/sv-webserver/src/server/state.rs @@ -4,11 +4,11 @@ use std::{ }; use async_nats::jetstream::stream::State; +use fuel_streams_core::prelude::FuelStreamsExt; use parking_lot::RwLock; use serde::{Deserialize, Serialize}; use super::context::Context; -use crate::server::ws::fuel_streams::FuelStreamsExt; #[derive(Debug, Serialize, Deserialize, Clone)] pub struct StreamInfo { diff --git a/crates/fuel-streams-ws/src/server/ws/errors.rs b/crates/sv-webserver/src/server/ws/errors.rs similarity index 100% rename from crates/fuel-streams-ws/src/server/ws/errors.rs rename to crates/sv-webserver/src/server/ws/errors.rs diff --git 
a/crates/fuel-streams-ws/src/server/ws/mod.rs b/crates/sv-webserver/src/server/ws/mod.rs
similarity index 74%
rename from crates/fuel-streams-ws/src/server/ws/mod.rs
rename to crates/sv-webserver/src/server/ws/mod.rs
index c1f05088..bb1b9404 100644
--- a/crates/fuel-streams-ws/src/server/ws/mod.rs
+++ b/crates/sv-webserver/src/server/ws/mod.rs
@@ -1,5 +1,4 @@
 pub mod errors;
-pub mod fuel_streams;
 pub mod models;
 pub mod socket;
 pub mod state;
diff --git a/crates/fuel-streams-ws/src/server/ws/models.rs b/crates/sv-webserver/src/server/ws/models.rs
similarity index 98%
rename from crates/fuel-streams-ws/src/server/ws/models.rs
rename to crates/sv-webserver/src/server/ws/models.rs
index 3033e5f8..0dfcec38 100644
--- a/crates/fuel-streams-ws/src/server/ws/models.rs
+++ b/crates/sv-webserver/src/server/ws/models.rs
@@ -1,4 +1,4 @@
-use fuel_streams_storage::DeliverPolicy as NatsDeliverPolicy;
+use fuel_streams_nats::DeliverPolicy as NatsDeliverPolicy;
 use serde::{Deserialize, Serialize};
 
 #[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)]
diff --git a/crates/sv-webserver/src/server/ws/socket.rs b/crates/sv-webserver/src/server/ws/socket.rs
new file mode 100644
index 00000000..03512ed7
--- /dev/null
+++ b/crates/sv-webserver/src/server/ws/socket.rs
@@ -0,0 +1,377 @@
+use std::sync::{atomic::AtomicUsize, Arc};
+
+use actix_web::{
+    web::{self, Bytes},
+    HttpMessage,
+    HttpRequest,
+    Responder,
+};
+use actix_ws::{Message, Session};
+use fuel_streams::{
+    logs::Log,
+    types::{Block, Input, Output, Receipt, Transaction},
+    utxos::Utxo,
+    StreamEncoder,
+    Streamable,
+};
+use fuel_streams_core::prelude::*;
+use fuel_streams_nats::DeliverPolicy;
+use futures::StreamExt;
+use uuid::Uuid;
+
+use super::{errors::WsSubscriptionError, models::ClientMessage};
+use crate::{
+    server::{
+        state::ServerState,
+        ws::models::{ServerMessage, SubscriptionPayload},
+    },
+    telemetry::Telemetry,
+};
+
+static _NEXT_USER_ID: AtomicUsize = AtomicUsize::new(1);
+
+pub async fn get_ws(
+    req: HttpRequest,
+    body: web::Payload,
+    state: web::Data<ServerState>,
+) -> actix_web::Result<impl Responder> {
+    // extract user id
+    let user_id = match req.extensions().get::<Uuid>() {
+        Some(user_id) => {
+            tracing::info!(
+                "Authenticated WebSocket connection for user: {:?}",
+                user_id.to_string()
+            );
+            user_id.to_owned()
+        }
+        None => {
+            tracing::info!("Unauthenticated WebSocket connection");
+            return Err(actix_web::error::ErrorUnauthorized(
+                "Missing or invalid JWT",
+            ));
+        }
+    };
+
+    // split the request into response, session, and message stream
+    let (response, session, mut msg_stream) = actix_ws::handle(&req, body)?;
+
+    // record the new subscription
+    state.context.telemetry.increment_subscriptions_count();
+
+    // spawn an actor handling the ws connection
+    let streams = state.context.fuel_streams.clone();
+    let telemetry = state.context.telemetry.clone();
+    actix_web::rt::spawn(async move {
+        tracing::info!("Ws opened for user id {:?}", user_id.to_string());
+        while let Some(Ok(msg)) = msg_stream.recv().await {
+            let mut session = session.clone();
+            match msg {
+                Message::Ping(bytes) => {
+                    tracing::info!("Received ping, {:?}", bytes);
+                    if session.pong(&bytes).await.is_err() {
+                        tracing::error!("Error sending pong, {:?}", bytes);
+                    }
+                }
+                Message::Pong(bytes) => {
+                    tracing::info!("Received pong, {:?}", bytes);
+                }
+                Message::Text(string) => {
+                    tracing::info!("Received text, {string}");
+                    let bytes = Bytes::from(string.as_bytes().to_vec());
+                    let _ = handle_binary_message(
+                        bytes,
+                        user_id,
+                        session,
+                        Arc::clone(&telemetry),
+                        Arc::clone(&streams),
+                    )
+                    .await;
+                }
+                Message::Binary(bytes) => {
+                    let _ = handle_binary_message(
+                        bytes,
+                        user_id,
+                        session,
+                        Arc::clone(&telemetry),
+                        Arc::clone(&streams),
+                    )
+                    .await;
+                }
+                Message::Close(reason) => {
+                    tracing::info!(
+                        "Got close event, terminating session with reason {:?}",
+                        reason
+                    );
+                    let reason_str =
+                        reason.and_then(|r| r.description).unwrap_or_default();
+                    close_socket_with_error(
+                        WsSubscriptionError::ClosedWithReason(
+                            reason_str.to_string(),
+                        ),
+                        user_id,
+                        session,
+                        None,
+                        telemetry,
+                    )
+                    .await;
+                    return;
+                }
+                _ => {
+                    tracing::error!("Received unknown message type");
+                    close_socket_with_error(
+                        WsSubscriptionError::ClosedWithReason(
+                            "Unknown message type".to_string(),
+                        ),
+                        user_id,
+                        session,
+                        None,
+                        telemetry,
+                    )
+                    .await;
+                    return;
+                }
+            };
+        }
+    });
+
+    Ok(response)
+}
+
+async fn handle_binary_message(
+    msg: Bytes,
+    user_id: uuid::Uuid,
+    mut session: Session,
+    telemetry: Arc<Telemetry>,
+    streams: Arc<FuelStreams>,
+) -> Result<(), WsSubscriptionError> {
+    tracing::info!("Received binary {:?}", msg);
+    let client_message = match parse_client_message(msg) {
+        Ok(msg) => msg,
+        Err(e) => {
+            close_socket_with_error(e, user_id, session, None, telemetry).await;
+            return Ok(());
+        }
+    };
+
+    tracing::info!("Message parsed: {:?}", client_message);
+    // handle the client message
+    match client_message {
+        ClientMessage::Subscribe(payload) => {
+            tracing::info!("Received subscribe message: {:?}", payload);
+            let subject_wildcard = payload.wildcard;
+            let deliver_policy = payload.deliver_policy;
+
+            // verify the subject name
+            let sub_subject =
+                match verify_and_extract_subject_name(&subject_wildcard) {
+                    Ok(res) => res,
+                    Err(e) => {
+                        close_socket_with_error(
+                            e,
+                            user_id,
+                            session,
+                            Some(subject_wildcard.clone()),
+                            telemetry,
+                        )
+                        .await;
+                        return Ok(());
+                    }
+                };
+
+            // start the streamer async
+            let mut stream_session = session.clone();
+
+            // reply to socket with subscription
+            send_message_to_socket(
+                &mut session,
+                ServerMessage::Subscribed(SubscriptionPayload {
+                    wildcard: subject_wildcard.clone(),
+                    deliver_policy,
+                }),
+            )
+            .await;
+
+            // receive streaming in a background thread
+            let streams = streams.clone();
+            let telemetry = telemetry.clone();
+            actix_web::rt::spawn(async move {
+                // update metrics
+                telemetry.update_user_subscription_metrics(
+                    user_id,
+                    &subject_wildcard,
+                );
+
+                // subscribe to the stream
+                let config = SubscriptionConfig {
+                    deliver_policy: DeliverPolicy::All,
+                    filter_subjects: vec![subject_wildcard.clone()],
+                };
+                let mut sub =
+                    match streams.subscribe(&sub_subject, Some(config)).await {
+                        Ok(sub) => sub,
+                        Err(e) => {
+                            close_socket_with_error(
+                                WsSubscriptionError::Stream(e),
+                                user_id,
+                                session,
+                                Some(subject_wildcard.clone()),
+                                telemetry,
+                            )
+                            .await;
+                            return;
+                        }
+                    };
+
+                // consume and forward to the ws
+                while let Some(s3_serialized_payload) = sub.next().await {
+                    // decode and serialize back to ws payload
+                    let serialized_ws_payload = match decode(
+                        &subject_wildcard,
+                        s3_serialized_payload,
+                    )
+                    .await
+                    {
+                        Ok(res) => res,
+                        Err(e) => {
+                            telemetry.update_error_metrics(
+                                &subject_wildcard,
+                                &e.to_string(),
+                            );
+                            tracing::error!("Error serializing received stream message: {:?}", e);
+                            continue;
+                        }
+                    };
+
+                    // send the payload over the stream
+                    let _ = stream_session.binary(serialized_ws_payload).await;
+                }
+            });
+            Ok(())
+        }
+        ClientMessage::Unsubscribe(payload) => {
+            tracing::info!("Received unsubscribe message: {:?}", payload);
+            let subject_wildcard = payload.wildcard;
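+            // Both fields are echoed back in the Unsubscribed ack below so the
+            // client can correlate it with the original request.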
+async fn handle_binary_message(
+    msg: Bytes,
+    user_id: uuid::Uuid,
+    mut session: Session,
+    telemetry: Arc<Telemetry>,
+    streams: Arc<FuelStreams>,
+) -> Result<(), WsSubscriptionError> {
+    tracing::info!("Received binary {:?}", msg);
+    let client_message = match parse_client_message(msg) {
+        Ok(msg) => msg,
+        Err(e) => {
+            close_socket_with_error(e, user_id, session, None, telemetry).await;
+            return Ok(());
+        }
+    };
+
+    tracing::info!("Message parsed: {:?}", client_message);
+    // handle the client message
+    match client_message {
+        ClientMessage::Subscribe(payload) => {
+            tracing::info!("Received subscribe message: {:?}", payload);
+            let subject_wildcard = payload.wildcard;
+            let deliver_policy = payload.deliver_policy;
+
+            // verify the subject name
+            let sub_subject =
+                match verify_and_extract_subject_name(&subject_wildcard) {
+                    Ok(res) => res,
+                    Err(e) => {
+                        close_socket_with_error(
+                            e,
+                            user_id,
+                            session,
+                            Some(subject_wildcard.clone()),
+                            telemetry,
+                        )
+                        .await;
+                        return Ok(());
+                    }
+                };
+
+            // start the streamer async
+            let mut stream_session = session.clone();
+
+            // reply to socket with subscription
+            send_message_to_socket(
+                &mut session,
+                ServerMessage::Subscribed(SubscriptionPayload {
+                    wildcard: subject_wildcard.clone(),
+                    deliver_policy,
+                }),
+            )
+            .await;
+
+            // receive streaming in a background thread
+            let streams = streams.clone();
+            let telemetry = telemetry.clone();
+            actix_web::rt::spawn(async move {
+                // update metrics
+                telemetry.update_user_subscription_metrics(
+                    user_id,
+                    &subject_wildcard,
+                );
+
+                // subscribe to the stream
+                let config = SubscriptionConfig {
+                    deliver_policy: DeliverPolicy::All,
+                    filter_subjects: vec![subject_wildcard.clone()],
+                };
+                let mut sub =
+                    match streams.subscribe(&sub_subject, Some(config)).await {
+                        Ok(sub) => sub,
+                        Err(e) => {
+                            close_socket_with_error(
+                                WsSubscriptionError::Stream(e),
+                                user_id,
+                                session,
+                                Some(subject_wildcard.clone()),
+                                telemetry,
+                            )
+                            .await;
+                            return;
+                        }
+                    };
+
+                // consume and forward to the ws
+                while let Some(s3_serialized_payload) = sub.next().await {
+                    // decode and serialize back to ws payload
+                    let serialized_ws_payload = match decode(
+                        &subject_wildcard,
+                        s3_serialized_payload,
+                    )
+                    .await
+                    {
+                        Ok(res) => res,
+                        Err(e) => {
+                            telemetry.update_error_metrics(
+                                &subject_wildcard,
+                                &e.to_string(),
+                            );
+                            tracing::error!("Error serializing received stream message: {:?}", e);
+                            continue;
+                        }
+                    };
+
+                    // send the payload over the stream
+                    let _ = stream_session.binary(serialized_ws_payload).await;
+                }
+            });
+            Ok(())
+        }
+        ClientMessage::Unsubscribe(payload) => {
+            tracing::info!("Received unsubscribe message: {:?}", payload);
+            let subject_wildcard = payload.wildcard;
+            let deliver_policy = payload.deliver_policy;
+
+            if let Err(e) = verify_and_extract_subject_name(&subject_wildcard) {
+                close_socket_with_error(
+                    e,
+                    user_id,
+                    session,
+                    Some(subject_wildcard.clone()),
+                    telemetry,
+                )
+                .await;
+                return Ok(());
+            }
+
+            // TODO: implement session management for the same user_id
+            // send a message to the client to confirm unsubscribing
+            send_message_to_socket(
+                &mut session,
+                ServerMessage::Unsubscribed(SubscriptionPayload {
+                    wildcard: subject_wildcard,
+                    deliver_policy,
+                }),
+            )
+            .await;
+            Ok(())
+        }
+    }
+}
+
+fn parse_client_message(
+    msg: Bytes,
+) -> Result<ClientMessage, WsSubscriptionError> {
+    let msg = serde_json::from_slice::<ClientMessage>(&msg)
+        .map_err(WsSubscriptionError::UnparsablePayload)?;
+    Ok(msg)
+}
+
+pub fn verify_and_extract_subject_name(
+    subject_wildcard: &str,
+) -> Result<String, WsSubscriptionError> {
+    let mut subject_parts = subject_wildcard.split('.');
+    // TODO: more advanced checks here with Regex
+    if subject_parts.clone().count() == 1 {
+        return Err(WsSubscriptionError::UnsupportedWildcardPattern(
+            subject_wildcard.to_string(),
+        ));
+    }
+    let subject_name = subject_parts.next().unwrap_or_default();
+    if !FuelStreamsUtils::is_within_subject_names(subject_name) {
+        return Err(WsSubscriptionError::UnknownSubjectName(
+            subject_wildcard.to_string(),
+        ));
+    }
+    Ok(subject_name.to_string())
+}
+
+async fn close_socket_with_error(
+    e: WsSubscriptionError,
+    user_id: uuid::Uuid,
+    mut session: Session,
+    subject_wildcard: Option<String>,
+    telemetry: Arc<Telemetry>,
+) {
+    tracing::error!("ws subscription error: {:?}", e.to_string());
+    if let Some(subject_wildcard) = subject_wildcard {
+        telemetry.update_error_metrics(&subject_wildcard, &e.to_string());
+        telemetry.update_unsubscribed(user_id, &subject_wildcard);
+    }
+    telemetry.decrement_subscriptions_count();
+    send_message_to_socket(&mut session, ServerMessage::Error(e.to_string()))
+        .await;
+    let _ = session.close(None).await;
+}
+
+async fn send_message_to_socket(session: &mut Session, message: ServerMessage) {
+    let data = serde_json::to_vec(&message).ok().unwrap_or_default();
+    let _ = session.binary(data).await;
+}
+
+async fn decode(
+    subject_wildcard: &str,
+    s3_payload: Vec<u8>,
+) -> Result<Vec<u8>, WsSubscriptionError> {
+    let subject = verify_and_extract_subject_name(subject_wildcard)?;
+    match subject.as_str() {
+        Transaction::NAME => {
+            let entity = Transaction::decode_or_panic(s3_payload);
+            serde_json::to_vec(&entity)
+                .map_err(WsSubscriptionError::UnparsablePayload)
+        }
+        Block::NAME => {
+            let entity = Block::decode_or_panic(s3_payload);
+            serde_json::to_vec(&entity)
+                .map_err(WsSubscriptionError::UnparsablePayload)
+        }
+        Input::NAME => {
+            let entity = Input::decode_or_panic(s3_payload);
+            serde_json::to_vec(&entity)
+                .map_err(WsSubscriptionError::UnparsablePayload)
+        }
+        Output::NAME => {
+            let entity = Output::decode_or_panic(s3_payload);
+            serde_json::to_vec(&entity)
+                .map_err(WsSubscriptionError::UnparsablePayload)
+        }
+        Receipt::NAME => {
+            let entity = Receipt::decode_or_panic(s3_payload);
+            serde_json::to_vec(&entity)
+                .map_err(WsSubscriptionError::UnparsablePayload)
+        }
+        Utxo::NAME => {
+            let entity = Utxo::decode_or_panic(s3_payload);
+            serde_json::to_vec(&entity)
+                .map_err(WsSubscriptionError::UnparsablePayload)
+        }
+        Log::NAME => {
+            let entity = Log::decode_or_panic(s3_payload);
+            serde_json::to_vec(&entity)
+                .map_err(WsSubscriptionError::UnparsablePayload)
+        }
+        _ => Err(WsSubscriptionError::UnknownSubjectName(subject.to_string())),
+    }
+}
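Not part of the patch itself: a self-contained sketch of the message shapes the handler above round-trips. The mirror types here are assumptions based on the field names visible in this diff (the canonical definitions live in `server/ws/models.rs`), and the JSON casing shown is serde's default externally tagged form, which the real types may override with rename attributes:

```rust
use serde::{Deserialize, Serialize};

// Mirror types for illustration only; not the crate's own definitions.
#[derive(Serialize, Deserialize, Debug)]
enum DeliverPolicy {
    All,
}

#[derive(Serialize, Deserialize, Debug)]
struct SubscriptionPayload {
    wildcard: String,
    deliver_policy: DeliverPolicy,
}

#[derive(Serialize, Deserialize, Debug)]
enum ClientMessage {
    Subscribe(SubscriptionPayload),
    Unsubscribe(SubscriptionPayload),
}

fn main() -> serde_json::Result<()> {
    // Under default serde rules this serializes to:
    // {"Subscribe":{"wildcard":"blocks.*.*","deliver_policy":"All"}}
    // "blocks.*.*" is a hypothetical wildcard that would pass the handler's
    // check of "more than one dot-separated part, known subject name first".
    let msg = ClientMessage::Subscribe(SubscriptionPayload {
        wildcard: "blocks.*.*".to_string(),
        deliver_policy: DeliverPolicy::All,
    });
    let bytes = serde_json::to_vec(&msg)?;
    // parse_client_message above does the inverse with serde_json::from_slice.
    let parsed: ClientMessage = serde_json::from_slice(&bytes)?;
    println!("{parsed:?}");
    Ok(())
}
```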
diff --git a/crates/fuel-streams-ws/src/server/ws/state.rs b/crates/sv-webserver/src/server/ws/state.rs
similarity index 100%
rename from crates/fuel-streams-ws/src/server/ws/state.rs
rename to crates/sv-webserver/src/server/ws/state.rs
diff --git a/crates/fuel-streams-publisher/src/telemetry/elastic_search.rs b/crates/sv-webserver/src/telemetry/elastic_search.rs
similarity index 100%
rename from crates/fuel-streams-publisher/src/telemetry/elastic_search.rs
rename to crates/sv-webserver/src/telemetry/elastic_search.rs
diff --git a/crates/fuel-streams-ws/src/telemetry/metrics.rs b/crates/sv-webserver/src/telemetry/metrics.rs
similarity index 100%
rename from crates/fuel-streams-ws/src/telemetry/metrics.rs
rename to crates/sv-webserver/src/telemetry/metrics.rs
diff --git a/crates/fuel-streams-ws/src/telemetry/mod.rs b/crates/sv-webserver/src/telemetry/mod.rs
similarity index 100%
rename from crates/fuel-streams-ws/src/telemetry/mod.rs
rename to crates/sv-webserver/src/telemetry/mod.rs
diff --git a/crates/fuel-streams-publisher/src/telemetry/runtime.rs b/crates/sv-webserver/src/telemetry/runtime.rs
similarity index 100%
rename from crates/fuel-streams-publisher/src/telemetry/runtime.rs
rename to crates/sv-webserver/src/telemetry/runtime.rs
diff --git a/crates/fuel-streams-ws/src/telemetry/system.rs b/crates/sv-webserver/src/telemetry/system.rs
similarity index 88%
rename from crates/fuel-streams-ws/src/telemetry/system.rs
rename to crates/sv-webserver/src/telemetry/system.rs
index ec0f14eb..bae499a0 100644
--- a/crates/fuel-streams-ws/src/telemetry/system.rs
+++ b/crates/sv-webserver/src/telemetry/system.rs
@@ -572,26 +572,20 @@ mod tests {
                 kernel_version: "kernel-version".to_string(),
                 uptime: 123456,
             },
-            disk: vec![(
-                PathBuf::from("disk1"),
-                Disk {
-                    size: 1000,
-                    free: 877,
-                    usage: Decimal::new(1234, 2),
-                },
-            )]
+            disk: vec![(PathBuf::from("disk1"), Disk {
+                size: 1000,
+                free: 877,
+                usage: Decimal::new(1234, 2),
+            })]
             .into_iter()
             .collect(),
             cpu_physical_core_count: 1,
             cpu_count: 1,
-            cpu: vec![(
-                1,
-                Cpu {
-                    name: "cpu1".to_string(),
-                    frequency: 12345,
-                    usage: Decimal::new(1234, 2),
-                },
-            )]
+            cpu: vec![(1, Cpu {
+                name: "cpu1".to_string(),
+                frequency: 12345,
+                usage: Decimal::new(1234, 2),
+            })]
             .into_iter()
             .collect(),
         },
@@ -600,35 +594,32 @@ mod tests {
         let output = serde_prometheus::to_string(&metrics, None, &[])
            .expect("prometheus");

-        assert_eq!(
-            output.trim_end().split('\n').collect::<Vec<&str>>(),
-            vec![
-                r#"system_application_pid 0"#,
-                r#"system_application_name{path = "process"} 1"#,
-                r#"system_application_cpu_usage 12.34"#,
-                r#"system_application_size{type = "memory"} 1000"#,
-                r#"system_application_free{type = "memory"} 877"#,
-                r#"system_application_usage{type = "memory"} 12.34"#,
-                r#"system_memory_size{type = "system"} 1000"#,
-                r#"system_memory_free{type = "system"} 877"#,
-                r#"system_memory_usage{type = "system"} 12.34"#,
-                r#"system_memory_size{type = "swap"} 1000"#,
-                r#"system_memory_free{type = "swap"} 877"#,
-                r#"system_memory_usage{type = "swap"} 12.34"#,
-                r#"system_load_average_1 1.2"#,
-                r#"system_load_average_5 2.3"#,
-                r#"system_load_average_15 3.4"#,
-                r#"system_host_os_version{path = "os-version"} 1"#,
-                r#"system_host_kernel_version{path = "kernel-version"} 1"#,
-                r#"system_host_uptime 123456"#,
-                r#"system_disk_size{path = "disk1"} 1000"#,
-                r#"system_disk_free{path = "disk1"} 877"#,
-                r#"system_disk_usage{path = "disk1"} 12.34"#,
-                r#"system_cpu_physical_core_count 1"#,
-                r#"system_cpu_count 1"#,
-                r#"system_cpu_frequency{id = "1"} 12345"#,
-                r#"system_cpu_usage{id = "1"} 12.34"#,
-            ]
-        )
+        assert_eq!(output.trim_end().split('\n').collect::<Vec<&str>>(), vec![
+            r#"system_application_pid 0"#,
+            r#"system_application_name{path = "process"} 1"#,
+            r#"system_application_cpu_usage 12.34"#,
+            r#"system_application_size{type = "memory"} 1000"#,
+            r#"system_application_free{type = "memory"} 877"#,
+            r#"system_application_usage{type = "memory"} 12.34"#,
+            r#"system_memory_size{type = "system"} 1000"#,
+            r#"system_memory_free{type = "system"} 877"#,
+            r#"system_memory_usage{type = "system"} 12.34"#,
+            r#"system_memory_size{type = "swap"} 1000"#,
+            r#"system_memory_free{type = "swap"} 877"#,
+            r#"system_memory_usage{type = "swap"} 12.34"#,
+            r#"system_load_average_1 1.2"#,
+            r#"system_load_average_5 2.3"#,
+            r#"system_load_average_15 3.4"#,
+            r#"system_host_os_version{path = "os-version"} 1"#,
+            r#"system_host_kernel_version{path = "kernel-version"} 1"#,
+            r#"system_host_uptime 123456"#,
+            r#"system_disk_size{path = "disk1"} 1000"#,
+            r#"system_disk_free{path = "disk1"} 877"#,
+            r#"system_disk_usage{path = "disk1"} 12.34"#,
+            r#"system_cpu_physical_core_count 1"#,
+            r#"system_cpu_count 1"#,
+            r#"system_cpu_frequency{id = "1"} 12345"#,
+            r#"system_cpu_usage{id = "1"} 12.34"#,
+        ])
     }
 }
diff --git a/examples/Cargo.toml b/examples/Cargo.toml
index cad5e612..c9a435e3 100644
--- a/examples/Cargo.toml
+++ b/examples/Cargo.toml
@@ -8,8 +8,8 @@ edition = "2021"
 anyhow = { workspace = true }
 fuel-core-types = { workspace = true }
 fuel-streams = { workspace = true, features = ["test-helpers"] }
-fuel-streams-ws = { workspace = true }
 futures = { workspace = true }
+sv-webserver = { workspace = true }
 tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }

 [[example]]
diff --git a/examples/multiple-streams.rs b/examples/multiple-streams.rs
index 1bb1d4ca..7f5508f3 100644
--- a/examples/multiple-streams.rs
+++ b/examples/multiple-streams.rs
@@ -240,6 +240,7 @@ async fn stream_contract(
 ) -> anyhow::Result<()> {
     let mut receipt_stream =
         fuel_streams::Stream::<Receipt>::new(client).await;
+    // Set up filters for all receipt types that can be associated with a contract
     receipt_stream.with_filter(
         ReceiptsBurnSubject::new().with_contract_id(Some(contract_id.into())),
     );
@@ -275,21 +276,37 @@ async fn stream_contract(
     );

     let mut sub = receipt_stream.subscribe_raw().await?;
-
     while let Some(bytes) = sub.next().await {
         let decoded_msg = Receipt::decode_raw(bytes).unwrap();
         let receipt = decoded_msg.payload;

-        // Check if the receipt has a contract_id and if it matches our target
-        if let Some(receipt_contract_id) = &receipt.contract_id {
-            if *receipt_contract_id == contract_id.into() {
-                let receipt_subject = decoded_msg.subject;
-                let receipt_published_at = decoded_msg.timestamp;
-                println!(
-                    "Received contract receipt: data={:?}, subject={}, published_at={}",
-                    receipt, receipt_subject, receipt_published_at
-                );
+        // Check if this is a contract-related receipt and matches our target
+        let should_process = match &receipt {
+            Receipt::Call(r) => {
+                r.id == contract_id.into() || r.to == contract_id.into()
+            }
+            Receipt::Return(r) => r.id == contract_id.into(),
+            Receipt::ReturnData(r) => r.id == contract_id.into(),
+            Receipt::Panic(r) => r.id == contract_id.into(),
+            Receipt::Revert(r) => r.id == contract_id.into(),
+            Receipt::Log(r) => r.id == contract_id.into(),
+            Receipt::LogData(r) => r.id == contract_id.into(),
+            Receipt::Transfer(r) => {
+                r.id == contract_id.into() || r.to == contract_id.into()
             }
+            Receipt::TransferOut(r) => r.id == contract_id.into(),
+            Receipt::Mint(r) => r.contract_id ==
contract_id.into(), + Receipt::Burn(r) => r.contract_id == contract_id.into(), + Receipt::ScriptResult(_) | Receipt::MessageOut(_) => false, + }; + + if should_process { + let receipt_subject = decoded_msg.subject; + let receipt_published_at = decoded_msg.timestamp; + println!( + "Received contract receipt: data={:?}, subject={}, published_at={}", + receipt, receipt_subject, receipt_published_at + ); } } diff --git a/examples/websockets.rs b/examples/websockets.rs index c05c8206..c8de232a 100755 --- a/examples/websockets.rs +++ b/examples/websockets.rs @@ -18,7 +18,7 @@ use fuel_streams::{ subjects::SubjectBuildable, types::FuelNetwork, }; -use fuel_streams_ws::{ +use sv_webserver::{ client::WebSocketClient, server::ws::models::DeliverPolicy, }; diff --git a/knope.toml b/knope.toml index 6f7f5d2d..57f4fc54 100644 --- a/knope.toml +++ b/knope.toml @@ -1,5 +1,5 @@ # ------------------------------------------------------------ -# Fuel-streams-publisher package +# Fuel-streams package # ------------------------------------------------------------ [packages.fuel-streams] versioned_files = ["crates/fuel-streams/Cargo.toml"] @@ -19,63 +19,6 @@ extra_changelog_sections = [ ], name = "📝 Notes" }, ] -[[packages.fuel-streams.assets]] -path = "artifacts/fuel-streams-publisher-Linux-aarch64-gnu.tar.gz" - -[[packages.fuel-streams.assets]] -path = "artifacts/fuel-streams-publisher-Linux-aarch64-musl.tar.gz" - -[[packages.fuel-streams.assets]] -path = "artifacts/fuel-streams-publisher-Linux-x86_64-gnu.tar.gz" - -[[packages.fuel-streams.assets]] -path = "artifacts/fuel-streams-publisher-Linux-x86_64-musl.tar.gz" - -[[packages.fuel-streams.assets]] -path = "artifacts/fuel-streams-publisher-macOS-aarch64.tar.gz" - -[[packages.fuel-streams.assets]] -path = "artifacts/fuel-streams-publisher-macOS-x86_64.tar.gz" - -# ------------------------------------------------------------ -# Fuel-streams-ws package -# ------------------------------------------------------------ -[packages.fuel-streams-ws] -versioned_files = ["crates/fuel-streams-ws/Cargo.toml"] -changelog = "CHANGELOG.md" -extra_changelog_sections = [ - { types = [ - "major", - ], name = "⚠️ Breaking Change" }, - { types = [ - "minor", - ], name = "🚀 Features" }, - { types = [ - "patch", - ], name = "🐛 Fixes" }, - { footers = [ - "Changelog-Note", - ], name = "📝 Notes" }, -] - -[[packages.fuel-streams-ws.assets]] -path = "artifacts/fuel-streams-ws-Linux-aarch64-gnu.tar.gz" - -[[packages.fuel-streams-ws.assets]] -path = "artifacts/fuel-streams-ws-Linux-aarch64-musl.tar.gz" - -[[packages.fuel-streams-ws.assets]] -path = "artifacts/fuel-streams-ws-Linux-x86_64-gnu.tar.gz" - -[[packages.fuel-streams-ws.assets]] -path = "artifacts/fuel-streams-ws-Linux-x86_64-musl.tar.gz" - -[[packages.fuel-streams-ws.assets]] -path = "artifacts/fuel-streams-ws-macOS-aarch64.tar.gz" - -[[packages.fuel-streams-ws.assets]] -path = "artifacts/fuel-streams-ws-macOS-x86_64.tar.gz" - # ------------------------------------------------------------ # Workflow to get the current version # ------------------------------------------------------------ diff --git a/scripts/run_publisher.sh b/scripts/run_publisher.sh index c2170dd8..3ffa1868 100755 --- a/scripts/run_publisher.sh +++ b/scripts/run_publisher.sh @@ -28,6 +28,12 @@ usage() { exit 1 } +# Set default values from environment variables with fallbacks +NETWORK=${NETWORK:-"testnet"} +MODE=${MODE:-"profiling"} +PORT=${PORT:-"4004"} +TELEMETRY_PORT=${TELEMETRY_PORT:-"8080"} + while [[ "$#" -gt 0 ]]; do case $1 in --network) @@ -63,7 
+69,7 @@ done # ------------------------------ # Load Environment # ------------------------------ -source ./scripts/set_env.sh +source ./scripts/set_env.sh NETWORK=${NETWORK} # Print the configuration being used echo -e "\n==========================================" @@ -94,30 +100,35 @@ echo -e "==========================================\n" # Define common arguments COMMON_ARGS=( "--enable-relayer" + "--service-name" "fuel-${NETWORK}-node" "--keypair" "${KEYPAIR}" "--relayer" "${RELAYER}" "--ip=0.0.0.0" - "--service-name" "fuel-${NETWORK}-node" - "--db-path" "./cluster/docker/db-${NETWORK}" - "--snapshot" "./cluster/chain-config/${NETWORK}" "--port" "${PORT}" - "--telemetry-port" "${TELEMETRY_PORT}" "--peering-port" "30333" + "--db-path" "./cluster/docker/db-${NETWORK}" + "--snapshot" "./cluster/chain-config/${NETWORK}" "--utxo-validation" "--poa-instant" "false" "--enable-p2p" - "--sync-header-batch-size" "${SYNC_HEADER_BATCH_SIZE}" - "--relayer-log-page-size=${RELAYER_LOG_PAGE_SIZE}" - "--sync-block-stream-buffer-size" "30" - "--bootstrap-nodes" "${RESERVED_NODES}" + "--reserved-nodes" "${RESERVED_NODES}" "--relayer-v2-listening-contracts=${RELAYER_V2_LISTENING_CONTRACTS}" "--relayer-da-deploy-height=${RELAYER_DA_DEPLOY_HEIGHT}" + "--relayer-log-page-size=${RELAYER_LOG_PAGE_SIZE}" + "--sync-block-stream-buffer-size" "50" + "--max-database-cache-size" "17179869184" + "--state-rewind-duration" "136y" + "--request-timeout" "60" + "--graphql-max-complexity" "1000000000" + # Application specific + "--nats-url" "nats://localhost:4222" + # "--telemetry-port" "${TELEMETRY_PORT}" ) # Execute based on mode if [ "$MODE" == "dev" ]; then - cargo run -p fuel-streams-publisher -- "${COMMON_ARGS[@]}" ${EXTRA_ARGS} + cargo run -p sv-publisher -- "${COMMON_ARGS[@]}" ${EXTRA_ARGS} else - cargo build --profile profiling --package fuel-streams-publisher - samply record ./target/profiling/fuel-streams-publisher "${COMMON_ARGS[@]}" ${EXTRA_ARGS} + cargo build --profile profiling --package sv-publisher + samply record ./target/profiling/sv-publisher "${COMMON_ARGS[@]}" ${EXTRA_ARGS} fi diff --git a/scripts/run_streamer.sh b/scripts/run_streamer.sh deleted file mode 100755 index c2a109a6..00000000 --- a/scripts/run_streamer.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/bin/bash - -# Exit immediately if a command exits with a non-zero status -set -e - -# ------------------------------ -# Function to Display Usage -# ------------------------------ -usage() { - echo "Usage: $0 [options]" - echo "Options:" - echo " --mode : Specify the run mode (dev|profiling)" - echo " --extra-args : Optional additional arguments to append (in quotes)" - echo "" - echo "Examples:" - echo " $0 # Runs with all defaults" - echo " $0 --mode dev # Runs with dev mode" - echo " $0 --mode dev # Custom config toml path and mode" - exit 1 -} - -while [[ "$#" -gt 0 ]]; do - case $1 in - --mode) - MODE="$2" - shift 2 - ;; - --extra-args) - EXTRA_ARGS="$2" - shift 2 - ;; - --help) - usage - ;; - *) - echo "Error: Unknown parameter passed: $1" >&2 - usage - ;; - esac -done - -# ------------------------------ -# Load Environment -# ------------------------------ -source ./scripts/set_env.sh - -# Print the configuration being used -echo -e "\n==========================================" -echo "⚙️ Configuration" -echo -e "==========================================" - -# Runtime Configuration -echo "Runtime Settings:" -echo "→ Mode: $MODE" -if [ -n "$EXTRA_ARGS" ]; then - echo "→ Extra Arguments: $EXTRA_ARGS" -fi - -# Environment Variables -echo -e 
"\nEnvironment Variables:" -echo " → Use Metrics: ${USE_METRICS}" -echo " → Use Elastic Logging: $USE_ELASTIC_LOGGING" -echo " → AWS S3 Enabled: $AWS_S3_ENABLED" -echo " → AWS Access Key Id: $AWS_ACCESS_KEY_ID" -echo " → AWS Secret Access Key: $AWS_SECRET_ACCESS_KEY" -echo " → AWS Region: $AWS_REGION" -echo " → AWS Bucket: $AWS_S3_BUCKET_NAME" -echo " → AWS Endpoint: $AWS_ENDPOINT_URL" -echo " → Jwt Auth Secret: $JWT_AUTH_SECRET" -echo " → Nats Url: $NATS_URL" -echo -e "==========================================\n" - -# Define common arguments -COMMON_ARGS=() - -# Execute based on mode -if [ "$MODE" == "dev" ]; then - cargo run -p fuel-streams-ws -- "${COMMON_ARGS[@]}" ${EXTRA_ARGS} -else - cargo build --profile profiling --package fuel-streams-ws - samply record ./target/profiling/fuel-streams-ws "${COMMON_ARGS[@]}" ${EXTRA_ARGS} -fi diff --git a/scripts/run_webserver.sh b/scripts/run_webserver.sh new file mode 100755 index 00000000..91845a8a --- /dev/null +++ b/scripts/run_webserver.sh @@ -0,0 +1,91 @@ +#!/bin/bash + +# Exit immediately if a command exits with a non-zero status +set -e + +# ------------------------------ +# Function to Display Usage +# ------------------------------ +usage() { + echo "Usage: $0 [options]" + echo "Options:" + echo " --mode : Specify the run mode (dev|profiling)" + echo " --port : Port number for the API server (default: 9003)" + echo " --nats-url : NATS URL (default: nats://localhost:4222)" + echo " --extra-args : Optional additional arguments to append (in quotes)" + echo "" + echo "Examples:" + echo " $0 # Runs with all defaults" + echo " $0 --mode dev --port 8080 # Custom port" + echo " $0 --mode dev --extra-args '\"--use-metrics\"' # Enable metrics" + exit 1 +} + +while [[ "$#" -gt 0 ]]; do + case $1 in + --mode) + MODE="$2" + shift 2 + ;; + --port) + PORT="$2" + shift 2 + ;; + --nats-url) + NATS_URL="$2" + shift 2 + ;; + --extra-args) + EXTRA_ARGS="$2" + shift 2 + ;; + --help) + usage + ;; + *) + echo "Error: Unknown parameter passed: $1" >&2 + usage + ;; + esac +done + +# Load environment variables with defaults +PORT=${PORT:-9003} +NATS_URL=${NATS_URL:-nats://localhost:4222} +MODE=${MODE:-dev} +EXTRA_ARGS=${EXTRA_ARGS:-""} + +# ------------------------------ +# Load Environment +# ------------------------------ +source ./scripts/set_env.sh + +# Print the configuration being used +echo -e "\n==========================================" +echo "⚙️ Configuration" +echo -e "==========================================" + +# Runtime Configuration +echo "Runtime Settings:" +echo "→ Mode: ${MODE:-dev}" +echo "→ API Port: ${PORT:-9003}" +echo "→ NATS URL: ${NATS_URL:-"nats://localhost:4222"}" +if [ -n "$EXTRA_ARGS" ]; then + echo "→ Extra Arguments: $EXTRA_ARGS" +fi + +echo -e "==========================================\n" + +# Define common arguments +COMMON_ARGS=( + "--port" "${PORT:-9003}" + "--nats-url" "${NATS_URL:-"nats://localhost:4222"}" +) + +# Execute based on mode +if [ "${MODE:-dev}" == "dev" ]; then + cargo run -p sv-webserver -- "${COMMON_ARGS[@]}" ${EXTRA_ARGS} +else + cargo build --profile profiling --package sv-webserver + samply record ./target/profiling/sv-webserver "${COMMON_ARGS[@]}" ${EXTRA_ARGS} +fi diff --git a/scripts/set_env.sh b/scripts/set_env.sh index da21ea07..36efa113 100755 --- a/scripts/set_env.sh +++ b/scripts/set_env.sh @@ -40,13 +40,14 @@ cleanup_env() { fi } -# Load initial environment -load_env - # Clean up previous auto-generated content cleanup_env +# Load initial environment +load_env + # Set and export network-specific 
variables +export NETWORK=$NETWORK export RESERVED_NODES=$(eval echo "\$${NETWORK_UPPER}_RESERVED_NODES") export RELAYER_V2_LISTENING_CONTRACTS=$(eval echo "\$${NETWORK_UPPER}_RELAYER_V2_LISTENING_CONTRACTS") export RELAYER_DA_DEPLOY_HEIGHT=$(eval echo "\$${NETWORK_UPPER}_RELAYER_DA_DEPLOY_HEIGHT") @@ -54,17 +55,6 @@ export RELAYER=$(eval echo "\$${NETWORK_UPPER}_RELAYER") export SYNC_HEADER_BATCH_SIZE=$(eval echo "\$${NETWORK_UPPER}_SYNC_HEADER_BATCH_SIZE") export RELAYER_LOG_PAGE_SIZE=$(eval echo "\$${NETWORK_UPPER}_RELAYER_LOG_PAGE_SIZE") export CHAIN_CONFIG=$NETWORK -export NETWORK=$NETWORK -export USE_METRICS="$(echo "$USE_METRICS")" -export USE_ELASTIC_LOGGING="$(echo "$USE_ELASTIC_LOGGING")" -export AWS_S3_ENABLED="$(echo "$AWS_S3_ENABLED")" -export AWS_ACCESS_KEY_ID="$(echo "$AWS_ACCESS_KEY_ID")" -export AWS_SECRET_ACCESS_KEY="$(echo "$AWS_SECRET_ACCESS_KEY")" -export AWS_REGION="$(echo "$AWS_REGION")" -export AWS_ENDPOINT_URL="$(echo "$AWS_ENDPOINT_URL")" -export AWS_S3_BUCKET_NAME="$(echo "$AWS_S3_BUCKET_NAME")" -export JWT_AUTH_SECRET="$(echo "$JWT_AUTH_SECRET")" -export NATS_URL="$(echo "$NATS_URL")" # Append network-specific variables to .env file { @@ -79,14 +69,4 @@ export NATS_URL="$(echo "$NATS_URL")" echo "SYNC_HEADER_BATCH_SIZE=$SYNC_HEADER_BATCH_SIZE" echo "RELAYER_LOG_PAGE_SIZE=$RELAYER_LOG_PAGE_SIZE" echo "CHAIN_CONFIG=$CHAIN_CONFIG" - echo "USE_METRICS=$USE_METRICS" - echo "USE_ELASTIC_LOGGING=$USE_ELASTIC_LOGGING" - echo "AWS_S3_ENABLED=$AWS_S3_ENABLED" - echo "AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID" - echo "AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY" - echo "AWS_REGION=$AWS_REGION" - echo "AWS_ENDPOINT_URL=$AWS_ENDPOINT_URL" - echo "AWS_S3_BUCKET_NAME=$AWS_S3_BUCKET_NAME" - echo "JWT_AUTH_SECRET=$JWT_AUTH_SECRET" - echo "NATS_URL=$NATS_URL" } >> .env diff --git a/tarpaulin.toml b/tarpaulin.toml index 1bc0cf6d..ce0c18cd 100644 --- a/tarpaulin.toml +++ b/tarpaulin.toml @@ -63,10 +63,10 @@ engine = "Llvm" # ========================================== # ignore due to wasm incompatibility -# [cov_fuel_streams_publisher] +# [cov_sv_publisher] # name = "Fuel Streams Publisher Coverage Analysis" # packages = [ -# "fuel-streams-publisher" +# "sv-publisher" # ] # all-features = true # run-types = [ diff --git a/tests/Cargo.toml b/tests/Cargo.toml index 9f1c81b8..747eb520 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -22,14 +22,9 @@ name = "special_integration_tests" path = "src/main.rs" [dependencies] -anyhow = { workspace = true } -async-trait = { workspace = true } fuel-core = { workspace = true, features = ["test-helpers"] } -fuel-core-importer = { workspace = true, features = ["test-helpers"] } -fuel-core-types = { workspace = true } fuel-streams = { workspace = true, features = ["test-helpers"] } fuel-streams-core = { workspace = true, features = ["test-helpers"] } -fuel-streams-publisher = { workspace = true, features = ["test-helpers"] } futures = { workspace = true } rand = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread", "macros", "test-util"] } diff --git a/tests/tests/client.rs b/tests/tests/client.rs index a0178a7c..29ffcb5d 100644 --- a/tests/tests/client.rs +++ b/tests/tests/client.rs @@ -73,7 +73,9 @@ async fn multiple_client_connections() -> BoxedResult<()> { #[tokio::test] async fn public_user_cannot_create_streams() -> BoxedResult<()> { - let opts = NatsClientOpts::new(FuelNetwork::Local) + let network = FuelNetwork::Local; + let opts = NatsClientOpts::public_opts() + .with_url(network.to_nats_url()) 
         .with_rdn_namespace()
         .with_timeout(1);
     let client = NatsClient::connect(&opts).await?;
@@ -95,7 +97,9 @@ async fn public_user_cannot_create_streams() -> BoxedResult<()> {

 #[tokio::test]
 async fn public_user_cannot_create_stores() -> BoxedResult<()> {
-    let opts = NatsClientOpts::new(FuelNetwork::Local)
+    let network = FuelNetwork::Local;
+    let opts = NatsClientOpts::public_opts()
+        .with_url(network.to_nats_url())
         .with_rdn_namespace()
         .with_timeout(1);

@@ -116,7 +120,9 @@ async fn public_user_cannot_create_stores() -> BoxedResult<()> {

 #[tokio::test]
 async fn public_user_cannot_delete_stores() -> BoxedResult<()> {
+    let network = FuelNetwork::Local;
     let opts = NatsClientOpts::admin_opts()
+        .with_url(network.to_nats_url())
         .with_rdn_namespace()
         .with_timeout(1);

@@ -131,7 +137,8 @@ async fn public_user_cannot_delete_stores() -> BoxedResult<()> {
         })
         .await?;

-    let opts = NatsClientOpts::new(FuelNetwork::Local)
+    let opts = NatsClientOpts::public_opts()
+        .with_url(network.to_nats_url())
         .with_rdn_namespace()
         .with_timeout(1);
     let client = NatsClient::connect(&opts).await?;
@@ -164,7 +171,9 @@ async fn public_user_cannot_delete_stream() -> BoxedResult<()> {
         })
         .await?;

-    let public_opts = opts.clone().with_role(FuelNetworkUserRole::Default);
+    let network = FuelNetwork::Local;
+    let public_opts =
+        NatsClientOpts::public_opts().with_url(network.to_nats_url());
     let public_client = NatsClient::connect(&public_opts).await?;

     assert!(
@@ -181,21 +190,29 @@ async fn public_user_cannot_delete_stream() -> BoxedResult<()> {

 #[tokio::test]
 async fn public_user_can_access_streams_after_created() {
-    let opts = NatsClientOpts::new(FuelNetwork::Local)
+    let network = FuelNetwork::Local;
+    let admin_opts = NatsClientOpts::admin_opts()
+        .with_url(network.to_nats_url())
         .with_rdn_namespace()
         .with_timeout(1);
-    let admin_opts = opts.clone().with_role(FuelNetworkUserRole::Admin);
-    assert!(NatsClient::connect(&admin_opts).await.is_ok());
+    let public_opts = NatsClientOpts::public_opts()
+        .with_url(network.to_nats_url())
+        .with_rdn_namespace()
+        .with_timeout(1);

-    let public_opts = opts.clone().with_role(FuelNetworkUserRole::Default);
+    assert!(NatsClient::connect(&admin_opts).await.is_ok());
     assert!(NatsClient::connect(&public_opts).await.is_ok());
 }

 #[tokio::test]
 async fn public_and_admin_user_can_access_streams_after_created(
 ) -> BoxedResult<()> {
-    let admin_opts = NatsClientOpts::admin_opts();
+    let network = FuelNetwork::Local;
+    let admin_opts = NatsClientOpts::admin_opts()
+        .with_url(network.to_nats_url())
+        .with_rdn_namespace()
+        .with_timeout(1);
     let s3_opts = Arc::new(S3ClientOpts::admin_opts());
     let admin_tasks: Vec<JoinHandle<BoxedResult<()>>> = (0..100)
         .map(|_| {
@@ -210,8 +227,12 @@ async fn public_and_admin_user_can_access_streams_after_created(
         })
         .collect();

-    let public_opts = NatsClientOpts::new(FuelNetwork::Local);
-    let s3_public_opts = Arc::new(S3ClientOpts::new(FuelNetwork::Local));
+    let public_opts = NatsClientOpts::public_opts()
+        .with_url(network.to_nats_url())
+        .with_rdn_namespace()
+        .with_timeout(1);
+    let s3_public_opts =
+        Arc::new(S3ClientOpts::new(S3Env::Local, S3Role::Public));
     let public_tasks: Vec<JoinHandle<BoxedResult<()>>> = (0..100)
         .map(|_| {
             let opts: NatsClientOpts = public_opts.clone();
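The client.rs changes above replace the old `with_role(...)` pattern with role-specific constructors. A minimal usage sketch of that pattern, assuming the same test-helpers re-exports the tests rely on (`BoxedResult`, the prelude types) and a local NATS server on the default port:

```rust
use fuel_streams_core::prelude::*; // assumed to re-export the types below

async fn connect_admin_and_public() -> BoxedResult<()> {
    let network = FuelNetwork::Local;
    // The admin role may create and delete streams/stores...
    let admin_opts = NatsClientOpts::admin_opts()
        .with_url(network.to_nats_url())
        .with_rdn_namespace()
        .with_timeout(1);
    let _admin = NatsClient::connect(&admin_opts).await?;
    // ...while the public role can only read what already exists,
    // which is exactly what the permission tests above assert.
    let public_opts = NatsClientOpts::public_opts()
        .with_url(network.to_nats_url())
        .with_rdn_namespace()
        .with_timeout(1);
    let _public = NatsClient::connect(&public_opts).await?;
    Ok(())
}
```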
diff --git a/tests/tests/publisher.rs b/tests/tests/publisher.rs
index ecd8bcf6..19eb20a2 100644
--- a/tests/tests/publisher.rs
+++ b/tests/tests/publisher.rs
@@ -1,346 +1,416 @@
-use std::{
-    collections::{HashMap, HashSet},
-    sync::Arc,
-};
-
-use fuel_core::combined_database::CombinedDatabase;
-use fuel_core_importer::ImporterResult;
-use fuel_core_types::blockchain::SealedBlock;
-use fuel_streams_core::prelude::*;
-use fuel_streams_publisher::{
-    publisher::shutdown::ShutdownController,
-    shutdown::get_controller_and_token,
-    FuelCoreLike,
-    Publisher,
-};
-use futures::StreamExt;
-use tokio::sync::broadcast::{self, Receiver, Sender};
-
-// TODO - Re-implement with `mockall` and `mock` macros
-struct TestFuelCore {
-    chain_id: FuelCoreChainId,
-    base_asset_id: FuelCoreAssetId,
-    database: CombinedDatabase,
-    blocks_broadcaster: Sender<ImporterResult>,
-    receipts: Option<Vec<FuelCoreReceipt>>,
-}
-
-impl TestFuelCore {
-    fn default(
-        blocks_broadcaster: Sender<ImporterResult>,
-    ) -> Self {
-        Self {
-            chain_id: FuelCoreChainId::default(),
-            base_asset_id: FuelCoreAssetId::zeroed(),
-            database: CombinedDatabase::default(),
-            blocks_broadcaster,
-            receipts: None,
-        }
-    }
-    fn with_receipts(mut self, receipts: Vec<FuelCoreReceipt>) -> Self {
-        self.receipts = Some(receipts);
-        self
-    }
-    fn arc(self) -> Arc<Self> {
-        Arc::new(self)
-    }
-}
-
-#[async_trait::async_trait]
-impl FuelCoreLike for TestFuelCore {
-    async fn start(&self) -> anyhow::Result<()> {
-        Ok(())
-    }
-    fn is_started(&self) -> bool {
-        true
-    }
-    async fn await_synced_at_least_once(
-        &self,
-        _historical: bool,
-    ) -> anyhow::Result<()> {
-        Ok(())
-    }
-    async fn stop(&self) {}
-
-    async fn await_offchain_db_sync(
-        &self,
-        _block_id: &FuelCoreBlockId,
-    ) -> anyhow::Result<()> {
-        Ok(())
-    }
-
-    fn base_asset_id(&self) -> &FuelCoreAssetId {
-        &self.base_asset_id
-    }
-    fn chain_id(&self) -> &FuelCoreChainId {
-        &self.chain_id
-    }
-
-    fn database(&self) -> &CombinedDatabase {
-        &self.database
-    }
-
-    fn blocks_subscription(
-        &self,
-    ) -> Receiver<ImporterResult> {
-        self.blocks_broadcaster.subscribe()
-    }
-
-    fn get_receipts(
-        &self,
-        _tx_id: &FuelCoreBytes32,
-    ) -> anyhow::Result<Option<Vec<FuelCoreReceipt>>> {
-        Ok(self.receipts.clone())
-    }
-}
-
-#[tokio::test(flavor = "multi_thread")]
-async fn doesnt_publish_any_message_when_no_block_has_been_mined() {
-    let (blocks_broadcaster, _) = broadcast::channel::<ImporterResult>(1);
-    let s3_client = Arc::new(S3Client::new_for_testing().await);
-    let publisher = new_publisher(blocks_broadcaster.clone(), &s3_client).await;
-
-    let shutdown_controller = start_publisher(&publisher).await;
-    stop_publisher(shutdown_controller).await;
-
-    assert!(publisher.get_fuel_streams().is_empty().await);
-}
-
-#[tokio::test(flavor = "multi_thread")]
-async fn publishes_a_block_message_when_a_single_block_has_been_mined() {
-    let (blocks_broadcaster, _) = broadcast::channel::<ImporterResult>(1);
-    let s3_client = Arc::new(S3Client::new_for_testing().await);
-    let publisher = new_publisher(blocks_broadcaster.clone(), &s3_client).await;
-
-    publish_block(&publisher, &blocks_broadcaster).await;
-
-    assert!(publisher
-        .get_fuel_streams()
-        .blocks()
-        .get_last_published(BlocksSubject::WILDCARD)
-        .await
-        .is_ok_and(|result| result.is_some()));
-    s3_client.cleanup_after_testing().await;
-}
-
-#[tokio::test(flavor = "multi_thread")]
-async fn publishes_transaction_for_each_published_block() {
-    let (blocks_broadcaster, _) = broadcast::channel::<ImporterResult>(1);
-    let s3_client = Arc::new(S3Client::new_for_testing().await);
-    let publisher = new_publisher(blocks_broadcaster.clone(), &s3_client).await;
-
-    publish_block(&publisher, &blocks_broadcaster).await;
-
-    assert!(publisher
-        .get_fuel_streams()
-        .transactions()
-        .get_last_published(TransactionsSubject::WILDCARD)
-        .await
-        .is_ok_and(|result| result.is_some()));
-    s3_client.cleanup_after_testing().await;
-}
-
-#[tokio::test(flavor = "multi_thread")]
-async fn publishes_receipts() {
-    let (blocks_broadcaster, _) = broadcast::channel::<ImporterResult>(1);
-
-    let receipts = [
-        FuelCoreReceipt::Call {
-            id: FuelCoreContractId::default(),
-            to: Default::default(),
-            amount: 0,
-            asset_id: Default::default(),
-            gas: 0,
-            param1: 0,
-            param2: 0,
-            pc: 0,
-            is: 0,
-        },
-        FuelCoreReceipt::Return {
-            id: FuelCoreContractId::default(),
-            val: 0,
-            pc: 0,
-            is: 0,
-        },
-        FuelCoreReceipt::ReturnData {
-            id: FuelCoreContractId::default(),
-            ptr: 0,
-            len: 0,
-            digest: FuelCoreBytes32::default(),
-            pc: 0,
-            is: 0,
-            data: None,
-        },
-        FuelCoreReceipt::Revert {
-            id: FuelCoreContractId::default(),
-            ra: 0,
-            pc: 0,
-            is: 0,
-        },
-        FuelCoreReceipt::Log {
-            id: FuelCoreContractId::default(),
-            ra: 0,
-            rb: 0,
-            rc: 0,
-            rd: 0,
-            pc: 0,
-            is: 0,
-        },
-        FuelCoreReceipt::LogData {
-            id: FuelCoreContractId::default(),
-            ra: 0,
-            rb: 0,
-            ptr: 0,
-            len: 0,
-            digest: FuelCoreBytes32::default(),
-            pc: 0,
-            is: 0,
-            data: None,
-        },
-        FuelCoreReceipt::Transfer {
-            id: FuelCoreContractId::default(),
-            to: FuelCoreContractId::default(),
-            amount: 0,
-            asset_id: FuelCoreAssetId::default(),
-            pc: 0,
-            is: 0,
-        },
-        FuelCoreReceipt::TransferOut {
-            id: FuelCoreContractId::default(),
-            to: FuelCoreAddress::default(),
-            amount: 0,
-            asset_id: FuelCoreAssetId::default(),
-            pc: 0,
-            is: 0,
-        },
-        FuelCoreReceipt::Mint {
-            sub_id: FuelCoreBytes32::default(),
-            contract_id: FuelCoreContractId::default(),
-            val: 0,
-            pc: 0,
-            is: 0,
-        },
-        FuelCoreReceipt::Burn {
-            sub_id: FuelCoreBytes32::default(),
-            contract_id: FuelCoreContractId::default(),
-            val: 0,
-            pc: 0,
-            is: 0,
-        },
-    ];
-
-    let fuel_core = TestFuelCore::default(blocks_broadcaster.clone())
-        .with_receipts(receipts.to_vec())
-        .arc();
-
-    let s3_client = Arc::new(S3Client::new_for_testing().await);
-    let publisher =
-        Publisher::new_for_testing(&nats_client().await, &s3_client, fuel_core)
-            .await
-            .unwrap();
-
-    publish_block(&publisher, &blocks_broadcaster).await;
-
-    let mut receipts_stream = publisher
-        .get_fuel_streams()
-        .receipts()
-        .catchup(10)
-        .await
-        .unwrap();
-
-    let receipts: HashSet<Receipt> = receipts.iter().map(Into::into).collect();
-    while let Some(Some(receipt)) = receipts_stream.next().await {
-        assert!(receipts.contains(&receipt));
-    }
-
-    s3_client.cleanup_after_testing().await;
-}
-
-#[tokio::test(flavor = "multi_thread")]
-async fn publishes_inputs() {
-    let (blocks_broadcaster, _) = broadcast::channel::<ImporterResult>(1);
-    let s3_client = Arc::new(S3Client::new_for_testing().await);
-    let publisher = new_publisher(blocks_broadcaster.clone(), &s3_client).await;
-
-    publish_block(&publisher, &blocks_broadcaster).await;
-
-    assert!(publisher
-        .get_fuel_streams()
-        .inputs()
-        .get_last_published(InputsByIdSubject::WILDCARD)
-        .await
-        .is_ok_and(|result| result.is_some()));
-    s3_client.cleanup_after_testing().await;
-}
-
-async fn new_publisher(
-    broadcaster: Sender<ImporterResult>,
-    s3_client: &Arc<S3Client>,
-) -> Publisher {
-    let fuel_core = TestFuelCore::default(broadcaster).arc();
-    Publisher::new_for_testing(&nats_client().await, s3_client, fuel_core)
-        .await
-        .unwrap()
-}
-
-async fn publish_block(
-    publisher: &Publisher,
-    blocks_broadcaster: &Sender<ImporterResult>,
-) {
-    let shutdown_controller = start_publisher(publisher).await;
-    send_block(blocks_broadcaster);
-    stop_publisher(shutdown_controller).await;
-}
-
-async fn start_publisher(publisher: &Publisher) -> ShutdownController {
-    let (shutdown_controller, shutdown_token) = get_controller_and_token();
-    tokio::spawn({
-        let publisher = publisher.clone();
-        async move {
-            publisher.run(shutdown_token, true).await.unwrap();
-        }
-    });
-    wait_for_publisher_to_start().await;
-    shutdown_controller
-}
-async fn stop_publisher(shutdown_controller: ShutdownController) {
-    wait_for_publisher_to_process_block().await;
-
-    assert!(shutdown_controller.initiate_shutdown().is_ok());
-}
-
-async fn wait_for_publisher_to_start() {
-    tokio::time::sleep(std::time::Duration::from_secs(1)).await;
-}
-async fn wait_for_publisher_to_process_block() {
-    tokio::time::sleep(std::time::Duration::from_secs(1)).await;
-}
-
-fn send_block(broadcaster: &Sender<ImporterResult>) {
-    let block = create_test_block();
-    assert!(broadcaster.send(block).is_ok());
-}
-fn create_test_block() -> ImporterResult {
-    let mut block_entity = FuelCoreBlock::default();
-    let tx = FuelCoreTransaction::default_test_tx();
-
-    *block_entity.transactions_mut() = vec![tx];
-
-    ImporterResult {
-        shared_result: Arc::new(FuelCoreImportResult {
-            sealed_block: SealedBlock {
-                entity: block_entity,
-                ..Default::default()
-            },
-            ..Default::default()
-        }),
-        changes: Arc::new(HashMap::new()),
-    }
-}
-
-async fn nats_client() -> NatsClient {
-    let opts = NatsClientOpts::admin_opts().with_rdn_namespace();
-    NatsClient::connect(&opts)
-        .await
-        .expect("NATS connection failed")
-}
+// use std::{collections::HashMap, sync::Arc};
+
+// use fuel_core::{
+//     combined_database::CombinedDatabase,
+//     service::{Config, FuelService},
+//     ShutdownListener,
+// };
+// use fuel_core_importer::ImporterResult;
+// use fuel_core_types::blockchain::SealedBlock;
+// use fuel_streams_core::prelude::*;
+// use tokio::sync::broadcast::{self, Receiver, Sender};
+
+// // TODO - Re-implement with `mockall` and `mock` macros
+// struct TestFuelCore {
+//     fuel_service: FuelService,
+//     chain_id: FuelCoreChainId,
+//     base_asset_id: FuelCoreAssetId,
+//     database: CombinedDatabase,
+//     blocks_broadcaster: Sender<ImporterResult>,
+//     receipts: Option<Vec<FuelCoreReceipt>>,
+// }
+
+// impl TestFuelCore {
+//     fn default(
+//         blocks_broadcaster: Sender<ImporterResult>,
+//     ) -> Self {
+//         let mut shutdown = ShutdownListener::spawn();
+//         let service = FuelService::new(
+//             Default::default(),
+//             Config::local_node(),
+//             &mut shutdown,
+//         )
+//         .unwrap();
+//         Self {
+//             fuel_service: service,
+//             chain_id: FuelCoreChainId::default(),
+//             base_asset_id: FuelCoreAssetId::zeroed(),
+//             database: CombinedDatabase::default(),
+//             blocks_broadcaster,
+//             receipts: None,
+//         }
+//     }
+//     fn with_receipts(mut self, receipts: Vec<FuelCoreReceipt>) -> Self {
+//         self.receipts = Some(receipts);
+//         self
+//     }
+//     fn arc(self) -> Arc<Self> {
+//         Arc::new(self)
+//     }
+// }
+
+// #[async_trait::async_trait]
+// impl FuelCoreLike for TestFuelCore {
+//     async fn start(&self) -> anyhow::Result<()> {
+//         Ok(())
+//     }
+//     fn is_started(&self) -> bool {
+//         true
+//     }
+//     fn fuel_service(&self) -> &FuelService {
+//         &self.fuel_service
+//     }
+//     async fn await_synced_at_least_once(
+//         &self,
+//         _historical: bool,
+//     ) -> anyhow::Result<()> {
+//         Ok(())
+//     }
+//     async fn stop(&self) {}
+
+//     async fn await_offchain_db_sync(
+//         &self,
+//         _block_id: &FuelCoreBlockId,
+//     ) -> anyhow::Result<()> {
+//         Ok(())
+//     }
+
+//     fn base_asset_id(&self) -> &FuelCoreAssetId {
+//         &self.base_asset_id
+//     }
+//     fn chain_id(&self) -> &FuelCoreChainId {
+//         &self.chain_id
+//     }
+
+//     fn database(&self) -> &CombinedDatabase {
+//         &self.database
+//     }
+
+//     fn blocks_subscription(
+//         &self,
+//     ) -> Receiver<ImporterResult> {
+//         self.blocks_broadcaster.subscribe()
+//     }
+
+//     fn get_receipts(
+//         &self,
+//         _tx_id: &FuelCoreBytes32,
+//     ) -> anyhow::Result<Option<Vec<FuelCoreReceipt>>> {
+//         Ok(self.receipts.clone())
+//     }
+
+//     fn get_tx_status(
+//         &self,
+//         _tx_id: &FuelCoreBytes32,
+//     ) -> anyhow::Result<Option<FuelCoreTransactionStatus>> {
+//         Ok(Some(FuelCoreTransactionStatus::Success {
+//             receipts: self.receipts.clone().unwrap_or_default(),
+//             block_height: 0.into(),
+//             result: None,
+//             time: FuelCoreTai64::now(),
+//             total_gas: 0,
+//             total_fee: 0,
+//         }))
+//     }
+// }
+
+// #[tokio::test(flavor = "multi_thread")]
+// async fn doesnt_publish_any_message_when_no_block_has_been_mined() {
+//     let (blocks_broadcaster, _) = broadcast::channel::<ImporterResult>(1);
+//     let s3_client = Arc::new(S3Client::new_for_testing().await);
+//     let publisher = new_publisher(blocks_broadcaster.clone(), &s3_client).await;
+
+//     let shutdown_controller = start_publisher(&publisher).await;
+//     stop_publisher(shutdown_controller).await;
+
+//     assert!(publisher.get_fuel_streams().is_empty().await);
+// }
+
+// #[tokio::test(flavor = "multi_thread")]
+// async fn publishes_a_block_message_when_a_single_block_has_been_mined() {
+//     let (blocks_broadcaster, _) = broadcast::channel::<ImporterResult>(1);
+//     let s3_client = Arc::new(S3Client::new_for_testing().await);
+//     let publisher = new_publisher(blocks_broadcaster.clone(), &s3_client).await;
+
+//     publish_block(&publisher, &blocks_broadcaster).await;
+
+//     assert!(publisher
+//         .get_fuel_streams()
+//         .blocks()
+//         .get_last_published(BlocksSubject::WILDCARD)
+//         .await
+//         .is_ok_and(|result| result.is_some()));
+//     s3_client.cleanup_after_testing().await;
+// }
+
+// #[tokio::test(flavor = "multi_thread")]
+// async fn publishes_transaction_for_each_published_block() {
+//     let (blocks_broadcaster, _) = broadcast::channel::<ImporterResult>(1);
+//     let s3_client = Arc::new(S3Client::new_for_testing().await);
+//     let publisher = new_publisher(blocks_broadcaster.clone(), &s3_client).await;
+
+//     publish_block(&publisher, &blocks_broadcaster).await;
+
+//     assert!(publisher
+//         .get_fuel_streams()
+//         .transactions()
+//         .get_last_published(TransactionsSubject::WILDCARD)
+//         .await
+//         .is_ok_and(|result| result.is_some()));
+//     s3_client.cleanup_after_testing().await;
+// }
+
+// #[tokio::test(flavor = "multi_thread")]
+// async fn publishes_receipts() {
+//     let (blocks_broadcaster, _) = broadcast::channel::<ImporterResult>(1);
+
+//     let receipts = [
+//         FuelCoreReceipt::Call {
+//             id: FuelCoreContractId::default(),
+//             to: Default::default(),
+//             amount: 0,
+//             asset_id: Default::default(),
+//             gas: 0,
+//             param1: 0,
+//             param2: 0,
+//             pc: 0,
+//             is: 0,
+//         },
+//         FuelCoreReceipt::Return {
+//             id: FuelCoreContractId::default(),
+//             val: 0,
+//             pc: 0,
+//             is: 0,
+//         },
+//         FuelCoreReceipt::ReturnData {
+//             id: FuelCoreContractId::default(),
+//             ptr: 0,
+//             len: 0,
+//             digest: FuelCoreBytes32::default(),
+//             pc: 0,
+//             is: 0,
+//             data: None,
+//         },
+//         FuelCoreReceipt::Revert {
+//             id: FuelCoreContractId::default(),
+//             ra: 0,
+//             pc: 0,
+//             is: 0,
+//         },
+//         FuelCoreReceipt::Log {
+//             id: FuelCoreContractId::default(),
+//             ra: 0,
+//             rb: 0,
+//             rc: 0,
+//             rd: 0,
+//             pc: 0,
+//             is: 0,
+//         },
+//         FuelCoreReceipt::LogData {
+//             id: FuelCoreContractId::default(),
+//             ra: 0,
+//             rb: 0,
+//             ptr: 0,
+//             len: 0,
+//             digest: FuelCoreBytes32::default(),
+//             pc: 0,
+//             is: 0,
+//             data: None,
+//         },
+//         FuelCoreReceipt::Transfer {
+//             id: FuelCoreContractId::default(),
+//             to: FuelCoreContractId::default(),
+//             amount: 0,
+//             asset_id: FuelCoreAssetId::default(),
+//             pc: 0,
+//             is: 0,
+//         },
+//         FuelCoreReceipt::TransferOut {
+//             id: FuelCoreContractId::default(),
+//             to: FuelCoreAddress::default(),
+//             amount: 0,
+//             asset_id: FuelCoreAssetId::default(),
+//             pc: 0,
+//             is: 0,
+//         },
+//         FuelCoreReceipt::Mint {
+//             sub_id: FuelCoreBytes32::default(),
+//             contract_id: FuelCoreContractId::default(),
+//             val: 0,
+//             pc: 0,
+//             is: 0,
+//         },
+//         FuelCoreReceipt::Burn {
+//             sub_id: FuelCoreBytes32::default(),
+//             contract_id: FuelCoreContractId::default(),
+//             val: 0,
+//             pc: 0,
+//             is: 0,
+//         },
+//     ];
+
+//     let fuel_core = TestFuelCore::default(blocks_broadcaster.clone())
+//         .with_receipts(receipts.to_vec())
+//         .arc();
+
+//     let s3_client = Arc::new(S3Client::new_for_testing().await);
+//     let publisher =
+//         Publisher::new_for_testing(&nats_client().await, &s3_client, fuel_core)
+//             .await
+//             .unwrap();
+
+//     publish_block(&publisher, &blocks_broadcaster).await;
+
+//     let mut receipts_stream = publisher
+//         .get_fuel_streams()
+//         .receipts()
+//         .catchup(10)
+//         .await
+//         .unwrap();
+
+//     let expected_receipts: Vec<Receipt> =
+//         receipts.iter().map(Into::into).collect();
+//     let mut found_receipts = Vec::new();
+
+//     while let Some(Some(receipt)) = receipts_stream.next().await {
+//         found_receipts.push(receipt);
+//     }
+//     assert_eq!(
+//         found_receipts.len(),
+//         expected_receipts.len(),
+//         "Number of receipts doesn't match"
+//     );
+
+//     // Create sets of receipt identifiers
+//     let found_ids: std::collections::HashSet<_> = found_receipts
+//         .into_iter()
+//         .map(|r| match r {
+//             Receipt::Call(r) => r.id,
+//             Receipt::Return(r) => r.id,
+//             Receipt::ReturnData(r) => r.id,
+//             Receipt::Revert(r) => r.id,
+//             Receipt::Log(r) => r.id,
+//             Receipt::LogData(r) => r.id,
+//             Receipt::Transfer(r) => r.id,
+//             Receipt::TransferOut(r) => r.id,
+//             Receipt::Mint(r) => r.contract_id,
+//             Receipt::Burn(r) => r.contract_id,
+//             Receipt::Panic(r) => r.id,
+//             _ => unreachable!(),
+//         })
+//         .collect();
+
+//     let expected_ids: std::collections::HashSet<_> = expected_receipts
+//         .into_iter()
+//         .map(|r| match r {
+//             Receipt::Call(r) => r.id,
+//             Receipt::Return(r) => r.id,
+//             Receipt::ReturnData(r) => r.id,
+//             Receipt::Revert(r) => r.id,
+//             Receipt::Log(r) => r.id,
+//             Receipt::LogData(r) => r.id,
+//             Receipt::Transfer(r) => r.id,
+//             Receipt::TransferOut(r) => r.id,
+//             Receipt::Mint(r) => r.contract_id,
+//             Receipt::Burn(r) => r.contract_id,
+//             Receipt::Panic(r) => r.id,
+//             _ => unreachable!(),
+//         })
+//         .collect();
+
+//     assert_eq!(
+//         found_ids, expected_ids,
+//         "Published receipt IDs don't match expected IDs"
+//     );
+
+//     s3_client.cleanup_after_testing().await;
+// }
+
+// #[tokio::test(flavor = "multi_thread")]
+// async fn publishes_inputs() {
+//     let (blocks_broadcaster, _) = broadcast::channel::<ImporterResult>(1);
+//     let s3_client = Arc::new(S3Client::new_for_testing().await);
+//     let publisher = new_publisher(blocks_broadcaster.clone(), &s3_client).await;
+
+//     publish_block(&publisher, &blocks_broadcaster).await;
+
+//     assert!(publisher
+//         .get_fuel_streams()
+//         .inputs()
+//         .get_last_published(InputsByIdSubject::WILDCARD)
+//         .await
+//         .is_ok_and(|result| result.is_some()));
+//     s3_client.cleanup_after_testing().await;
+// }
+
+// async fn new_publisher(
+//     broadcaster: Sender<ImporterResult>,
+//     s3_client: &Arc<S3Client>,
+// ) -> Publisher {
+//     let fuel_core = TestFuelCore::default(broadcaster).arc();
+//     Publisher::new_for_testing(&nats_client().await, s3_client, fuel_core)
+//         .await
+//         .unwrap()
+// }
+
+// async fn publish_block(
+//     publisher: &Publisher,
+//     blocks_broadcaster: &Sender<ImporterResult>,
+// ) {
+//     let shutdown_controller = start_publisher(publisher).await;
+//     send_block(blocks_broadcaster);
+//     stop_publisher(shutdown_controller).await;
+// }
+
+// async fn start_publisher(publisher: &Publisher) -> ShutdownController {
+//     let (shutdown_controller, shutdown_token) = get_controller_and_token();
+//     tokio::spawn({
+//         let publisher = publisher.clone();
+//         async move {
+//             publisher.run(shutdown_token, true).await.unwrap();
+//         }
+//     });
+//     wait_for_publisher_to_start().await;
+//     shutdown_controller
+// }
+// async fn stop_publisher(shutdown_controller: ShutdownController) {
+//     wait_for_publisher_to_process_block().await;

+//     assert!(shutdown_controller.initiate_shutdown().is_ok());
+// }
+
+// async fn wait_for_publisher_to_start() {
+//     tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+// }
+// async fn wait_for_publisher_to_process_block() {
+//     tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+// }
+
+// fn send_block(broadcaster: &Sender<ImporterResult>) {
+//     let block = create_test_block();
+//     assert!(broadcaster.send(block).is_ok());
+// }
+// fn create_test_block() -> ImporterResult {
+//     let mut block_entity = FuelCoreBlock::default();
+//     let tx = FuelCoreTransaction::default_test_tx();
+
+//     *block_entity.transactions_mut() = vec![tx];
+
+//     ImporterResult {
+//         shared_result: Arc::new(FuelCoreImportResult {
+//             sealed_block: SealedBlock {
+//                 entity: block_entity,
+//                 ..Default::default()
+//             },
+//             ..Default::default()
+//         }),
+//         changes: Arc::new(HashMap::new()),
+//     }
+// }
+
+// async fn nats_client() -> NatsClient {
+//     let opts = NatsClientOpts::admin_opts().with_rdn_namespace();
+//     NatsClient::connect(&opts)
+//         .await
+//         .expect("NATS connection failed")
+// }
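The suite above is parked behind comments pending the `mockall` rewrite flagged in its TODO. A rough sketch of that direction, abridged to three of the `FuelCoreLike` methods visible in this file; the real trait has many more methods, all of which the mock would need, and `FuelCoreLike` is assumed to be in scope:

```rust
use mockall::mock;

// Sketch only: generates MockFuelCore with expectation helpers per method.
mock! {
    pub FuelCore {}

    #[async_trait::async_trait]
    impl FuelCoreLike for FuelCore {
        async fn start(&self) -> anyhow::Result<()>;
        fn is_started(&self) -> bool;
        async fn stop(&self);
    }
}

#[tokio::test]
async fn mocked_core_reports_started() {
    let mut core = MockFuelCore::new();
    // Stub the method instead of spinning up a real FuelService.
    core.expect_is_started().return_const(true);
    assert!(core.is_started());
}
```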