diff --git a/.github/scripts/generate-prdoc.py b/.github/scripts/generate-prdoc.py index 43e8437a0c960..e30a338cb5f1f 100644 --- a/.github/scripts/generate-prdoc.py +++ b/.github/scripts/generate-prdoc.py @@ -39,17 +39,18 @@ def from_pr_number(n, audience, bump, force): def translate_audience(audience): aliases = { 'runtime_dev': 'Runtime Dev', - 'runtime_user': 'Runtime Operator', + 'runtime_user': 'Runtime User', 'node_dev': 'Node Dev', - 'node_user': 'Node User', + 'node_operator': 'Node Operator', + 'todo': 'Todo', } - if audience in aliases: - to = aliases[audience] - print(f"Translated audience '{audience}' to '{to}'") - audience = to + mapped = [aliases.get(a) for a in audience] + if len(mapped) == 1: + mapped = mapped[0] - return audience + print(f"Translated audience '{audience}' to '{mapped}'") + return mapped def create_prdoc(pr, audience, title, description, patch, bump, force): path = f"prdoc/pr_{pr}.prdoc" @@ -128,7 +129,7 @@ def yaml_multiline_string_presenter(dumper, data): # parse_args is also used by cmd/cmd.py # if pr_required is False, then --pr is optional, as it can be derived from the PR comment body def setup_parser(parser=None, pr_required=True): - allowed_audiences = ["runtime_dev", "runtime_user", "node_dev", "node_operator"] + allowed_audiences = ["runtime_dev", "runtime_user", "node_dev", "node_operator", "todo"] if parser is None: parser = argparse.ArgumentParser() parser.add_argument("--pr", type=int, required=pr_required, help="The PR number to generate the PrDoc for.") @@ -144,11 +145,7 @@ def main(args): print(f"Args: {args}, force: {args.force}") setup_yaml() try: - # Convert snake_case audience arguments to title case - mapped_audiences = [snake_to_title(a) for a in args.audience] - if len(mapped_audiences) == 1: - mapped_audiences = mapped_audiences[0] - from_pr_number(args.pr, mapped_audiences, args.bump, args.force) + from_pr_number(args.pr, args.audience, args.bump, args.force) return 0 except Exception as e: print(f"Error 
generating prdoc: {e}") diff --git a/.github/workflows/build-publish-images.yml b/.github/workflows/build-publish-images.yml index 01b2c0cbfd80c..afc4297afdb65 100644 --- a/.github/workflows/build-publish-images.yml +++ b/.github/workflows/build-publish-images.yml @@ -240,10 +240,6 @@ jobs: steps: - name: Checkout uses: actions/checkout@v4 - with: - # tldr: we need to checkout the branch HEAD explicitly because of our dynamic versioning approach while building the substrate binary - # see https://github.com/paritytech/ci_cd/issues/682#issuecomment-1340953589 - ref: ${{ github.head_ref || github.ref_name }} - name: build run: | mkdir -p ./artifacts/substrate/ diff --git a/.github/workflows/tests-evm.yml b/.github/workflows/tests-evm.yml new file mode 100644 index 0000000000000..2c98fa39d23a3 --- /dev/null +++ b/.github/workflows/tests-evm.yml @@ -0,0 +1,104 @@ +name: EVM test suite + +on: + push: + branches: + - master + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + merge_group: +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + preflight: + uses: ./.github/workflows/reusable-preflight.yml + + evm-test-suite: + needs: [preflight] + runs-on: ${{ needs.preflight.outputs.RUNNER }} + if: ${{ needs.preflight.outputs.changes_rust }} + timeout-minutes: 60 + container: + image: ${{ needs.preflight.outputs.IMAGE }} + env: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. 
+ RUSTFLAGS: "-C debug-assertions" + RUST_BACKTRACE: 1 + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: script + run: | + forklift cargo build --locked --profile production -p pallet-revive-eth-rpc --bin eth-rpc + forklift cargo build --bin substrate-node + + - name: Checkout evm-tests + uses: actions/checkout@v4 + with: + repository: paritytech/evm-test-suite + ref: 7762a35a380023a800d213b8ff98f3fb45500661 + path: evm-test-suite + + - uses: actions/setup-node@v4 + with: + node-version: 22 + + - name: script + env: + # EVM tests don't work with batchSize 300 on self-hosted runners in docker container + BATCH_SIZE: 100 + run: | + echo "Change to the evm-test-suite directory" + cd evm-test-suite + echo "Download the resolc binary" + wget https://github.com/paritytech/revive/releases/download/v0.1.0-dev.9/resolc -q + chmod +x resolc + mv resolc /usr/local/bin + resolc --version + + echo "Check that binaries are in place" + export NODE_BIN_PATH=$(readlink -f ../target/debug/substrate-node) + export ETH_RPC_PATH=$(readlink -f ../target/production/eth-rpc) + export RESOLC_PATH=/usr/local/bin/resolc + echo $NODE_BIN_PATH $ETH_RPC_PATH $RESOLC_PATH + + echo "Install npm dependencies" + npm install + # cat matter-labs-tests/hardhat.config.ts | grep batchSize + + echo "Installing solc" + wget https://github.com/ethereum/solidity/releases/download/v0.8.28/solc-static-linux -q + chmod +x solc-static-linux + mv solc-static-linux /usr/local/bin/solc + echo "Run the tests" + echo "bash init.sh --kitchensink -- --matter-labs -- $NODE_BIN_PATH $ETH_RPC_PATH $RESOLC_PATH" + bash init.sh --kitchensink -- --matter-labs -- $NODE_BIN_PATH $ETH_RPC_PATH $RESOLC_PATH + + - name: Collect tests results + if: always() + uses: actions/upload-artifact@v4 + with: + name: evm-test-suite-${{ github.sha }} + path: evm-test-suite/test-logs/matter-labs-tests.log + + confirm-required-test-evm-jobs-passed: + runs-on: ubuntu-latest + name: All test misc tests passed + # If any new 
job gets added, be sure to add it to this array + needs: + - evm-test-suite + if: always() && !cancelled() + steps: + - run: | + tee resultfile <<< '${{ toJSON(needs) }}' + FAILURES=$(cat resultfile | grep '"result": "failure"' | wc -l) + if [ $FAILURES -gt 0 ]; then + echo "### At least one required job failed ❌" >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo '### Good job! All the required jobs passed 🚀' >> $GITHUB_STEP_SUMMARY + fi diff --git a/.github/workflows/zombienet-reusable-preflight.yml b/.github/workflows/zombienet-reusable-preflight.yml index f5ac43741ad62..decbe11a4609c 100644 --- a/.github/workflows/zombienet-reusable-preflight.yml +++ b/.github/workflows/zombienet-reusable-preflight.yml @@ -65,6 +65,11 @@ on: SOURCE_REF_SLUG: value: ${{ jobs.preflight.outputs.SOURCE_REF_SLUG }} + BUILD_RUN_ID: + value: ${{ jobs.wait_build_images.outputs.BUILD_RUN_ID }} + description: | + Id of the build run, needed to download the artifacts. + # Zombie vars PUSHGATEWAY_URL: value: ${{ jobs.preflight.outputs.PUSHGATEWAY_URL }} @@ -216,10 +221,13 @@ jobs: needs: [ci-env] runs-on: ubuntu-latest timeout-minutes: 30 + outputs: + BUILD_RUN_ID: ${{ steps.wait_build.outputs.BUILD_RUN_ID }} steps: - name: Checkout uses: actions/checkout@v4 - name: Wait until "Build and push images" workflow is done + id: wait_build env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | @@ -239,4 +247,15 @@ jobs: fi sleep 10 done - echo "CI workflow is done." + + #check if the build succeeded + RUN_INFO=($(gh run ls -c $SHA -w "Build and push images" --json name,conclusion,databaseId --jq '.[] | select(.name == "Build and push images") | .conclusion, .databaseId')) + CONCLUSION=${RUN_INFO[@]:0:1} + BUILD_RUN_ID=${RUN_INFO[@]:1:1} + if [[ $CONCLUSION == "success" ]]; then + echo "CI workflow succeeded. 
(build run_id: ${BUILD_RUN_ID})" + echo "BUILD_RUN_ID=${BUILD_RUN_ID}" >> $GITHUB_OUTPUT + else + echo "::warning:: CI workflow ('Build and push images') fails with conclusion: $CONCLUSION" + exit 1 + fi; diff --git a/.github/workflows/zombienet_cumulus.yml b/.github/workflows/zombienet_cumulus.yml new file mode 100644 index 0000000000000..c2231060c9aa1 --- /dev/null +++ b/.github/workflows/zombienet_cumulus.yml @@ -0,0 +1,315 @@ +name: Zombienet Cumulus + +on: + push: + branches: + - master + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + merge_group: +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + RUN_IN_CONTAINER: 1 + FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR: 1 + LOCAL_DIR: "./cumulus/zombienet/tests" + GHA_CLUSTER_SERVER_ADDR: "https://kubernetes.default:443" + +# only run if we have changes in [subtrate, cumulus, polkadot] directories or this workflow. +jobs: + preflight: + uses: ./.github/workflows/zombienet-reusable-preflight.yml + + zombienet-cumulus-0001-sync_blocks_from_tip_without_connected_collator: + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_substrate || needs.preflight.outputs.changes_cumulus || needs.preflight.outputs.changes_polkadot }} + runs-on: ${{ needs.preflight.outputs.ZOMBIENET_RUNNER }} # NOTE: should be zombienet-arc-runner (without quotes) + timeout-minutes: 60 + container: + image: ${{ needs.preflight.outputs.ZOMBIENET_IMAGE }} + env: + RELAY_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/polkadot-debug:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + COL_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/test-parachain:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: script + run: | + echo "RELAY_IMAGE: $RELAY_IMAGE" + echo "COL_IMAGE: $COL_IMAGE" + export DEBUG=${{ needs.preflight.outputs.DEBUG }} + 
/home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh \ + --local-dir="$(pwd)/$LOCAL_DIR" \ + --concurrency=1 \ + --test="0001-sync_blocks_from_tip_without_connected_collator.zndsl" + + - name: upload logs + uses: actions/upload-artifact@v4 + with: + name: zombienet-logs-${{ github.job }}-${{ github.sha }} + path: | + /tmp/zombie*/logs/* + + zombienet-cumulus-0002-pov_recovery: + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_substrate || needs.preflight.outputs.changes_cumulus || needs.preflight.outputs.changes_polkadot }} + runs-on: ${{ needs.preflight.outputs.ZOMBIENET_RUNNER }} # NOTE: should be zombienet-arc-runner (without quotes) + timeout-minutes: 60 + container: + image: ${{ needs.preflight.outputs.ZOMBIENET_IMAGE }} + env: + RELAY_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/polkadot-debug:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + COL_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/test-parachain:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: script + run: | + echo "RELAY_IMAGE: $RELAY_IMAGE" + echo "COL_IMAGE: $COL_IMAGE" + export DEBUG=${{ needs.preflight.outputs.DEBUG }} + /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh \ + --local-dir="$(pwd)/$LOCAL_DIR" \ + --concurrency=1 \ + --test="0002-pov_recovery.zndsl" + + - name: upload logs + uses: actions/upload-artifact@v4 + with: + name: zombienet-logs-${{ github.job }}-${{ github.sha }} + path: | + /tmp/zombie*/logs/* + + zombienet-cumulus-0003-full_node_catching_up: + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_substrate || needs.preflight.outputs.changes_cumulus || needs.preflight.outputs.changes_polkadot }} + runs-on: ${{ needs.preflight.outputs.ZOMBIENET_RUNNER }} # NOTE: should be zombienet-arc-runner (without quotes) + timeout-minutes: 60 + container: + image: ${{ needs.preflight.outputs.ZOMBIENET_IMAGE }} + env: + RELAY_IMAGE: "${{ 
needs.preflight.outputs.TEMP_IMAGES_BASE }}/polkadot-debug:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + COL_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/test-parachain:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: script + run: | + echo "RELAY_IMAGE: $RELAY_IMAGE" + echo "COL_IMAGE: $COL_IMAGE" + export DEBUG=${{ needs.preflight.outputs.DEBUG }} + /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh \ + --local-dir="$(pwd)/$LOCAL_DIR" \ + --concurrency=1 \ + --test="0003-full_node_catching_up.zndsl" + + - name: upload logs + uses: actions/upload-artifact@v4 + with: + name: zombienet-logs-${{ github.job }}-${{ github.sha }} + path: | + /tmp/zombie*/logs/* + + zombienet-cumulus-0004-runtime_upgrade: + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_substrate || needs.preflight.outputs.changes_cumulus || needs.preflight.outputs.changes_polkadot }} + runs-on: ${{ needs.preflight.outputs.ZOMBIENET_RUNNER }} # NOTE: should be zombienet-arc-runner (without quotes) + timeout-minutes: 60 + container: + image: ${{ needs.preflight.outputs.ZOMBIENET_IMAGE }} + env: + RELAY_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/polkadot-debug:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + COL_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/test-parachain:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + steps: + - name: Checkout + uses: actions/checkout@v4 + + - uses: actions/download-artifact@v4.1.8 + with: + name: build-test-parachain-${{ needs.preflight.outputs.SOURCE_REF_SLUG }} + github-token: ${{ secrets.GITHUB_TOKEN }} + run-id: ${{ needs.preflight.outputs.BUILD_RUN_ID }} + + - name: tar + run: tar -xvf artifacts.tar + + - name: script + run: | + echo "RELAY_IMAGE: $RELAY_IMAGE" + echo "COL_IMAGE: $COL_IMAGE" + ls -ltr * + cp ./artifacts/zombienet/wasm_binary_spec_version_incremented.rs.compact.compressed.wasm /tmp/ + ls /tmp 
+ export DEBUG=${{ needs.preflight.outputs.DEBUG }} + /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh \ + --local-dir="$(pwd)/$LOCAL_DIR" \ + --concurrency=1 \ + --test="0004-runtime_upgrade.zndsl" + + - name: upload logs + uses: actions/upload-artifact@v4 + with: + name: zombienet-logs-${{ github.job }}-${{ github.sha }} + path: | + /tmp/zombie*/logs/* + + + zombienet-cumulus-0005-migrate_solo_to_para: + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_substrate || needs.preflight.outputs.changes_cumulus || needs.preflight.outputs.changes_polkadot }} + runs-on: ${{ needs.preflight.outputs.ZOMBIENET_RUNNER }} # NOTE: should be zombienet-arc-runner (without quotes) + timeout-minutes: 60 + container: + image: ${{ needs.preflight.outputs.ZOMBIENET_IMAGE }} + env: + RELAY_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/polkadot-debug:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + COL_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/test-parachain:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: script + run: | + echo "RELAY_IMAGE: $RELAY_IMAGE" + echo "COL_IMAGE: $COL_IMAGE" + export DEBUG=${{ needs.preflight.outputs.DEBUG }} + /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh \ + --local-dir="$(pwd)/$LOCAL_DIR" \ + --concurrency=1 \ + --test="0005-migrate_solo_to_para.zndsl" + + - name: upload logs + uses: actions/upload-artifact@v4 + with: + name: zombienet-logs-${{ github.job }}-${{ github.sha }} + path: | + /tmp/zombie*/logs/* + + zombienet-cumulus-0006-rpc_collator_builds_blocks: + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_substrate || needs.preflight.outputs.changes_cumulus || needs.preflight.outputs.changes_polkadot }} + runs-on: ${{ needs.preflight.outputs.ZOMBIENET_RUNNER }} # NOTE: should be zombienet-arc-runner (without quotes) + timeout-minutes: 60 + container: + image: ${{ 
needs.preflight.outputs.ZOMBIENET_IMAGE }} + env: + RELAY_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/polkadot-debug:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + COL_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/test-parachain:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: script + run: | + echo "RELAY_IMAGE: $RELAY_IMAGE" + echo "COL_IMAGE: $COL_IMAGE" + export DEBUG=${{ needs.preflight.outputs.DEBUG }} + /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh \ + --local-dir="$(pwd)/$LOCAL_DIR" \ + --concurrency=1 \ + --test="0006-rpc_collator_builds_blocks.zndsl" + + - name: upload logs + uses: actions/upload-artifact@v4 + with: + name: zombienet-logs-${{ github.job }}-${{ github.sha }} + path: | + /tmp/zombie*/logs/* + + zombienet-cumulus-0007-full_node_warp_sync: + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_substrate || needs.preflight.outputs.changes_cumulus || needs.preflight.outputs.changes_polkadot }} + runs-on: ${{ needs.preflight.outputs.ZOMBIENET_RUNNER }} # NOTE: should be zombienet-arc-runner (without quotes) + timeout-minutes: 60 + container: + image: ${{ needs.preflight.outputs.ZOMBIENET_IMAGE }} + env: + RELAY_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/polkadot-debug:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + COL_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/test-parachain:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: script + run: | + echo "RELAY_IMAGE: $RELAY_IMAGE" + echo "COL_IMAGE: $COL_IMAGE" + export DEBUG=${{ needs.preflight.outputs.DEBUG }} + /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh \ + --local-dir="$(pwd)/$LOCAL_DIR" \ + --concurrency=1 \ + --test="0007-full_node_warp_sync.zndsl" + + - name: upload logs + uses: actions/upload-artifact@v4 + with: + name: 
zombienet-logs-${{ github.job }}-${{ github.sha }} + path: | + /tmp/zombie*/logs/* + + zombienet-cumulus-0008-elastic_authoring: + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_substrate || needs.preflight.outputs.changes_cumulus || needs.preflight.outputs.changes_polkadot }} + runs-on: ${{ needs.preflight.outputs.ZOMBIENET_RUNNER }} # NOTE: should be zombienet-arc-runner (without quotes) + timeout-minutes: 60 + container: + image: ${{ needs.preflight.outputs.ZOMBIENET_IMAGE }} + env: + RELAY_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/polkadot-debug:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + COL_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/test-parachain:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: script + run: | + echo "RELAY_IMAGE: $RELAY_IMAGE" + echo "COL_IMAGE: $COL_IMAGE" + export DEBUG=${{ needs.preflight.outputs.DEBUG }} + /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh \ + --local-dir="$(pwd)/$LOCAL_DIR" \ + --concurrency=1 \ + --test="0008-elastic_authoring.zndsl" + + - name: upload logs + uses: actions/upload-artifact@v4 + with: + name: zombienet-logs-${{ github.job }}-${{ github.sha }} + path: | + /tmp/zombie*/logs/* + + zombienet-cumulus-0009-elastic_pov_recovery: + needs: [preflight] + if: ${{ needs.preflight.outputs.changes_substrate || needs.preflight.outputs.changes_cumulus || needs.preflight.outputs.changes_polkadot }} + runs-on: ${{ needs.preflight.outputs.ZOMBIENET_RUNNER }} # NOTE: should be zombienet-arc-runner (without quotes) + timeout-minutes: 60 + container: + image: ${{ needs.preflight.outputs.ZOMBIENET_IMAGE }} + env: + RELAY_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/polkadot-debug:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + COL_IMAGE: "${{ needs.preflight.outputs.TEMP_IMAGES_BASE }}/test-parachain:${{ needs.preflight.outputs.DOCKER_IMAGES_VERSION }}" + steps: + - 
name: Checkout + uses: actions/checkout@v4 + + - name: script + run: | + echo "RELAY_IMAGE: $RELAY_IMAGE" + echo "COL_IMAGE: $COL_IMAGE" + export DEBUG=${{ needs.preflight.outputs.DEBUG }} + /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh \ + --local-dir="$(pwd)/$LOCAL_DIR" \ + --concurrency=1 \ + --test="0009-elastic_pov_recovery.zndsl" + + - name: upload logs + uses: actions/upload-artifact@v4 + with: + name: zombienet-logs-${{ github.job }}-${{ github.sha }} + path: | + /tmp/zombie*/logs/* diff --git a/.gitlab/pipeline/zombienet.yml b/.gitlab/pipeline/zombienet.yml index c48bca8af48be..227787173f8de 100644 --- a/.gitlab/pipeline/zombienet.yml +++ b/.gitlab/pipeline/zombienet.yml @@ -1,7 +1,7 @@ .zombienet-refs: extends: .build-refs variables: - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.119" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.120" PUSHGATEWAY_URL: "http://zombienet-prometheus-pushgateway.managed-monitoring:9091/metrics/job/zombie-metrics" DEBUG: "zombie,zombie::network-node,zombie::kube::client::logs" ZOMBIE_PROVIDER: "k8s" diff --git a/.gitlab/pipeline/zombienet/polkadot.yml b/.gitlab/pipeline/zombienet/polkadot.yml index 4d8d4947daa5c..1721a9b9c3333 100644 --- a/.gitlab/pipeline/zombienet/polkadot.yml +++ b/.gitlab/pipeline/zombienet/polkadot.yml @@ -112,14 +112,6 @@ zombienet-polkadot-functional-0004-parachains-disputes-garbage-candidate: --local-dir="${LOCAL_DIR}/functional" --test="0004-parachains-garbage-candidate.zndsl" -.zombienet-polkadot-functional-0005-parachains-disputes-past-session: - extends: - - .zombienet-polkadot-common - script: - - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh - --local-dir="${LOCAL_DIR}/functional" - --test="0005-parachains-disputes-past-session.zndsl" - zombienet-polkadot-functional-0006-parachains-max-tranche0: extends: - .zombienet-polkadot-common @@ -188,7 +180,7 @@ zombienet-polkadot-functional-0015-coretime-shared-core: extends: - 
.zombienet-polkadot-common before_script: - - !reference [ .zombienet-polkadot-common, before_script ] + - !reference [.zombienet-polkadot-common, before_script] - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh @@ -207,7 +199,7 @@ zombienet-polkadot-functional-0018-shared-core-idle-parachain: extends: - .zombienet-polkadot-common before_script: - - !reference [ .zombienet-polkadot-common, before_script ] + - !reference [.zombienet-polkadot-common, before_script] - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh @@ -218,7 +210,7 @@ zombienet-polkadot-functional-0019-coretime-collation-fetching-fairness: extends: - .zombienet-polkadot-common before_script: - - !reference [ .zombienet-polkadot-common, before_script ] + - !reference [.zombienet-polkadot-common, before_script] - cp --remove-destination ${LOCAL_DIR}/assign-core.js ${LOCAL_DIR}/functional script: - /home/nonroot/zombie-net/scripts/ci/run-test-local-env-manager.sh @@ -354,7 +346,7 @@ zombienet-polkadot-malus-0001-dispute-valid: - job: build-polkadot-zombienet-tests artifacts: true before_script: - - !reference [ ".zombienet-polkadot-common", "before_script" ] + - !reference [".zombienet-polkadot-common", "before_script"] - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" script: # we want to use `--no-capture` in zombienet tests. 
@@ -369,7 +361,7 @@ zombienet-polkadot-elastic-scaling-slot-based-3cores: - job: build-polkadot-zombienet-tests artifacts: true before_script: - - !reference [ ".zombienet-polkadot-common", "before_script" ] + - !reference [".zombienet-polkadot-common", "before_script"] - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - export CUMULUS_IMAGE="docker.io/paritypr/test-parachain:${PIPELINE_IMAGE_TAG}" - export X_INFRA_INSTANCE=spot # use spot by default @@ -386,7 +378,7 @@ zombienet-polkadot-elastic-scaling-slot-based-12cores: - job: build-polkadot-zombienet-tests artifacts: true before_script: - - !reference [ ".zombienet-polkadot-common", "before_script" ] + - !reference [".zombienet-polkadot-common", "before_script"] - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - export CUMULUS_IMAGE="docker.io/paritypr/test-parachain:${PIPELINE_IMAGE_TAG}" - export X_INFRA_INSTANCE=spot # use spot by default @@ -403,7 +395,7 @@ zombienet-polkadot-elastic-scaling-doesnt-break-parachains: - job: build-polkadot-zombienet-tests artifacts: true before_script: - - !reference [ ".zombienet-polkadot-common", "before_script" ] + - !reference [".zombienet-polkadot-common", "before_script"] - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - export X_INFRA_INSTANCE=spot # use spot by default variables: @@ -421,7 +413,7 @@ zombienet-polkadot-elastic-scaling-basic-3cores: - job: build-polkadot-zombienet-tests artifacts: true before_script: - - !reference [ ".zombienet-polkadot-common", "before_script" ] + - !reference [".zombienet-polkadot-common", "before_script"] - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - export CUMULUS_IMAGE="${COL_IMAGE}" - export X_INFRA_INSTANCE=spot # use spot by default @@ -438,7 +430,7 @@ zombienet-polkadot-functional-sync-backing: - job: build-polkadot-zombienet-tests artifacts: true before_script: - - !reference [ ".zombienet-polkadot-common", "before_script" ] + - !reference 
[".zombienet-polkadot-common", "before_script"] - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" # Hardcoded to an old polkadot-parachain image, pre async backing. - export CUMULUS_IMAGE="docker.io/paritypr/polkadot-parachain-debug:master-99623e62" @@ -456,7 +448,7 @@ zombienet-polkadot-functional-async-backing-6-seconds-rate: - job: build-polkadot-zombienet-tests artifacts: true before_script: - - !reference [ ".zombienet-polkadot-common", "before_script" ] + - !reference [".zombienet-polkadot-common", "before_script"] - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - export X_INFRA_INSTANCE=spot # use spot by default script: @@ -472,7 +464,7 @@ zombienet-polkadot-functional-duplicate-collations: - job: build-polkadot-zombienet-tests artifacts: true before_script: - - !reference [ ".zombienet-polkadot-common", "before_script" ] + - !reference [".zombienet-polkadot-common", "before_script"] - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" - export X_INFRA_INSTANCE=spot # use spot by default script: @@ -480,3 +472,19 @@ zombienet-polkadot-functional-duplicate-collations: - unset NEXTEST_FAILURE_OUTPUT - unset NEXTEST_SUCCESS_OUTPUT - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- functional::duplicate_collations::duplicate_collations_test + +zombienet-polkadot-disputes-slashing: + extends: + - .zombienet-polkadot-common + needs: + - job: build-polkadot-zombienet-tests + artifacts: true + before_script: + - !reference [".zombienet-polkadot-common", "before_script"] + - export POLKADOT_IMAGE="${ZOMBIENET_INTEGRATION_TEST_IMAGE}" + - export X_INFRA_INSTANCE=spot # use spot by default + script: + # we want to use `--no-capture` in zombienet tests. 
+ - unset NEXTEST_FAILURE_OUTPUT + - unset NEXTEST_SUCCESS_OUTPUT + - cargo nextest run --archive-file ./artifacts/polkadot-zombienet-tests.tar.zst --no-capture -- disabling::slashing diff --git a/Cargo.lock b/Cargo.lock index 38857f568fff3..4882caf032bf9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -236,9 +236,9 @@ dependencies = [ "dunce", "heck 0.4.1", "proc-macro-error", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", "syn-solidity 0.4.2", "tiny-keccak", ] @@ -252,9 +252,9 @@ dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", "proc-macro-error2", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -268,9 +268,9 @@ dependencies = [ "heck 0.5.0", "indexmap 2.7.0", "proc-macro-error2", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", "syn-solidity 0.8.15", "tiny-keccak", ] @@ -284,9 +284,9 @@ dependencies = [ "const-hex", "dunce", "heck 0.5.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", "syn-solidity 0.8.15", ] @@ -433,9 +433,9 @@ dependencies = [ "include_dir", "itertools 0.10.5", "proc-macro-error", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -633,7 +633,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" dependencies = [ - "quote 1.0.37", + "quote 1.0.38", "syn 1.0.109", ] @@ -643,7 +643,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" dependencies = [ - "quote 1.0.37", + "quote 1.0.38", "syn 1.0.109", ] @@ -655,7 +655,7 @@ checksum = 
"db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" dependencies = [ "num-bigint", "num-traits", - "quote 1.0.37", + "quote 1.0.38", "syn 1.0.109", ] @@ -667,8 +667,8 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -769,8 +769,8 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -863,9 +863,9 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", "synstructure 0.13.1", ] @@ -875,9 +875,9 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -1288,7 +1288,7 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" dependencies = [ - "quote 1.0.37", + "quote 1.0.38", "syn 1.0.109", ] @@ -1549,9 +1549,9 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -1566,9 +1566,9 @@ version = "0.1.83" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -1647,8 +1647,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -1801,12 +1801,12 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -2041,9 +2041,9 @@ dependencies = [ [[package]] name = "bounded-collections" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d077619e9c237a5d1875166f5e8033e8f6bff0c96f8caf81e1c2d7738c431bf" +checksum = "32ed0a820ed50891d36358e997d27741a6142e382242df40ff01c89bcdcc7a2b" dependencies = [ "log", "parity-scale-codec", @@ -3169,7 +3169,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "846501f4575cd66766a40bb7ab6d8e960adc7eb49f753c8232bd8e0e09cf6ca2" dependencies = [ - "quote 1.0.37", + "quote 1.0.38", "syn 1.0.109", ] @@ -3531,8 +3531,8 @@ checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ "heck 0.4.1", "proc-macro-error", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -3543,9 +3543,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + 
"proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -3584,9 +3584,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb844bd05be34d91eb67101329aeba9d3337094c04fd8507d821db7ebb488eaf" dependencies = [ "proc-macro-error2", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -3762,8 +3762,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d51beaa537d73d2d1ff34ee70bc095f170420ab2ec5d687ecd3ec2b0d092514b" dependencies = [ "nom", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -3846,8 +3846,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a54b9c40054eb8999c5d1d36fdc90e4e5f7ff0d1d9621706f360b3cbc8beb828" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -3858,8 +3858,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb5437e327e861081c91270becff184859f706e3e50f5301a9d4dc8eb50752c3" dependencies = [ "convert_case 0.6.0", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -3926,6 +3926,26 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "const_format" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" +dependencies = [ + "proc-macro2 1.0.93", + "quote 1.0.38", + "unicode-xid 0.2.4", +] + [[package]] name = "constant_time_eq" version = "0.1.5" @@ -5025,9 +5045,9 @@ name = 
"cumulus-pallet-parachain-system-proc-macro" version = "0.6.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -5037,9 +5057,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "befbaf3a1ce23ac8476481484fef5f4d500cbd15b4dad6380ce1d28134b0c1f7" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -5811,9 +5831,9 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -5850,10 +5870,10 @@ dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "scratch", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -5868,9 +5888,9 @@ version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -5891,10 +5911,10 @@ checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "strsim 0.11.1", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -5904,8 +5924,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", - "quote 1.0.37", - "syn 2.0.87", + "quote 1.0.38", + 
"syn 2.0.98", ] [[package]] @@ -5996,8 +6016,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -6007,9 +6027,9 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -6018,9 +6038,9 @@ version = "1.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62d671cc41a825ebabc75757b62d3d168c577f9149b2d49ece1dad1f72119d25" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -6029,9 +6049,9 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -6041,8 +6061,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case 0.4.0", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "rustc_version 0.4.0", "syn 1.0.109", ] @@ -6062,9 +6082,9 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", "unicode-xid 0.2.4", ] @@ -6167,9 
+6187,9 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -6227,10 +6247,10 @@ dependencies = [ "common-path", "derive-syn-parse", "once_cell", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "regex", - "syn 2.0.87", + "syn 2.0.98", "termcolor", "toml 0.8.19", "walkdir", @@ -6291,8 +6311,8 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -6456,9 +6476,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -6476,9 +6496,9 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -6487,9 +6507,9 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -6665,7 +6685,7 @@ checksum = "8c321610643004cf908ec0f5f2aa0d8f1f8e14b540562a2887a1111ff1ecbf7b" dependencies = [ "crunchy", 
"fixed-hash", - "impl-codec 0.7.0", + "impl-codec 0.7.1", "impl-rlp 0.4.0", "impl-serde 0.5.0", "scale-info", @@ -6696,7 +6716,7 @@ checksum = "1ab15ed80916029f878e0267c3a9f92b67df55e79af370bf66199059ae2b4ee3" dependencies = [ "ethbloom 0.14.1", "fixed-hash", - "impl-codec 0.7.0", + "impl-codec 0.7.1", "impl-rlp 0.4.0", "impl-serde 0.5.0", "primitive-types 0.13.1", @@ -6750,9 +6770,9 @@ dependencies = [ "file-guard", "fs-err", "prettyplease", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -6833,9 +6853,9 @@ dependencies = [ "expander", "indexmap 2.7.0", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -6927,9 +6947,9 @@ dependencies = [ [[package]] name = "finality-grandpa" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36530797b9bf31cd4ff126dcfee8170f86b00cfdcea3269d73133cc0415945c3" +checksum = "b4f8f43dc520133541781ec03a8cab158ae8b7f7169cdf22e9050aa6cf0fbdfc" dependencies = [ "either", "futures", @@ -7265,11 +7285,11 @@ dependencies = [ "frame-support 28.0.0", "parity-scale-codec", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "scale-info", "sp-arithmetic 23.0.0", - "syn 2.0.87", + "syn 2.0.98", "trybuild", ] @@ -7280,9 +7300,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8156f209055d352994ecd49e19658c6b469d7c6de923bd79868957d0dcfb6f71" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -7600,8 +7620,8 @@ dependencies = [ "parity-scale-codec", "pretty_assertions", "proc-macro-warning", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "regex", 
"scale-info", "sp-core 28.0.0", @@ -7610,7 +7630,7 @@ dependencies = [ "sp-metadata-ir 0.6.0", "sp-runtime 31.0.1", "static_assertions", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -7627,10 +7647,10 @@ dependencies = [ "itertools 0.11.0", "macro_magic", "proc-macro-warning", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -7639,9 +7659,9 @@ version = "10.0.0" dependencies = [ "frame-support-procedural-tools-derive 11.0.0", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -7652,18 +7672,18 @@ checksum = "bead15a320be1764cdd50458c4cfacb23e0cee65f64f500f8e34136a94c7eeca" dependencies = [ "frame-support-procedural-tools-derive 12.0.0", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "frame-support-procedural-tools-derive" version = "11.0.0" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -7672,9 +7692,9 @@ version = "12.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed971c6435503a099bdac99fe4c5bea08981709e5b5a0a8535a1856f48561191" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -7995,9 +8015,9 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -8707,7 +8727,7 @@ dependencies = [ 
"httpdate", "itoa", "pin-project-lite", - "socket2 0.5.7", + "socket2 0.4.9", "tokio", "tower-service", "tracing", @@ -8923,9 +8943,9 @@ dependencies = [ [[package]] name = "impl-codec" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67aa010c1e3da95bf151bd8b4c059b2ed7e75387cdb969b4f8f2723a43f9941" +checksum = "2d40b9d5e17727407e55028eafc22b2dc68781786e6d7eb8a21103f5058e3a14" dependencies = [ "parity-scale-codec", ] @@ -8990,13 +9010,13 @@ dependencies = [ [[package]] name = "impl-trait-for-tuples" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" +checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 1.0.109", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -9014,8 +9034,8 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", ] [[package]] @@ -9438,9 +9458,9 @@ checksum = "c06c01ae0007548e73412c08e2285ffe5d723195bf268bce67b1b77c3bb2a14d" dependencies = [ "heck 0.5.0", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -10168,9 +10188,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "206e0aa0ebe004d778d79fb0966aa0de996c19894e2c0605ba2f8524dd4443d8" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -10585,8 +10605,8 @@ checksum = "cc33f9f0351468d26fbc53d9ce00a096c8522ecb42f19b50f34f2c422f76d21d" 
dependencies = [ "macro_magic_core", "macro_magic_macros", - "quote 1.0.37", - "syn 2.0.87", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -10598,9 +10618,9 @@ dependencies = [ "const-random", "derive-syn-parse", "macro_magic_core_macros", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -10609,9 +10629,9 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -10621,8 +10641,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73ea28ee64b88876bf45277ed9a5817c1817df061a74f2b988971a12570e5869" dependencies = [ "macro_magic_core", - "quote 1.0.37", - "syn 2.0.87", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -10949,8 +10969,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -10961,9 +10981,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" dependencies = [ "cfg-if", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -11099,8 +11119,8 @@ checksum = "fc076939022111618a5026d3be019fd8b366e76314538ff9a1b59ffbcbf98bcd" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro-error", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", "synstructure 0.12.6", ] @@ -11147,8 +11167,8 @@ version = "0.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "91761aed67d03ad966ef783ae962ef9bbaca728d2dd7ceb7939ec110fffad998" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -11572,9 +11592,9 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -11739,9 +11759,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -11796,8 +11816,8 @@ dependencies = [ "itertools 0.11.0", "petgraph", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -13096,9 +13116,9 @@ dependencies = [ name = "pallet-contracts-proc-macro" version = "18.0.0" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -13107,9 +13127,9 @@ version = "23.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94226cbd48516b7c310eb5dae8d50798c1ce73a7421dc0977c55b7fc2237a283" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -14803,6 +14823,7 @@ dependencies = [ name = "pallet-revive" version = "0.1.0" dependencies = [ + "alloy-core", "array-bytes", "assert_matches", "derive_more 0.99.17", @@ -14894,6 +14915,7 @@ dependencies = [ "pallet-revive 0.1.0", "pallet-revive-fixtures 0.1.0", "parity-scale-codec", + 
"pretty_assertions", "rlp 0.6.1", "sc-cli", "sc-rpc", @@ -14902,6 +14924,7 @@ dependencies = [ "sp-arithmetic 23.0.0", "sp-core 28.0.0", "sp-crypto-hashing 0.1.0", + "sp-runtime 31.0.1", "sp-weights 27.0.0", "sqlx", "static_init", @@ -15009,9 +15032,9 @@ dependencies = [ name = "pallet-revive-proc-macro" version = "0.1.0" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -15020,9 +15043,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cc16d1f7cee6a1ee6e8cd710e16230d59fb4935316c1704cf770e4d2335f8d4" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -15464,10 +15487,10 @@ name = "pallet-staking-reward-curve" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "sp-runtime 31.0.1", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -16405,7 +16428,7 @@ checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" dependencies = [ "bitcoin_hashes 0.13.0", "rand", - "rand_core 0.6.4", + "rand_core 0.5.1", "serde", "unicode-normalization", ] @@ -16438,29 +16461,31 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.12" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +checksum = "c9fde3d0718baf5bc92f577d652001da0f8d54cd03a7974e118d04fc888dc23d" dependencies = [ "arrayvec 0.7.4", "bitvec", "byte-slice-cast", "bytes", + "const_format", "impl-trait-for-tuples", "parity-scale-codec-derive", + "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "3.6.12" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +checksum = "581c837bb6b9541ce7faa9377c20616e4fb7650f6b0f68bc93c827ee504fb7b3" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 1.0.109", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -16487,7 +16512,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.93", "syn 1.0.109", "synstructure 0.12.6", ] @@ -16936,9 +16961,9 @@ checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -16977,9 +17002,9 @@ version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -19867,6 +19892,7 @@ dependencies = [ "subxt", "subxt-signer", "tokio", + "tokio-util", "zombienet-sdk", ] @@ -20040,9 +20066,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c4fdfc49717fb9a196e74a5d28e0bc764eb394a2c803eb11133a31ac996c60c" dependencies = [ "polkavm-common 0.9.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -20052,9 +20078,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7855353a5a783dd5d09e3b915474bddf66575f5a3cf45dec8d1c5e051ba320dc" dependencies = [ "polkavm-common 0.10.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] 
@@ -20064,9 +20090,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12d2840cc62a0550156b1676fed8392271ddf2fab4a00661db56231424674624" dependencies = [ "polkavm-common 0.18.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -20076,9 +20102,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cffca9d51b21153395a192b65698457687bc51daa41026629895542ccaa65c2" dependencies = [ "polkavm-common 0.19.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -20088,7 +20114,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ "polkavm-derive-impl 0.9.0", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -20098,7 +20124,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9324fe036de37c17829af233b46ef6b5562d4a0c09bb7fdb9f8378856dee30cf" dependencies = [ "polkavm-derive-impl 0.10.0", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -20108,7 +20134,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c16669ddc7433e34c1007d31080b80901e3e8e523cb9d4b441c3910cf9294b" dependencies = [ "polkavm-derive-impl 0.18.0", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -20118,7 +20144,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc0dc0cf2e8f4d30874131eccfa36bdabd4a52cfb79c15f8630508abaf06a2a6" dependencies = [ "polkavm-derive-impl 0.19.0", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -20378,8 +20404,8 @@ version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ - "proc-macro2 1.0.86", - "syn 2.0.87", + "proc-macro2 1.0.93", + "syn 2.0.98", 
] [[package]] @@ -20404,7 +20430,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d15600a7d856470b7d278b3fe0e311fe28c2526348549f8ef2ff7db3299c87f5" dependencies = [ "fixed-hash", - "impl-codec 0.7.0", + "impl-codec 0.7.1", "impl-num-traits 0.2.0", "impl-rlp 0.4.0", "impl-serde 0.5.0", @@ -20454,8 +20480,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", "version_check", ] @@ -20466,8 +20492,8 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "version_check", ] @@ -20477,8 +20503,8 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", ] [[package]] @@ -20488,9 +20514,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" dependencies = [ "proc-macro-error-attr2", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -20505,9 +20531,9 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -20521,9 +20547,9 @@ dependencies = [ [[package]] name = "proc-macro2" 
-version = "1.0.86" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] @@ -20586,9 +20612,9 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -20661,7 +20687,7 @@ checksum = "f8650aabb6c35b860610e9cff5dc1af886c9e25073b7b1712a68972af4281302" dependencies = [ "bytes", "heck 0.5.0", - "itertools 0.13.0", + "itertools 0.12.1", "log", "multimap", "once_cell", @@ -20670,7 +20696,7 @@ dependencies = [ "prost 0.13.2", "prost-types", "regex", - "syn 2.0.87", + "syn 2.0.98", "tempfile", ] @@ -20682,8 +20708,8 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -20695,9 +20721,9 @@ checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.12.1", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -20707,10 +20733,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" dependencies = [ "anyhow", - "itertools 0.13.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "itertools 0.12.1", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -20896,11 +20922,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.37" +version = "1.0.38" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.93", ] [[package]] @@ -21118,9 +21144,9 @@ version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -21770,12 +21796,12 @@ checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" dependencies = [ "cfg-if", "glob", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "regex", "relative-path", "rustc_version 0.4.0", - "syn 2.0.87", + "syn 2.0.98", "unicode-ident", ] @@ -22333,9 +22359,9 @@ name = "sc-chain-spec-derive" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -23276,6 +23302,7 @@ dependencies = [ name = "sc-network-test" version = "0.8.0" dependencies = [ + "async-channel 1.9.0", "async-trait", "futures", "futures-timer", @@ -23782,9 +23809,9 @@ name = "sc-tracing-proc-macro" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -23903,9 +23930,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ed9401effa946b493f9f84dc03714cca98119b230497df6f3df6b84a2b03648" dependencies = [ "darling", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -23931,9 +23958,9 @@ checksum = 
"102fbc6236de6c53906c0b262f12c7aa69c2bdc604862c12728f5f4d370bc137" dependencies = [ "darling", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -23957,9 +23984,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -23978,10 +24005,10 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc4c70c7fea2eef1740f0081d3fe385d8bee1eef11e9272d3bec7dc8e5438e0" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "scale-info", - "syn 2.0.87", + "syn 2.0.98", "thiserror", ] @@ -24032,8 +24059,8 @@ version = "0.8.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0f696e21e10fa546b7ffb1c9672c6de8fbc7a81acf59524386d8639bf12737" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "serde_derive_internals", "syn 1.0.109", ] @@ -24282,9 +24309,9 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.214" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] @@ -24319,13 +24346,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.214" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" +checksum = 
"5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -24334,8 +24361,8 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -25712,9 +25739,9 @@ dependencies = [ "blake2 0.10.6", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -25727,9 +25754,9 @@ dependencies = [ "blake2 0.10.6", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -25742,9 +25769,9 @@ dependencies = [ "blake2 0.10.6", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -26366,6 +26393,53 @@ dependencies = [ "zeroize", ] +[[package]] +name = "sp-core" +version = "35.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4532774405a712a366a98080cbb4daa28c38ddff0ec595902ad6ee6a78a809f8" +dependencies = [ + "array-bytes", + "bitflags 1.3.2", + "blake2 0.10.6", + "bounded-collections", + "bs58", + "dyn-clonable", + "ed25519-zebra 4.0.3", + "futures", + "hash-db", + "hash256-std-hasher", + "impl-serde 0.5.0", + "itertools 0.11.0", + "k256", + "libsecp256k1", + "log", + "merlin", + "parity-bip39", + "parity-scale-codec", + "parking_lot 0.12.3", + "paste", + "primitive-types 0.13.1", + "rand", + "scale-info", + "schnorrkel 0.11.4", + "secp256k1 0.28.2", + "secrecy 0.8.0", + "serde", + "sp-crypto-hashing 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-externalities 0.30.0", + "sp-runtime-interface 29.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 22.0.0", + "ss58-registry", + "substrate-bip39 0.6.0", + "thiserror", + "tracing", + "w3f-bls", + "zeroize", +] + [[package]] name = "sp-core-fuzz" version = "0.0.0" @@ -26491,9 +26565,9 @@ dependencies = [ name = "sp-crypto-hashing-proc-macro" version = "0.1.0" dependencies = [ - "quote 1.0.37", + "quote 1.0.38", "sp-crypto-hashing 0.1.0", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -26502,9 +26576,9 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b85d0f1f1e44bd8617eb2a48203ee854981229e3e79e6f468c7175d5fd37489b" dependencies = [ - "quote 1.0.37", + "quote 1.0.38", "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -26520,18 +26594,18 @@ name = "sp-debug-derive" version = "8.0.0" source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] name = "sp-debug-derive" version = "14.0.0" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -26540,9 +26614,9 @@ version = "14.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48d09fa0a5f7299fb81ee25ae3853d26200f7a348148aed6de76be905c007dbe" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -26587,6 +26661,17 @@ dependencies = [ "sp-storage 21.0.0", ] +[[package]] +name = "sp-externalities" 
+version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30cbf059dce180a8bf8b6c8b08b6290fa3d1c7f069a60f1df038ab5dd5fc0ba6" +dependencies = [ + "environmental", + "parity-scale-codec", + "sp-storage 22.0.0", +] + [[package]] name = "sp-genesis-builder" version = "0.8.0" @@ -27189,6 +27274,26 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "sp-runtime-interface" +version = "29.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51e83d940449837a8b2a01b4d877dd22d896fd14d3d3ade875787982da994a33" +dependencies = [ + "bytes", + "impl-trait-for-tuples", + "parity-scale-codec", + "polkavm-derive 0.9.1", + "primitive-types 0.13.1", + "sp-externalities 0.30.0", + "sp-runtime-interface-proc-macro 18.0.0", + "sp-std 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 22.0.0", + "sp-tracing 17.0.1", + "sp-wasm-interface 21.0.1", + "static_assertions", +] + [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" @@ -27196,9 +27301,9 @@ source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf5 dependencies = [ "Inflector", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -27208,9 +27313,9 @@ dependencies = [ "Inflector", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -27222,9 +27327,9 @@ dependencies = [ "Inflector", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -27519,6 +27624,19 @@ dependencies = [ "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "sp-storage" +version = "22.0.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee3b70ca340e41cde9d2e069d354508a6e37a6573d66f7cc38f11549002f64ec" +dependencies = [ + "impl-serde 0.5.0", + "parity-scale-codec", + "ref-cast", + "serde", + "sp-debug-derive 14.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "sp-test-primitives" version = "2.0.0" @@ -27791,10 +27909,10 @@ version = "13.0.0" dependencies = [ "parity-scale-codec", "proc-macro-warning", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "sp-version 29.0.0", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -27804,9 +27922,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5aee8f6730641a65fcf0c8f9b1e448af4b3bb083d08058b47528188bccc7b7a7" dependencies = [ "parity-scale-codec", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -27979,11 +28097,11 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "sqlx-core", "sqlx-macros-core", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -27997,8 +28115,8 @@ dependencies = [ "heck 0.5.0", "hex", "once_cell", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "serde", "serde_json", "sha2 0.10.8", @@ -28006,7 +28124,7 @@ dependencies = [ "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", - "syn 2.0.87", + "syn 2.0.98", "tempfile", "tokio", "url", @@ -28123,8 +28241,8 @@ checksum = "5e6915280e2d0db8911e5032a5c275571af6bdded2916abd691a659be25d3439" dependencies = [ "Inflector", "num-format", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "serde", "serde_json", "unicode-xid 0.2.4", @@ -28148,8 +28266,8 @@ version = "0.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -28424,8 +28542,8 @@ checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" dependencies = [ "cfg_aliases 0.1.1", "memchr", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -28488,8 +28606,8 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -28524,8 +28642,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "rustversion", "syn 1.0.109", ] @@ -28537,10 +28655,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "rustversion", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -28550,10 +28668,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "rustversion", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -28993,9 +29111,9 @@ dependencies = [ [[package]] name = "subxt" -version = "0.38.0" +version = "0.38.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c53029d133e4e0cb7933f1fe06f2c68804b956de9bb8fa930ffca44e9e5e4230" +checksum = "1c17d7ec2359d33133b63c97e28c8b7cd3f0a5bc6ce567ae3aef9d9e85be3433" dependencies = [ "async-trait", "derive-where", @@ -29037,12 +29155,12 @@ checksum = "3cfcfb7d9589f3df0ac87c4988661cf3fb370761fcb19f2fd33104cc59daf22a" dependencies = [ "heck 0.5.0", "parity-scale-codec", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "scale-info", "scale-typegen", "subxt-metadata", - "syn 2.0.87", + "syn 2.0.98", "thiserror", ] @@ -29101,11 +29219,11 @@ dependencies = [ "darling", "parity-scale-codec", "proc-macro-error2", - "quote 1.0.37", + "quote 1.0.38", "scale-typegen", "subxt-codegen", "subxt-utils-fetchmetadata", - "syn 2.0.87", + "syn 2.0.98", ] [[package]] @@ -29270,19 +29388,19 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.87" +version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "unicode-ident", ] @@ -29293,9 +29411,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b837ef12ab88835251726eb12237655e61ec8dc8a280085d1961cdc3dfd047" dependencies = [ "paste", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -29305,9 +29423,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219389c1ebe89f8333df8bdfb871f6631c552ff399c23cac02480b6088aad8f0" dependencies = [ 
"paste", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -29331,8 +29449,8 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", "unicode-xid 0.2.4", ] @@ -29343,9 +29461,9 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -29473,9 +29591,9 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -29654,8 +29772,8 @@ version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10ac1c5050e43014d16b2f94d0d2ce79e65ffdd8b38d8048f9c8f6a8a6da62ac" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "syn 1.0.109", ] @@ -29665,9 +29783,9 @@ version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -29827,9 +29945,9 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ - "proc-macro2 1.0.86", - 
"quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -30078,9 +30196,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -30120,9 +30238,9 @@ dependencies = [ "assert_matches", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -30705,9 +30823,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", "wasm-bindgen-shared", ] @@ -30729,7 +30847,7 @@ version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ - "quote 1.0.37", + "quote 1.0.38", "wasm-bindgen-macro-support", ] @@ -30739,9 +30857,9 @@ version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -31907,10 +32025,10 @@ version = "7.0.0" dependencies = [ "Inflector", "frame-support 28.0.0", - "proc-macro2 1.0.86", - "quote 1.0.37", + "proc-macro2 1.0.93", + "quote 1.0.38", "staging-xcm 7.0.0", - "syn 2.0.87", + "syn 2.0.98", "trybuild", ] @@ -31921,9 +32039,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87fb4f14094d65c500a59bcf540cf42b99ee82c706edd6226a92e769ad60563e" dependencies = [ "Inflector", - "proc-macro2 1.0.86", - "quote 
1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -32145,9 +32263,9 @@ version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -32165,9 +32283,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.86", - "quote 1.0.37", - "syn 2.0.87", + "proc-macro2 1.0.93", + "quote 1.0.38", + "syn 2.0.98", ] [[package]] @@ -32188,9 +32306,9 @@ dependencies = [ [[package]] name = "zombienet-configuration" -version = "0.2.20" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ced2fca1322821431f03d06dcf2ea74d3a7369760b6c587b372de6eada3ce43" +checksum = "03caa9f916aedb12e8443521c87604fe54fbde163a58018780108d86761310dc" dependencies = [ "anyhow", "lazy_static", @@ -32202,15 +32320,16 @@ dependencies = [ "thiserror", "tokio", "toml 0.8.19", + "tracing", "url", "zombienet-support", ] [[package]] name = "zombienet-orchestrator" -version = "0.2.20" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ecd17133c3129547b6472591b5e58d4aee1fc63c965a3418fd56d33a8a4e82" +checksum = "8330f46e4584a306ed567702307a697b8a2771681233548263f5bc3f639fcdec" dependencies = [ "anyhow", "async-trait", @@ -32226,7 +32345,7 @@ dependencies = [ "serde", "serde_json", "sha2 0.10.8", - "sp-core 34.0.0", + "sp-core 35.0.0", "subxt", "subxt-signer", "thiserror", @@ -32241,9 +32360,9 @@ dependencies = [ [[package]] name = "zombienet-prom-metrics-parser" -version = "0.2.20" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"23702db0819a050c8a0130a769b105695137020a64207b4597aa021f06924552" +checksum = "8a52a796a1521cf6420cc6384eac9ef25a146d453b568969774af643f3ecdc97" dependencies = [ "pest", "pest_derive", @@ -32252,9 +32371,9 @@ dependencies = [ [[package]] name = "zombienet-provider" -version = "0.2.20" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83e903843c62cd811e7730ccc618dcd14444d20e8aadfcd7d7561c7b47d8f984" +checksum = "7121ed12016baf318afdcaa96e59d134a3299f40ad5cb67fa6e8ae561db97d26" dependencies = [ "anyhow", "async-trait", @@ -32283,9 +32402,9 @@ dependencies = [ [[package]] name = "zombienet-sdk" -version = "0.2.20" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e457b12c8fdc7003c12dd56855da09812ac11dd232e4ec01acccb2899fe05e44" +checksum = "732a89935216d4cde1e538075af2f9e0e974e49895004694ab113c7040725ae5" dependencies = [ "async-trait", "futures", @@ -32301,9 +32420,9 @@ dependencies = [ [[package]] name = "zombienet-support" -version = "0.2.20" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43547d65b19a92cf0ee44380239d82ef345e7d26f7b04b9e0ecf48496af6346b" +checksum = "1f0c215aa994335125b75f9ad7c227a4ae6c281c074a6e6a42800f2fdfa59c8b" dependencies = [ "anyhow", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index 0d415fe4fdbd4..2b635d5966b68 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -640,7 +640,7 @@ bitvec = { version = "1.0.1", default-features = false } blake2 = { version = "0.10.4", default-features = false } blake2b_simd = { version = "1.0.2", default-features = false } blake3 = { version = "1.5" } -bounded-collections = { version = "0.2.2", default-features = false } +bounded-collections = { version = "0.2.3", default-features = false } bounded-vec = { version = "0.7" } bp-asset-hub-rococo = { path = "bridges/chains/chain-asset-hub-rococo", default-features = false } bp-asset-hub-westend = { path = 
"bridges/chains/chain-asset-hub-westend", default-features = false } @@ -688,7 +688,7 @@ clap-num = { version = "1.0.2" } clap_complete = { version = "4.5.13" } cmd_lib = { version = "1.9.5" } coarsetime = { version = "0.1.22" } -codec = { version = "3.6.12", default-features = false, package = "parity-scale-codec" } +codec = { version = "3.7.4", default-features = false, package = "parity-scale-codec" } collectives-westend-emulated-chain = { path = "cumulus/parachains/integration-tests/emulated/chains/parachains/collectives/collectives-westend" } collectives-westend-runtime = { path = "cumulus/parachains/runtimes/collectives/collectives-westend" } color-eyre = { version = "0.6.3", default-features = false } @@ -767,7 +767,7 @@ fatality = { version = "0.1.1" } fdlimit = { version = "0.3.0" } femme = { version = "2.2.1" } filetime = { version = "0.2.16" } -finality-grandpa = { version = "0.16.2", default-features = false } +finality-grandpa = { version = "0.16.3", default-features = false } finality-relay = { path = "bridges/relays/finality" } first-pallet = { package = "polkadot-sdk-docs-first-pallet", path = "docs/sdk/packages/guides/first-pallet", default-features = false } first-runtime = { package = "polkadot-sdk-docs-first-runtime", path = "docs/sdk/packages/guides/first-runtime", default-features = false } @@ -1321,7 +1321,7 @@ substrate-test-runtime-client = { path = "substrate/test-utils/runtime/client" } substrate-test-runtime-transaction-pool = { path = "substrate/test-utils/runtime/transaction-pool" } substrate-test-utils = { path = "substrate/test-utils" } substrate-wasm-builder = { path = "substrate/utils/wasm-builder", default-features = false } -subxt = { version = "0.38", default-features = false } +subxt = { version = "0.38.1", default-features = false } subxt-metadata = { version = "0.38.0", default-features = false } subxt-signer = { version = "0.38" } syn = { version = "2.0.87" } diff --git a/README.md b/README.md index 
24352cc28a1a9..79c1c8ec88468 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,9 @@ curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/paritytec ## 📚 Documentation -* [🦀 rust-docs](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html) +* [Polkadot Documentation Portal](https://docs.polkadot.com) +* [🦀 rust-docs](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/index.html): Where we keep track of +the API docs of our Rust crates. Includes: * [Introduction](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/polkadot_sdk/index.html) to each component of the Polkadot SDK: Substrate, FRAME, Cumulus, and XCM * [Guides](https://paritytech.github.io/polkadot-sdk/master/polkadot_sdk_docs/guides/index.html), diff --git a/bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml b/bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml index f59f689bf6b5c..0d17ccd0f99f5 100644 --- a/bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml +++ b/bridges/testing/environments/rococo-westend/bridge_hub_rococo_local_network.toml @@ -9,22 +9,19 @@ chain = "rococo-local" [[relaychain.nodes]] name = "alice-rococo-validator" validator = true - rpc_port = 9932 - ws_port = 9942 + rpc_port = 9942 balance = 2000000000000 [[relaychain.nodes]] name = "bob-rococo-validator" validator = true - rpc_port = 9933 - ws_port = 9943 + rpc_port = 9943 balance = 2000000000000 [[relaychain.nodes]] name = "charlie-rococo-validator" validator = true - rpc_port = 9934 - ws_port = 9944 + rpc_port = 9944 balance = 2000000000000 [[parachains]] @@ -37,8 +34,7 @@ cumulus_based = true name = "bridge-hub-rococo-collator1" validator = true command = "{{POLKADOT_PARACHAIN_BINARY}}" - rpc_port = 8933 - ws_port = 8943 + rpc_port = 8943 args = [ "-lparachain=debug,runtime::bridge=trace,xcm=trace,txpool=trace" ] @@ -48,8 +44,7 @@ cumulus_based = true name = 
"bridge-hub-rococo-collator2" validator = true command = "{{POLKADOT_PARACHAIN_BINARY}}" - rpc_port = 8934 - ws_port = 8944 + rpc_port = 8944 args = [ "-lparachain=debug,runtime::bridge=trace,xcm=trace,txpool=trace" ] @@ -61,8 +56,7 @@ cumulus_based = true [[parachains.collators]] name = "asset-hub-rococo-collator1" - rpc_port = 9911 - ws_port = 9910 + rpc_port = 9910 command = "{{POLKADOT_PARACHAIN_BINARY}}" args = [ "-lparachain=debug,xcm=trace,runtime::bridge=trace,txpool=trace" diff --git a/bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml b/bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml index 6ab03ad5fe2c3..ce4630cca985e 100644 --- a/bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml +++ b/bridges/testing/environments/rococo-westend/bridge_hub_westend_local_network.toml @@ -9,22 +9,19 @@ chain = "westend-local" [[relaychain.nodes]] name = "alice-westend-validator" validator = true - rpc_port = 9935 - ws_port = 9945 + rpc_port = 9945 balance = 2000000000000 [[relaychain.nodes]] name = "bob-westend-validator" validator = true - rpc_port = 9936 - ws_port = 9946 + rpc_port = 9946 balance = 2000000000000 [[relaychain.nodes]] name = "charlie-westend-validator" validator = true - rpc_port = 9937 - ws_port = 9947 + rpc_port = 9947 balance = 2000000000000 [[parachains]] @@ -37,8 +34,7 @@ cumulus_based = true name = "bridge-hub-westend-collator1" validator = true command = "{{POLKADOT_PARACHAIN_BINARY}}" - rpc_port = 8935 - ws_port = 8945 + rpc_port = 8945 args = [ "-lparachain=debug,runtime::bridge=trace,xcm=trace,txpool=trace" ] @@ -48,8 +44,7 @@ cumulus_based = true name = "bridge-hub-westend-collator2" validator = true command = "{{POLKADOT_PARACHAIN_BINARY}}" - rpc_port = 8936 - ws_port = 8946 + rpc_port = 8946 args = [ "-lparachain=debug,runtime::bridge=trace,xcm=trace,txpool=trace" ] @@ -61,8 +56,7 @@ cumulus_based = true [[parachains.collators]] name = 
"asset-hub-westend-collator1" - rpc_port = 9011 - ws_port = 9010 + rpc_port = 9010 command = "{{POLKADOT_PARACHAIN_BINARY}}" args = [ "-lparachain=debug,xcm=trace,runtime::bridge=trace,txpool=trace" diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index 4f885525fca02..aa4803626d91f 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -385,6 +385,7 @@ fn build_polkadot_full_node( prepare_workers_hard_max_num: None, prepare_workers_soft_max_num: None, enable_approval_voting_parallel: false, + keep_finalized_for: None, }, )?; diff --git a/cumulus/client/relay-chain-rpc-interface/Cargo.toml b/cumulus/client/relay-chain-rpc-interface/Cargo.toml index 50b438e342370..0940a4e9636c6 100644 --- a/cumulus/client/relay-chain-rpc-interface/Cargo.toml +++ b/cumulus/client/relay-chain-rpc-interface/Cargo.toml @@ -48,8 +48,8 @@ rand = { workspace = true, default-features = true } schnellru = { workspace = true } serde = { workspace = true, default-features = true } serde_json = { workspace = true, default-features = true } -smoldot = { default_features = false, features = ["std"], workspace = true } -smoldot-light = { default_features = false, features = ["std"], workspace = true } +smoldot = { default-features = false, features = ["std"], workspace = true } +smoldot-light = { default-features = false, features = ["std"], workspace = true } thiserror = { workspace = true } tracing = { workspace = true, default-features = true } url = { workspace = true } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 43b7bf0ba1184..ba40bfd2a3abd 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -1648,7 +1648,7 @@ impl_runtime_apis! 
{ Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; @@ -1682,7 +1682,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_message_queue.rs index cd72703104ad0..b4922c6befcfd 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/pallet_message_queue.rs @@ -15,32 +15,34 @@ //! Autogenerated weights for `pallet_message_queue` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemine-dev"), DB CACHE: 1024 +//! HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: -// ./target/release/polkadot-parachain +// frame-omni-bencher +// v1 // benchmark // pallet -// --chain -// statemine-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/assets/statemine/src/weights +// --extrinsic=* +// --runtime=target/production/wbuild/asset-hub-rococo-runtime/asset_hub_rococo_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::Weight}; use core::marker::PhantomData; @@ -48,131 +50,150 @@ use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. 
pub struct WeightInfo(PhantomData); impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 13_668_000 picoseconds. - Weight::from_parts(13_668_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) + // Measured: `260` + // Estimated: `6044` + // Minimum execution time: 14_658_000 picoseconds. 
+ Weight::from_parts(14_990_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 11_106_000 picoseconds. - Weight::from_parts(11_106_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) + // Measured: `255` + // Estimated: `6044` + // Minimum execution time: 13_260_000 picoseconds. + Weight::from_parts(13_568_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: - // Measured: `6` + // Measured: `42` // Estimated: `3517` - // Minimum execution time: 4_921_000 picoseconds. - Weight::from_parts(4_921_000, 0) + // Minimum execution time: 4_874_000 picoseconds. 
+ Weight::from_parts(5_076_000, 0) .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_879_000 picoseconds. - Weight::from_parts(6_879_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `109` + // Estimated: `108986` + // Minimum execution time: 7_154_000 picoseconds. + Weight::from_parts(7_424_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 7_564_000 picoseconds. - Weight::from_parts(7_564_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `109` + // Estimated: `108986` + // Minimum execution time: 7_372_000 picoseconds. 
+ Weight::from_parts(7_549_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 59_963_000 picoseconds. - Weight::from_parts(59_963_000, 0) + // Minimum execution time: 268_796_000 picoseconds. + Weight::from_parts(275_170_000, 0) .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 7_200_000 picoseconds. - Weight::from_parts(7_200_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) + // Measured: `208` + // Estimated: `3517` + // Minimum execution time: 8_531_000 picoseconds. 
+ Weight::from_parts(8_923_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `198` + // Estimated: `3517` + // Minimum execution time: 7_083_000 picoseconds. + Weight::from_parts(7_351_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 41_366_000 picoseconds. - Weight::from_parts(41_366_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105646` + // Estimated: `108986` + // Minimum execution time: 88_053_000 picoseconds. 
+ Weight::from_parts(89_630_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 60_538_000 picoseconds. - Weight::from_parts(60_538_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105646` + // Estimated: `108986` + // Minimum execution time: 113_929_000 picoseconds. 
+ Weight::from_parts(117_331_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 73_665_000 picoseconds. - Weight::from_parts(73_665_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105646` + // Estimated: `108986` + // Minimum execution time: 181_265_000 picoseconds. + Weight::from_parts(191_245_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index f73db17194bcf..5fb6b522abf1b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -1844,7 +1844,7 @@ impl_runtime_apis! 
{ Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; @@ -1878,7 +1878,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; @@ -2307,6 +2307,67 @@ impl_runtime_apis! { key ) } + + fn trace_block( + block: Block, + config: pallet_revive::evm::TracerConfig + ) -> Vec<(u32, pallet_revive::evm::CallTrace)> { + use pallet_revive::tracing::trace; + let mut tracer = config.build(Revive::evm_gas_from_weight); + let mut traces = vec![]; + let (header, extrinsics) = block.deconstruct(); + + Executive::initialize_block(&header); + for (index, ext) in extrinsics.into_iter().enumerate() { + trace(&mut tracer, || { + let _ = Executive::apply_extrinsic(ext); + }); + + if let Some(tx_trace) = tracer.collect_traces().pop() { + traces.push((index as u32, tx_trace)); + } + } + + traces + } + + fn trace_tx( + block: Block, + tx_index: u32, + config: pallet_revive::evm::TracerConfig + ) -> Option { + use pallet_revive::tracing::trace; + let mut tracer = config.build(Revive::evm_gas_from_weight); + let (header, extrinsics) = block.deconstruct(); + + Executive::initialize_block(&header); + for (index, ext) in extrinsics.into_iter().enumerate() { + if index as u32 == tx_index { + trace(&mut tracer, || { + let _ = Executive::apply_extrinsic(ext); + }); + break; + } else { + let _ = Executive::apply_extrinsic(ext); + } + } + + tracer.collect_traces().pop() + } + + fn trace_call( + tx: pallet_revive::evm::GenericTransaction, + config: 
pallet_revive::evm::TracerConfig) + -> Result + { + use pallet_revive::tracing::trace; + let mut tracer = config.build(Revive::evm_gas_from_weight); + trace(&mut tracer, || { + Self::eth_transact(tx) + })?; + + Ok(tracer.collect_traces().pop().expect("eth_transact succeeded, trace must exist, qed")) + } } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_message_queue.rs index cd72703104ad0..649058d57f44c 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/pallet_message_queue.rs @@ -15,32 +15,34 @@ //! Autogenerated weights for `pallet_message_queue` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("statemine-dev"), DB CACHE: 1024 +//! HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: -// ./target/release/polkadot-parachain +// frame-omni-bencher +// v1 // benchmark // pallet -// --chain -// statemine-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/assets/statemine/src/weights +// --extrinsic=* +// --runtime=target/production/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::Weight}; use core::marker::PhantomData; @@ -48,131 +50,150 @@ use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. 
pub struct WeightInfo(PhantomData); impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 13_668_000 picoseconds. - Weight::from_parts(13_668_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) + // Measured: `260` + // Estimated: `6044` + // Minimum execution time: 14_762_000 picoseconds. 
+ Weight::from_parts(15_170_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 11_106_000 picoseconds. - Weight::from_parts(11_106_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) + // Measured: `255` + // Estimated: `6044` + // Minimum execution time: 13_040_000 picoseconds. + Weight::from_parts(13_763_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: - // Measured: `6` + // Measured: `42` // Estimated: `3517` - // Minimum execution time: 4_921_000 picoseconds. - Weight::from_parts(4_921_000, 0) + // Minimum execution time: 4_919_000 picoseconds. 
+ Weight::from_parts(5_213_000, 0) .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_879_000 picoseconds. - Weight::from_parts(6_879_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `109` + // Estimated: `108986` + // Minimum execution time: 7_497_000 picoseconds. + Weight::from_parts(7_748_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 7_564_000 picoseconds. - Weight::from_parts(7_564_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `109` + // Estimated: `108986` + // Minimum execution time: 7_545_000 picoseconds. 
+ Weight::from_parts(7_795_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 59_963_000 picoseconds. - Weight::from_parts(59_963_000, 0) + // Minimum execution time: 262_800_000 picoseconds. + Weight::from_parts(272_183_000, 0) .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 7_200_000 picoseconds. - Weight::from_parts(7_200_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) + // Measured: `208` + // Estimated: `3517` + // Minimum execution time: 8_440_000 picoseconds. 
+ Weight::from_parts(8_894_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `198` + // Estimated: `3517` + // Minimum execution time: 7_192_000 picoseconds. + Weight::from_parts(7_484_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 41_366_000 picoseconds. - Weight::from_parts(41_366_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105646` + // Estimated: `108986` + // Minimum execution time: 83_423_000 picoseconds. 
+ Weight::from_parts(84_122_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 60_538_000 picoseconds. - Weight::from_parts(60_538_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105646` + // Estimated: `108986` + // Minimum execution time: 109_221_000 picoseconds. 
+ Weight::from_parts(110_617_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 73_665_000 picoseconds. - Weight::from_parts(73_665_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105646` + // Estimated: `108986` + // Minimum execution time: 172_899_000 picoseconds. + Weight::from_parts(175_824_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index fdba23a2915f4..c940af694b220 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -1052,7 +1052,7 @@ impl_runtime_apis! 
{ Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; @@ -1084,7 +1084,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_message_queue.rs index b6fee47d14351..9a5873149e723 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/pallet_message_queue.rs @@ -15,32 +15,34 @@ //! Autogenerated weights for `pallet_message_queue` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: -// ./target/release/polkadot-parachain +// frame-omni-bencher +// v1 // benchmark // pallet -// --chain -// bridge-hub-rococo-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights +// --extrinsic=* +// --runtime=target/production/wbuild/bridge-hub-rococo-runtime/bridge_hub_rococo_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::Weight}; use core::marker::PhantomData; @@ -48,131 +50,150 @@ use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. 
pub struct WeightInfo(PhantomData); impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 11_446_000 picoseconds. - Weight::from_parts(11_446_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) + // Measured: `294` + // Estimated: `6212` + // Minimum execution time: 18_921_000 picoseconds. 
+ Weight::from_parts(19_644_000, 0) + .saturating_add(Weight::from_parts(0, 6212)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 10_613_000 picoseconds. - Weight::from_parts(10_613_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) + // Measured: `289` + // Estimated: `6212` + // Minimum execution time: 17_301_000 picoseconds. + Weight::from_parts(17_941_000, 0) + .saturating_add(Weight::from_parts(0, 6212)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3517` - // Minimum execution time: 4_854_000 picoseconds. 
- Weight::from_parts(4_854_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) + // Measured: `76` + // Estimated: `3601` + // Minimum execution time: 5_302_000 picoseconds. + Weight::from_parts(5_517_000, 0) + .saturating_add(Weight::from_parts(0, 3601)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 5_748_000 picoseconds. - Weight::from_parts(5_748_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `143` + // Estimated: `109014` + // Minimum execution time: 7_464_000 picoseconds. + Weight::from_parts(7_768_000, 0) + .saturating_add(Weight::from_parts(0, 109014)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_136_000 picoseconds. - Weight::from_parts(6_136_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `143` + // Estimated: `109014` + // Minimum execution time: 7_558_000 picoseconds. 
+ Weight::from_parts(7_975_000, 0) + .saturating_add(Weight::from_parts(0, 109014)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 59_505_000 picoseconds. - Weight::from_parts(59_505_000, 0) + // Minimum execution time: 267_875_000 picoseconds. + Weight::from_parts(272_181_000, 0) .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 6_506_000 picoseconds. - Weight::from_parts(6_506_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) + // Measured: `242` + // Estimated: `3601` + // Minimum execution time: 8_828_000 picoseconds. 
+ Weight::from_parts(9_393_000, 0) + .saturating_add(Weight::from_parts(0, 3601)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `232` + // Estimated: `3601` + // Minimum execution time: 7_366_000 picoseconds. + Weight::from_parts(7_957_000, 0) + .saturating_add(Weight::from_parts(0, 3601)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 40_646_000 picoseconds. - Weight::from_parts(40_646_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105680` + // Estimated: `109014` + // Minimum execution time: 84_533_000 picoseconds. 
+ Weight::from_parts(85_719_000, 0) + .saturating_add(Weight::from_parts(0, 109014)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 51_424_000 picoseconds. - Weight::from_parts(51_424_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105680` + // Estimated: `109014` + // Minimum execution time: 110_543_000 picoseconds. 
+ Weight::from_parts(111_463_000, 0) + .saturating_add(Weight::from_parts(0, 109014)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 81_153_000 picoseconds. - Weight::from_parts(81_153_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105680` + // Estimated: `109014` + // Minimum execution time: 176_967_000 picoseconds. + Weight::from_parts(178_917_000, 0) + .saturating_add(Weight::from_parts(0, 109014)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 45d08ba8546cd..01f72fd055a25 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -945,7 +945,7 @@ impl_runtime_apis! 
{ Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; @@ -974,7 +974,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_message_queue.rs index b6fee47d14351..d26e502d9e916 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/pallet_message_queue.rs @@ -15,32 +15,34 @@ //! Autogenerated weights for `pallet_message_queue` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: -// ./target/release/polkadot-parachain +// frame-omni-bencher +// v1 // benchmark // pallet -// --chain -// bridge-hub-rococo-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights +// --extrinsic=* +// --runtime=target/production/wbuild/bridge-hub-rococo-runtime/bridge_hub_rococo_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::Weight}; use core::marker::PhantomData; @@ -48,131 +50,150 @@ use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. 
pub struct WeightInfo(PhantomData); impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 11_446_000 picoseconds. - Weight::from_parts(11_446_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) + // Measured: `294` + // Estimated: `6212` + // Minimum execution time: 18_955_000 picoseconds. 
+ Weight::from_parts(19_553_000, 0) + .saturating_add(Weight::from_parts(0, 6212)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 10_613_000 picoseconds. - Weight::from_parts(10_613_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) + // Measured: `289` + // Estimated: `6212` + // Minimum execution time: 17_078_000 picoseconds. + Weight::from_parts(18_027_000, 0) + .saturating_add(Weight::from_parts(0, 6212)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: - // Measured: `6` - // Estimated: `3517` - // Minimum execution time: 4_854_000 picoseconds. 
- Weight::from_parts(4_854_000, 0) - .saturating_add(Weight::from_parts(0, 3517)) + // Measured: `76` + // Estimated: `3601` + // Minimum execution time: 5_214_000 picoseconds. + Weight::from_parts(5_347_000, 0) + .saturating_add(Weight::from_parts(0, 3601)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 5_748_000 picoseconds. - Weight::from_parts(5_748_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `143` + // Estimated: `109014` + // Minimum execution time: 7_382_000 picoseconds. + Weight::from_parts(7_721_000, 0) + .saturating_add(Weight::from_parts(0, 109014)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_136_000 picoseconds. - Weight::from_parts(6_136_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `143` + // Estimated: `109014` + // Minimum execution time: 7_255_000 picoseconds. 
+ Weight::from_parts(7_759_000, 0) + .saturating_add(Weight::from_parts(0, 109014)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 59_505_000 picoseconds. - Weight::from_parts(59_505_000, 0) + // Minimum execution time: 268_956_000 picoseconds. + Weight::from_parts(276_513_000, 0) .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 6_506_000 picoseconds. - Weight::from_parts(6_506_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) + // Measured: `242` + // Estimated: `3601` + // Minimum execution time: 8_808_000 picoseconds. 
+ Weight::from_parts(9_297_000, 0) + .saturating_add(Weight::from_parts(0, 3601)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `232` + // Estimated: `3601` + // Minimum execution time: 7_475_000 picoseconds. + Weight::from_parts(7_786_000, 0) + .saturating_add(Weight::from_parts(0, 3601)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 40_646_000 picoseconds. - Weight::from_parts(40_646_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105680` + // Estimated: `109014` + // Minimum execution time: 84_497_000 picoseconds. 
+ Weight::from_parts(85_598_000, 0) + .saturating_add(Weight::from_parts(0, 109014)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 51_424_000 picoseconds. - Weight::from_parts(51_424_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105680` + // Estimated: `109014` + // Minimum execution time: 110_679_000 picoseconds. 
+ Weight::from_parts(113_677_000, 0) + .saturating_add(Weight::from_parts(0, 109014)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(136), added: 2611, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105549), added: 108024, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 81_153_000 picoseconds. - Weight::from_parts(81_153_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105680` + // Estimated: `109014` + // Minimum execution time: 176_951_000 picoseconds. 
+ Weight::from_parts(178_671_000, 0) + .saturating_add(Weight::from_parts(0, 109014)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs index 5e087832f0e82..c662cd355c73b 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/lib.rs @@ -628,6 +628,7 @@ impl pallet_scheduler::Config for Runtime { type WeightInfo = weights::pallet_scheduler::WeightInfo; type OriginPrivilegeCmp = EqualOrGreatestRootCmp; type Preimages = Preimage; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -1062,7 +1063,7 @@ impl_runtime_apis! { Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; @@ -1085,7 +1086,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs index 0bb6d3d0f1c45..729d4cab96eef 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/pallet_message_queue.rs @@ -15,32 +15,34 @@ //! 
Autogenerated weights for `pallet_message_queue` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-24, STEPS: `2`, REPEAT: `1`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `i9`, CPU: `13th Gen Intel(R) Core(TM) i9-13900K` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("collectives-polkadot-dev"), DB CACHE: 1024 +//! HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: -// ./target/release/polkadot-parachain +// frame-omni-bencher +// v1 // benchmark // pallet -// --chain -// collectives-polkadot-dev -// --pallet -// pallet_message_queue -// --extrinsic -// * -// --execution -// wasm -// --wasm-execution -// compiled -// --output -// parachains/runtimes/collectives/collectives-polkadot/src/weights +// --extrinsic=* +// --runtime=target/production/wbuild/collectives-westend-runtime/collectives_westend_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::Weight}; use core::marker::PhantomData; @@ -48,131 +50,150 @@ use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. 
pub struct WeightInfo(PhantomData); impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 11_440_000 picoseconds. - Weight::from_parts(11_440_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) + // Measured: `223` + // Estimated: `6044` + // Minimum execution time: 13_878_000 picoseconds. 
+ Weight::from_parts(14_334_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 11_077_000 picoseconds. - Weight::from_parts(11_077_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) + // Measured: `218` + // Estimated: `6044` + // Minimum execution time: 12_461_000 picoseconds. + Weight::from_parts(13_125_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: // Measured: `6` // Estimated: `3517` - // Minimum execution time: 3_977_000 picoseconds. - Weight::from_parts(3_977_000, 0) + // Minimum execution time: 4_217_000 picoseconds. 
+ Weight::from_parts(4_454_000, 0) .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 4_831_000 picoseconds. - Weight::from_parts(4_831_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Estimated: `108986` + // Minimum execution time: 6_567_000 picoseconds. + Weight::from_parts(6_833_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 5_192_000 picoseconds. - Weight::from_parts(5_192_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Estimated: `108986` + // Minimum execution time: 6_685_000 picoseconds. 
+ Weight::from_parts(7_020_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 58_750_000 picoseconds. - Weight::from_parts(58_750_000, 0) + // Minimum execution time: 270_143_000 picoseconds. + Weight::from_parts(279_326_000, 0) .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 5_107_000 picoseconds. - Weight::from_parts(5_107_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) + // Measured: `171` + // Estimated: `3517` + // Minimum execution time: 8_047_000 picoseconds. 
+ Weight::from_parts(8_332_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `161` + // Estimated: `3517` + // Minimum execution time: 6_495_000 picoseconds. + Weight::from_parts(6_881_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 46_814_000 picoseconds. - Weight::from_parts(46_814_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105609` + // Estimated: `108986` + // Minimum execution time: 87_888_000 picoseconds. 
+ Weight::from_parts(90_107_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 52_510_000 picoseconds. - Weight::from_parts(52_510_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105609` + // Estimated: `108986` + // Minimum execution time: 115_155_000 picoseconds. 
+ Weight::from_parts(117_129_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 71_930_000 picoseconds. - Weight::from_parts(71_930_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105609` + // Estimated: `108986` + // Minimum execution time: 180_170_000 picoseconds. + Weight::from_parts(183_187_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs index 5ad4026e0b2b4..48c5859715f4b 100644 --- a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/lib.rs @@ -766,7 +766,7 @@ impl_runtime_apis! 
{ Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; @@ -783,7 +783,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; diff --git a/cumulus/parachains/runtimes/contracts/contracts-rococo/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/weights/pallet_message_queue.rs new file mode 100644 index 0000000000000..a413f03df042b --- /dev/null +++ b/cumulus/parachains/runtimes/contracts/contracts-rococo/src/weights/pallet_message_queue.rs @@ -0,0 +1,202 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! 
HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/contracts-rococo-runtime/contracts_rococo_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/contracts/contracts-rococo/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes +// --genesis-builder-policy=none +// --exclude-pallets=pallet_xcm + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_message_queue`. +pub struct WeightInfo(PhantomData); +impl pallet_message_queue::WeightInfo for WeightInfo { + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn ready_ring_knit() -> Weight { + // Proof Size summary in bytes: + // Measured: `202` + // Estimated: `6044` + // Minimum execution time: 14_083_000 picoseconds. 
+ Weight::from_parts(14_655_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn ready_ring_unknit() -> Weight { + // Proof Size summary in bytes: + // Measured: `197` + // Estimated: `6044` + // Minimum execution time: 12_765_000 picoseconds. + Weight::from_parts(13_198_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(3)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn service_queue_base() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `3517` + // Minimum execution time: 2_700_000 picoseconds. + Weight::from_parts(2_892_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + fn service_page_base_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `47` + // Estimated: `108986` + // Minimum execution time: 4_849_000 picoseconds. 
+ Weight::from_parts(4_985_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + fn service_page_base_no_completion() -> Weight { + // Proof Size summary in bytes: + // Measured: `47` + // Estimated: `108986` + // Minimum execution time: 5_049_000 picoseconds. + Weight::from_parts(5_165_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + fn service_page_item() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 258_710_000 picoseconds. + Weight::from_parts(261_251_000, 0) + .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + fn bump_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `150` + // Estimated: `3517` + // Minimum execution time: 7_453_000 picoseconds. 
+ Weight::from_parts(7_790_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3517` + // Minimum execution time: 6_311_000 picoseconds. + Weight::from_parts(6_576_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + fn reap_page() -> Weight { + // Proof Size summary in bytes: + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 80_897_000 picoseconds. 
+ Weight::from_parts(81_550_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + fn execute_overweight_page_removed() -> Weight { + // Proof Size summary in bytes: + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 107_875_000 picoseconds. + Weight::from_parts(108_864_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) + fn execute_overweight_page_updated() -> Weight { + // Proof Size summary in bytes: + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 171_534_000 picoseconds. 
+ Weight::from_parts(172_497_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().writes(2)) + } +} diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs index a837b8d25dcf7..54cfc64930b80 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/coretime.rs @@ -111,6 +111,7 @@ enum CoretimeProviderCalls { parameter_types! { pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); + pub const MinimumCreditPurchase: Balance = UNITS / 10; pub RevenueAccumulationAccount: AccountId = BrokerPalletId::get().into_sub_account_truncating(b"burnstash"); } @@ -319,4 +320,5 @@ impl pallet_broker::Config for Runtime { type SovereignAccountOf = SovereignAccountOf; type MaxAutoRenewals = ConstU32<100>; type PriceAdapter = pallet_broker::CenterTargetPrice; + type MinimumCreditPurchase = MinimumCreditPurchase; } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs index 622a40e1d8dc0..ccf8003639acd 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/lib.rs @@ -929,7 +929,7 @@ impl_runtime_apis! { Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; @@ -951,7 +951,7 @@ impl_runtime_apis! 
{ fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_message_queue.rs index 2b04ff3da8f7b..fbeb9129986f8 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/pallet_message_queue.rs @@ -1,5 +1,4 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,28 +15,31 @@ //! Autogenerated weights for `pallet_message_queue` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2024-01-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-j8vvqcjr-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// frame-omni-bencher +// v1 // benchmark // pallet -// --chain=coretime-rococo-dev -// --wasm-execution=compiled -// --pallet=pallet_message_queue -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* +// --runtime=target/production/wbuild/coretime-rococo-runtime/coretime_rococo_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights +// --wasm-execution=compiled // --steps=50 // --repeat=20 -// --json -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/ +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes +// --genesis-builder-policy=none +// --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,10 +58,10 @@ impl pallet_message_queue::WeightInfo for WeightInfo /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `223` + // Measured: `202` // Estimated: `6044` - // Minimum execution time: 11_120_000 picoseconds. - Weight::from_parts(11_605_000, 0) + // Minimum execution time: 14_043_000 picoseconds. 
+ Weight::from_parts(14_521_000, 0) .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -70,10 +72,10 @@ impl pallet_message_queue::WeightInfo for WeightInfo /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `218` + // Measured: `197` // Estimated: `6044` - // Minimum execution time: 9_795_000 picoseconds. - Weight::from_parts(10_300_000, 0) + // Minimum execution time: 12_773_000 picoseconds. + Weight::from_parts(13_314_000, 0) .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -82,48 +84,48 @@ impl pallet_message_queue::WeightInfo for WeightInfo /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: - // Measured: `6` + // Measured: `0` // Estimated: `3517` - // Minimum execution time: 3_277_000 picoseconds. - Weight::from_parts(3_426_000, 0) + // Minimum execution time: 2_676_000 picoseconds. + Weight::from_parts(2_793_000, 0) .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 5_016_000 picoseconds. 
- Weight::from_parts(5_237_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `47` + // Estimated: `108986` + // Minimum execution time: 4_720_000 picoseconds. + Weight::from_parts(4_986_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 5_118_000 picoseconds. - Weight::from_parts(5_347_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `47` + // Estimated: `108986` + // Minimum execution time: 4_968_000 picoseconds. + Weight::from_parts(5_153_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 175_756_000 picoseconds. - Weight::from_parts(177_423_000, 0) + // Minimum execution time: 266_666_000 picoseconds. 
+ Weight::from_parts(268_848_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -133,53 +135,67 @@ impl pallet_message_queue::WeightInfo for WeightInfo /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `171` + // Measured: `150` // Estimated: `3517` - // Minimum execution time: 6_515_000 picoseconds. - Weight::from_parts(6_953_000, 0) + // Minimum execution time: 7_434_000 picoseconds. + Weight::from_parts(7_712_000, 0) .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3517` + // Minimum execution time: 6_206_000 picoseconds. 
+ Weight::from_parts(6_456_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `69050` - // Minimum execution time: 57_649_000 picoseconds. - Weight::from_parts(59_093_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 80_746_000 picoseconds. + Weight::from_parts(81_878_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `69050` - // Minimum execution time: 73_366_000 picoseconds. - Weight::from_parts(74_402_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 108_455_000 picoseconds. 
+ Weight::from_parts(109_672_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `69050` - // Minimum execution time: 116_063_000 picoseconds. - Weight::from_parts(117_532_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 171_607_000 picoseconds. + Weight::from_parts(173_083_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs index 805861b1f8bdb..9aa9e699b65da 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/coretime.rs @@ -111,6 +111,7 @@ enum CoretimeProviderCalls { parameter_types! 
{ pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); + pub const MinimumCreditPurchase: Balance = UNITS / 10; pub RevenueAccumulationAccount: AccountId = BrokerPalletId::get().into_sub_account_truncating(b"burnstash"); } @@ -332,4 +333,5 @@ impl pallet_broker::Config for Runtime { type SovereignAccountOf = SovereignAccountOf; type MaxAutoRenewals = ConstU32<20>; type PriceAdapter = pallet_broker::CenterTargetPrice; + type MinimumCreditPurchase = MinimumCreditPurchase; } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs index 7312c9c1639d2..3d544aea469f3 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/lib.rs @@ -921,7 +921,7 @@ impl_runtime_apis! { Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; @@ -943,7 +943,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_message_queue.rs index ec7db3d260bb7..d4a139836bd96 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/pallet_message_queue.rs @@ -1,5 +1,4 @@ // Copyright (C) Parity Technologies (UK) Ltd. 
-// This file is part of Cumulus. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,27 +16,30 @@ //! Autogenerated weights for `pallet_message_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("coretime-westend-dev")`, DB CACHE: 1024 +//! HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot-parachain +// frame-omni-bencher +// v1 // benchmark // pallet -// --chain=coretime-westend-dev -// --wasm-execution=compiled -// --pallet=pallet_message_queue -// --no-storage-info -// --no-median-slopes -// --no-min-squares // --extrinsic=* +// --runtime=target/production/wbuild/coretime-westend-runtime/coretime_westend_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights +// --wasm-execution=compiled // --steps=50 // --repeat=20 -// --json -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/ +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes +// --genesis-builder-policy=none +// --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -56,10 +58,10 @@ impl pallet_message_queue::WeightInfo for WeightInfo /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, 
`max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `223` + // Measured: `202` // Estimated: `6044` - // Minimum execution time: 10_918_000 picoseconds. - Weight::from_parts(11_224_000, 0) + // Minimum execution time: 13_916_000 picoseconds. + Weight::from_parts(14_583_000, 0) .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -70,10 +72,10 @@ impl pallet_message_queue::WeightInfo for WeightInfo /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `218` + // Measured: `197` // Estimated: `6044` - // Minimum execution time: 9_649_000 picoseconds. - Weight::from_parts(10_056_000, 0) + // Minimum execution time: 12_766_000 picoseconds. + Weight::from_parts(13_164_000, 0) .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -82,48 +84,48 @@ impl pallet_message_queue::WeightInfo for WeightInfo /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: - // Measured: `6` + // Measured: `0` // Estimated: `3517` - // Minimum execution time: 3_134_000 picoseconds. - Weight::from_parts(3_197_000, 0) + // Minimum execution time: 2_632_000 picoseconds. 
+ Weight::from_parts(2_767_000, 0) .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 4_915_000 picoseconds. - Weight::from_parts(5_127_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `47` + // Estimated: `108986` + // Minimum execution time: 4_846_000 picoseconds. + Weight::from_parts(5_035_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 5_011_000 picoseconds. - Weight::from_parts(5_150_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `47` + // Estimated: `108986` + // Minimum execution time: 4_934_000 picoseconds. 
+ Weight::from_parts(5_111_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::Pages` (r:0 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 168_806_000 picoseconds. - Weight::from_parts(170_795_000, 0) + // Minimum execution time: 263_957_000 picoseconds. + Weight::from_parts(272_724_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -133,53 +135,67 @@ impl pallet_message_queue::WeightInfo for WeightInfo /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `171` + // Measured: `150` // Estimated: `3517` - // Minimum execution time: 6_413_000 picoseconds. - Weight::from_parts(6_797_000, 0) + // Minimum execution time: 7_560_000 picoseconds. 
+ Weight::from_parts(7_816_000, 0) .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3517` + // Minimum execution time: 6_220_000 picoseconds. + Weight::from_parts(6_556_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `69050` - // Minimum execution time: 52_734_000 picoseconds. - Weight::from_parts(54_106_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 84_171_000 picoseconds. 
+ Weight::from_parts(87_323_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `69050` - // Minimum execution time: 68_400_000 picoseconds. - Weight::from_parts(70_336_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 111_961_000 picoseconds. + Weight::from_parts(113_984_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `69050` - // Minimum execution time: 109_496_000 picoseconds. 
- Weight::from_parts(111_632_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 175_635_000 picoseconds. + Weight::from_parts(184_127_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs index 75f45297fe2cd..4de0c4976ffda 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/lib.rs @@ -446,7 +446,7 @@ impl_runtime_apis! { Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; @@ -462,7 +462,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; diff --git a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs index 3e3d521227969..eda475f32a0e4 100644 --- a/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/pallet_message_queue.rs @@ -1,5 +1,4 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,26 +15,30 @@ //! Autogenerated weights for `pallet_message_queue` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("glutton-westend-dev-1300")`, DB CACHE: 1024 +//! HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: -// target/production/polkadot-parachain +// frame-omni-bencher +// v1 // benchmark // pallet -// --steps=50 -// --repeat=20 // --extrinsic=* +// --runtime=target/production/wbuild/glutton-westend-runtime/glutton_westend_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights // --wasm-execution=compiled +// --steps=50 +// --repeat=20 // --heap-pages=4096 -// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json -// --pallet=pallet_message_queue -// --chain=glutton-westend-dev-1300 -// --header=./cumulus/file_header.txt -// --output=./cumulus/parachains/runtimes/glutton/glutton-westend/src/weights/ +// --no-storage-info +// --no-min-squares +// --no-median-slopes +// --genesis-builder-policy=none #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -54,10 +57,10 @@ impl pallet_message_queue::WeightInfo for WeightInfo /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: 
`MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `223` + // Measured: `202` // Estimated: `6044` - // Minimum execution time: 10_833_000 picoseconds. - Weight::from_parts(11_237_000, 0) + // Minimum execution time: 14_968_000 picoseconds. + Weight::from_parts(15_441_000, 0) .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -68,10 +71,10 @@ impl pallet_message_queue::WeightInfo for WeightInfo /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `218` + // Measured: `197` // Estimated: `6044` - // Minimum execution time: 9_399_000 picoseconds. - Weight::from_parts(9_773_000, 0) + // Minimum execution time: 13_495_000 picoseconds. + Weight::from_parts(14_157_000, 0) .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -80,45 +83,50 @@ impl pallet_message_queue::WeightInfo for WeightInfo /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: - // Measured: `6` + // Measured: `0` // Estimated: `3517` - // Minimum execution time: 3_277_000 picoseconds. - Weight::from_parts(3_358_000, 0) + // Minimum execution time: 2_852_000 picoseconds. 
+ Weight::from_parts(2_945_000, 0) .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 5_429_000 picoseconds. - Weight::from_parts(5_667_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `47` + // Estimated: `108986` + // Minimum execution time: 5_089_000 picoseconds. + Weight::from_parts(5_217_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 5_538_000 picoseconds. - Weight::from_parts(5_803_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `47` + // Estimated: `108986` + // Minimum execution time: 5_257_000 picoseconds. 
+ Weight::from_parts(5_390_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 89_888_000 picoseconds. - Weight::from_parts(90_929_000, 0) + // Minimum execution time: 259_173_000 picoseconds. + Weight::from_parts(265_117_000, 0) .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) @@ -126,53 +134,67 @@ impl pallet_message_queue::WeightInfo for WeightInfo /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `171` + // Measured: `150` // Estimated: `3517` - // Minimum execution time: 6_129_000 picoseconds. - Weight::from_parts(6_414_000, 0) + // Minimum execution time: 7_493_000 picoseconds. 
+ Weight::from_parts(7_788_000, 0) .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3517` + // Minimum execution time: 6_307_000 picoseconds. + Weight::from_parts(6_486_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `69050` - // Minimum execution time: 52_366_000 picoseconds. - Weight::from_parts(53_500_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 81_336_000 picoseconds. 
+ Weight::from_parts(82_288_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `69050` - // Minimum execution time: 67_848_000 picoseconds. - Weight::from_parts(68_910_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 107_710_000 picoseconds. + Weight::from_parts(108_639_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::Pages` (r:1 w:1) - /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65585), added: 68060, mode: `MaxEncodedLen`) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `69050` - // Minimum execution time: 107_564_000 picoseconds. 
- Weight::from_parts(109_377_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 171_037_000 picoseconds. + Weight::from_parts(178_092_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs index cb0282b17a6ce..68c51175415c5 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/lib.rs @@ -877,7 +877,7 @@ impl_runtime_apis! { Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; @@ -899,7 +899,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_message_queue.rs index 47c6790140736..ec5defcd80b7d 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/pallet_message_queue.rs @@ -13,11 +13,38 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Need to rerun +//! Autogenerated weights for `pallet_message_queue` +//! +//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/people-rococo-runtime/people_rococo_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes +// --genesis-builder-policy=none +// --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::Weight}; use core::marker::PhantomData; @@ -25,131 +52,150 @@ use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. 
pub struct WeightInfo(PhantomData); impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 13_668_000 picoseconds. - Weight::from_parts(13_668_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) + // Measured: `202` + // Estimated: `6044` + // Minimum execution time: 14_277_000 picoseconds. 
+ Weight::from_parts(14_628_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 11_106_000 picoseconds. - Weight::from_parts(11_106_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) + // Measured: `197` + // Estimated: `6044` + // Minimum execution time: 12_644_000 picoseconds. + Weight::from_parts(13_374_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: - // Measured: `6` + // Measured: `0` // Estimated: `3517` - // Minimum execution time: 4_921_000 picoseconds. - Weight::from_parts(4_921_000, 0) + // Minimum execution time: 2_789_000 picoseconds. 
+ Weight::from_parts(2_892_000, 0) .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_879_000 picoseconds. - Weight::from_parts(6_879_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `47` + // Estimated: `108986` + // Minimum execution time: 4_966_000 picoseconds. + Weight::from_parts(5_157_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 7_564_000 picoseconds. - Weight::from_parts(7_564_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `47` + // Estimated: `108986` + // Minimum execution time: 5_217_000 picoseconds. 
+ Weight::from_parts(5_356_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 59_963_000 picoseconds. - Weight::from_parts(59_963_000, 0) + // Minimum execution time: 262_988_000 picoseconds. + Weight::from_parts(269_525_000, 0) .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 7_200_000 picoseconds. - Weight::from_parts(7_200_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) + // Measured: `150` + // Estimated: `3517` + // Minimum execution time: 7_398_000 picoseconds. 
+ Weight::from_parts(7_797_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3517` + // Minimum execution time: 6_328_000 picoseconds. + Weight::from_parts(6_519_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 41_366_000 picoseconds. - Weight::from_parts(41_366_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 85_635_000 picoseconds. 
+ Weight::from_parts(87_418_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 60_538_000 picoseconds. - Weight::from_parts(60_538_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 111_921_000 picoseconds. 
+ Weight::from_parts(113_304_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 73_665_000 picoseconds. - Weight::from_parts(73_665_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 174_402_000 picoseconds. + Weight::from_parts(178_474_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs index 050256dd4f6a3..980fb8db47320 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/lib.rs @@ -875,7 +875,7 @@ impl_runtime_apis! 
{ Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; @@ -897,7 +897,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_message_queue.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_message_queue.rs index 47c6790140736..77adfbe500e2f 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_message_queue.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/pallet_message_queue.rs @@ -13,11 +13,38 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Need to rerun +//! Autogenerated weights for `pallet_message_queue` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 + +// Executed Command: +// frame-omni-bencher +// v1 +// benchmark +// pallet +// --extrinsic=* +// --runtime=target/production/wbuild/people-westend-runtime/people_westend_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/cumulus/file_header.txt +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights +// --wasm-execution=compiled +// --steps=50 +// --repeat=20 +// --heap-pages=4096 +// --no-storage-info +// --no-min-squares +// --no-median-slopes +// --genesis-builder-policy=none +// --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] +#![allow(missing_docs)] use frame_support::{traits::Get, weights::Weight}; use core::marker::PhantomData; @@ -25,131 +52,150 @@ use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. pub struct WeightInfo(PhantomData); impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `189` - // Estimated: `7534` - // Minimum execution time: 13_668_000 picoseconds. 
- Weight::from_parts(13_668_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) + // Measured: `202` + // Estimated: `6044` + // Minimum execution time: 14_247_000 picoseconds. + Weight::from_parts(14_692_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `184` - // Estimated: `7534` - // Minimum execution time: 11_106_000 picoseconds. - Weight::from_parts(11_106_000, 0) - .saturating_add(Weight::from_parts(0, 7534)) + // Measured: `197` + // Estimated: `6044` + // Minimum execution time: 12_776_000 picoseconds. 
+ Weight::from_parts(13_230_000, 0) + .saturating_add(Weight::from_parts(0, 6044)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: - // Measured: `6` + // Measured: `0` // Estimated: `3517` - // Minimum execution time: 4_921_000 picoseconds. - Weight::from_parts(4_921_000, 0) + // Minimum execution time: 2_752_000 picoseconds. + Weight::from_parts(2_902_000, 0) .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 6_879_000 picoseconds. - Weight::from_parts(6_879_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `47` + // Estimated: `108986` + // Minimum execution time: 4_888_000 picoseconds. 
+ Weight::from_parts(5_066_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `72` - // Estimated: `69050` - // Minimum execution time: 7_564_000 picoseconds. - Weight::from_parts(7_564_000, 0) - .saturating_add(Weight::from_parts(0, 69050)) + // Measured: `47` + // Estimated: `108986` + // Minimum execution time: 5_073_000 picoseconds. + Weight::from_parts(5_354_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 59_963_000 picoseconds. - Weight::from_parts(59_963_000, 0) + // Minimum execution time: 260_238_000 picoseconds. 
+ Weight::from_parts(268_228_000, 0) .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(5), added: 500, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `99` - // Estimated: `5007` - // Minimum execution time: 7_200_000 picoseconds. - Weight::from_parts(7_200_000, 0) - .saturating_add(Weight::from_parts(0, 5007)) + // Measured: `150` + // Estimated: `3517` + // Minimum execution time: 7_627_000 picoseconds. 
+ Weight::from_parts(7_845_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `140` + // Estimated: `3517` + // Minimum execution time: 6_133_000 picoseconds. + Weight::from_parts(6_650_000, 0) + .saturating_add(Weight::from_parts(0, 3517)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 41_366_000 picoseconds. - Weight::from_parts(41_366_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 80_780_000 picoseconds. 
+ Weight::from_parts(81_524_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 60_538_000 picoseconds. - Weight::from_parts(60_538_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 107_347_000 picoseconds. 
+ Weight::from_parts(108_410_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(52), added: 2527, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(65585), added: 68060, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(52), added: 2527, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(105521), added: 107996, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65667` - // Estimated: `72567` - // Minimum execution time: 73_665_000 picoseconds. - Weight::from_parts(73_665_000, 0) - .saturating_add(Weight::from_parts(0, 72567)) + // Measured: `105588` + // Estimated: `108986` + // Minimum execution time: 170_768_000 picoseconds. + Weight::from_parts(172_748_000, 0) + .saturating_add(Weight::from_parts(0, 108986)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 095ae2ed9e962..2b93e391c2e8f 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -1119,7 +1119,7 @@ impl_runtime_apis! 
{ Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; @@ -1135,7 +1135,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch}; + use frame_benchmarking::BenchmarkBatch; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; diff --git a/docs/contributor/DEPRECATION_CHECKLIST.md b/docs/contributor/DEPRECATION_CHECKLIST.md index 687c0a7cd7da0..1ffd2b4dae643 100644 --- a/docs/contributor/DEPRECATION_CHECKLIST.md +++ b/docs/contributor/DEPRECATION_CHECKLIST.md @@ -38,8 +38,8 @@ when building the code. ## Update examples and tutorials Make sure that the rust docs are updated. -We also need [https://docs.substrate.io/](https://docs.substrate.io/) to be updated accordingly. The repo behind it is -[https://github.com/substrate-developer-hub/substrate-docs](https://github.com/substrate-developer-hub/substrate-docs). +We also need [https://docs.polkadot.com/](https://docs.polkadot.com/) to be updated accordingly. The repo behind it is +[https://github.com/polkadot-developers/polkadot-docs](https://github.com/polkadot-developers/polkadot-docs). 
## Announce the deprecation and removal diff --git a/docs/mermaid/IA.mmd b/docs/mermaid/IA.mmd index dcf9806dcb623..60a59f6e0ab94 100644 --- a/docs/mermaid/IA.mmd +++ b/docs/mermaid/IA.mmd @@ -1,5 +1,5 @@ flowchart - parity[paritytech.github.io] --> devhub[polkadot_sdk_docs] + dot[docs.polkadot.com] --> devhub[polkadot_sdk_docs] devhub --> polkadot_sdk devhub --> reference_docs diff --git a/docs/sdk/src/external_resources.rs b/docs/sdk/src/external_resources.rs index 939874d12f137..dc68ad25badf9 100644 --- a/docs/sdk/src/external_resources.rs +++ b/docs/sdk/src/external_resources.rs @@ -9,6 +9,6 @@ //! //! - [Polkadot NFT Marketplace Tutorial by Polkadot Fellow Shawn Tabrizi](https://www.shawntabrizi.com/substrate-collectables-workshop/) //! - [DOT Code School](https://dotcodeschool.com/) -//! - [Polkadot Developers](https://github.com/polkadot-developers/) +//! - [Polkadot Developers Github Organization](https://github.com/polkadot-developers/) //! - [Polkadot Blockchain Academy](https://github.com/Polkadot-Blockchain-Academy) -//! - [Polkadot Wiki: Build](https://wiki.polkadot.network/docs/build-guide) +//! - [Polkadot Wiki](https://wiki.polkadot.network/) diff --git a/docs/sdk/src/lib.rs b/docs/sdk/src/lib.rs index e2c5fc93479cd..d96239ca89424 100644 --- a/docs/sdk/src/lib.rs +++ b/docs/sdk/src/lib.rs @@ -2,8 +2,8 @@ //! //! The Polkadot SDK Developer Documentation. //! -//! This crate is a *minimal*, but *always-accurate* source of information for those wishing to -//! build on the Polkadot SDK. +//! This crate is a *minimal*, *always-accurate* and low level source of truth about Polkadot-SDK. +//! For more high level docs, please go to [docs.polkadot.com](https://docs.polkadot.com). //! //! ## Getting Started //! diff --git a/docs/sdk/src/meta_contributing.rs b/docs/sdk/src/meta_contributing.rs index d68d9bca18b11..663da2cc54b32 100644 --- a/docs/sdk/src/meta_contributing.rs +++ b/docs/sdk/src/meta_contributing.rs @@ -120,12 +120,6 @@ //! //! //! -//! 
## `docs.substrate.io` -//! -//! This crate is meant to gradually replace `docs.substrate.io`. As any content is added here, the -//! corresponding counter-part should be marked as deprecated, as described -//! [here](https://github.com/paritytech/polkadot-sdk-docs/issues/26). -//! //! ## `crates.io` and Publishing //! //! As it stands now, this crate cannot be published to crates.io because of its use of diff --git a/docs/sdk/src/reference_docs/development_environment_advice.rs b/docs/sdk/src/reference_docs/development_environment_advice.rs index a5f38bb280def..104782933da74 100644 --- a/docs/sdk/src/reference_docs/development_environment_advice.rs +++ b/docs/sdk/src/reference_docs/development_environment_advice.rs @@ -109,6 +109,10 @@ //! //! ### Cargo Remote //! +//! Warning: cargo remote by default doesn't transfer hidden files to the remote machine. But hidden +//! files can be useful, e.g. for sqlx usage. On the other hand using `--transfer-hidden` flag will +//! transfer `.git` which is big. +//! //! If you have a powerful remote server available, you may consider using //! [cargo-remote](https://github.com/sgeisler/cargo-remote) to execute cargo commands on it, //! freeing up local resources for other tasks like `rust-analyzer`. diff --git a/polkadot/cli/src/cli.rs b/polkadot/cli/src/cli.rs index 6187eb79a362c..721a471499208 100644 --- a/polkadot/cli/src/cli.rs +++ b/polkadot/cli/src/cli.rs @@ -152,6 +152,12 @@ pub struct RunCmd { /// should not be used unless explicitly advised to. It will be removed in the future. #[arg(long, default_value = "true", action=ArgAction::Set)] pub enable_approval_voting_parallel: bool, + + /// How long finalized data should be kept in the availability store (in hours). + /// Only used for testnets. If not specified, set to 1 hour. Always set to 25 hours for live + /// networks. 
+ #[arg(long)] + pub keep_finalized_for: Option, } #[allow(missing_docs)] diff --git a/polkadot/cli/src/command.rs b/polkadot/cli/src/command.rs index 02c9b97150c2d..6b716d48783bc 100644 --- a/polkadot/cli/src/command.rs +++ b/polkadot/cli/src/command.rs @@ -235,6 +235,7 @@ where prepare_workers_hard_max_num: cli.run.prepare_workers_hard_max_num, prepare_workers_soft_max_num: cli.run.prepare_workers_soft_max_num, enable_approval_voting_parallel: cli.run.enable_approval_voting_parallel, + keep_finalized_for: cli.run.keep_finalized_for, }, ) .map(|full| full.task_manager)?; diff --git a/polkadot/node/core/av-store/src/lib.rs b/polkadot/node/core/av-store/src/lib.rs index 9da2973773a01..75a0a4b08ed19 100644 --- a/polkadot/node/core/av-store/src/lib.rs +++ b/polkadot/node/core/av-store/src/lib.rs @@ -75,9 +75,6 @@ const TOMBSTONE_VALUE: &[u8] = b" "; /// Unavailable blocks are kept for 1 hour. const KEEP_UNAVAILABLE_FOR: Duration = Duration::from_secs(60 * 60); -/// Finalized data is kept for 25 hours. -const KEEP_FINALIZED_FOR: Duration = Duration::from_secs(25 * 60 * 60); - /// The pruning interval. const PRUNING_INTERVAL: Duration = Duration::from_secs(60 * 5); @@ -423,16 +420,6 @@ struct PruningConfig { pruning_interval: Duration, } -impl Default for PruningConfig { - fn default() -> Self { - Self { - keep_unavailable_for: KEEP_UNAVAILABLE_FOR, - keep_finalized_for: KEEP_FINALIZED_FOR, - pruning_interval: PRUNING_INTERVAL, - } - } -} - /// Configuration for the availability store. #[derive(Debug, Clone, Copy)] pub struct Config { @@ -440,6 +427,8 @@ pub struct Config { pub col_data: u32, /// The column family for availability store meta information. pub col_meta: u32, + /// How long finalized data should be kept (in hours). 
+ pub keep_finalized_for: u32, } trait Clock: Send + Sync { @@ -475,10 +464,16 @@ impl AvailabilityStoreSubsystem { sync_oracle: Box, metrics: Metrics, ) -> Self { + let pruning_config = PruningConfig { + keep_unavailable_for: KEEP_UNAVAILABLE_FOR, + keep_finalized_for: Duration::from_secs(config.keep_finalized_for as u64 * 3600), + pruning_interval: PRUNING_INTERVAL, + }; + Self::with_pruning_config_and_clock( db, config, - PruningConfig::default(), + pruning_config, Box::new(SystemClock), sync_oracle, metrics, diff --git a/polkadot/node/core/av-store/src/tests.rs b/polkadot/node/core/av-store/src/tests.rs index 80043e56976b1..d1da058f9e4fa 100644 --- a/polkadot/node/core/av-store/src/tests.rs +++ b/polkadot/node/core/av-store/src/tests.rs @@ -43,7 +43,8 @@ mod columns { pub const NUM_COLUMNS: u32 = 2; } -const TEST_CONFIG: Config = Config { col_data: columns::DATA, col_meta: columns::META }; +const TEST_CONFIG: Config = + Config { col_data: columns::DATA, col_meta: columns::META, keep_finalized_for: 1 }; type VirtualOverseer = polkadot_node_subsystem_test_helpers::TestSubsystemContextHandle; diff --git a/polkadot/node/service/src/lib.rs b/polkadot/node/service/src/lib.rs index ce17ae5f51b90..077fb5c578a83 100644 --- a/polkadot/node/service/src/lib.rs +++ b/polkadot/node/service/src/lib.rs @@ -128,6 +128,9 @@ pub type FullClient = sc_service::TFullClient< /// imported and generated. const GRANDPA_JUSTIFICATION_PERIOD: u32 = 512; +/// The number of hours to keep finalized data in the availability store for live networks. +const KEEP_FINALIZED_FOR_LIVE_NETWORKS: u32 = 25; + /// Provides the header and block number for a hash. /// /// Decouples `sc_client_api::Backend` and `sp_blockchain::HeaderBackend`. @@ -628,6 +631,8 @@ pub struct NewFullParams { pub prepare_workers_soft_max_num: Option, /// An optional absolute number of pvf workers that can be spawned in the pvf prepare pool. 
pub prepare_workers_hard_max_num: Option, + /// How long finalized data should be kept in the availability store (in hours) + pub keep_finalized_for: Option, pub overseer_gen: OverseerGenerator, pub overseer_message_channel_capacity_override: Option, #[allow(dead_code)] @@ -691,11 +696,6 @@ impl IsParachainNode { } } -pub const AVAILABILITY_CONFIG: AvailabilityConfig = AvailabilityConfig { - col_data: parachains_db::REAL_COLUMNS.col_availability_data, - col_meta: parachains_db::REAL_COLUMNS.col_availability_meta, -}; - /// Create a new full node of arbitrary runtime and executor. /// /// This is an advanced feature and not recommended for general use. Generally, `build_full` is @@ -727,6 +727,7 @@ pub fn new_full< execute_workers_max_num, prepare_workers_soft_max_num, prepare_workers_hard_max_num, + keep_finalized_for, enable_approval_voting_parallel, }: NewFullParams, ) -> Result { @@ -972,11 +973,21 @@ pub fn new_full< let fetch_chunks_threshold = if config.chain_spec.is_polkadot() { None } else { Some(FETCH_CHUNKS_THRESHOLD) }; + let availability_config = AvailabilityConfig { + col_data: parachains_db::REAL_COLUMNS.col_availability_data, + col_meta: parachains_db::REAL_COLUMNS.col_availability_meta, + keep_finalized_for: if matches!(config.chain_spec.identify_chain(), Chain::Rococo) { + keep_finalized_for.unwrap_or(1) + } else { + KEEP_FINALIZED_FOR_LIVE_NETWORKS + }, + }; + Some(ExtendedOverseerGenArgs { keystore, parachains_db, candidate_validation_config, - availability_config: AVAILABILITY_CONFIG, + availability_config, pov_req_receiver, chunk_req_v1_receiver, chunk_req_v2_receiver, diff --git a/polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs b/polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs index 3300def2235ee..62588454471e9 100644 --- a/polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs +++ b/polkadot/node/subsystem-bench/src/lib/availability/av_store_helpers.rs @@ -26,7 +26,8 @@ mod columns 
{ pub const NUM_COLUMNS: u32 = 2; } -const TEST_CONFIG: Config = Config { col_data: columns::DATA, col_meta: columns::META }; +const TEST_CONFIG: Config = + Config { col_data: columns::DATA, col_meta: columns::META, keep_finalized_for: 1 }; pub fn new_av_store(dependencies: &TestEnvironmentDependencies) -> AvailabilityStoreSubsystem { let metrics = Metrics::try_register(&dependencies.registry).unwrap(); diff --git a/polkadot/node/test/service/src/lib.rs b/polkadot/node/test/service/src/lib.rs index 75fd0d9af3013..dea2682b61b6d 100644 --- a/polkadot/node/test/service/src/lib.rs +++ b/polkadot/node/test/service/src/lib.rs @@ -101,6 +101,7 @@ pub fn new_full( prepare_workers_hard_max_num: None, prepare_workers_soft_max_num: None, enable_approval_voting_parallel: false, + keep_finalized_for: None, }, ), sc_network::config::NetworkBackendType::Litep2p => @@ -123,6 +124,7 @@ pub fn new_full( prepare_workers_hard_max_num: None, prepare_workers_soft_max_num: None, enable_approval_voting_parallel: false, + keep_finalized_for: None, }, ), } diff --git a/polkadot/parachain/test-parachains/adder/collator/src/main.rs b/polkadot/parachain/test-parachains/adder/collator/src/main.rs index 416e58b0a8ac6..58fa3e841ef8e 100644 --- a/polkadot/parachain/test-parachains/adder/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/adder/collator/src/main.rs @@ -98,6 +98,7 @@ fn main() -> Result<()> { prepare_workers_hard_max_num: None, prepare_workers_soft_max_num: None, enable_approval_voting_parallel: false, + keep_finalized_for: None, }, ) .map_err(|e| e.to_string())?; diff --git a/polkadot/parachain/test-parachains/undying/collator/src/main.rs b/polkadot/parachain/test-parachains/undying/collator/src/main.rs index 9d993dd818b2f..8b18cbb422e87 100644 --- a/polkadot/parachain/test-parachains/undying/collator/src/main.rs +++ b/polkadot/parachain/test-parachains/undying/collator/src/main.rs @@ -100,6 +100,7 @@ fn main() -> Result<()> { prepare_workers_hard_max_num: None, 
prepare_workers_soft_max_num: None, enable_approval_voting_parallel: false, + keep_finalized_for: None, }, ) .map_err(|e| e.to_string())?; diff --git a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs index ab011bfc4ae12..ed56170cbcdec 100644 --- a/polkadot/runtime/parachains/src/assigner_coretime/tests.rs +++ b/polkadot/runtime/parachains/src/assigner_coretime/tests.rs @@ -20,13 +20,13 @@ use crate::{ assigner_coretime::{mock_helpers::GenesisConfigBuilder, pallet::Error, Schedule}, initializer::SessionChangeNotification, mock::{ - new_test_ext, Balances, CoretimeAssigner, OnDemand, Paras, ParasShared, RuntimeOrigin, - Scheduler, System, Test, + new_test_ext, CoretimeAssigner, OnDemand, Paras, ParasShared, RuntimeOrigin, Scheduler, + System, Test, }, paras::{ParaGenesisArgs, ParaKind}, scheduler::common::Assignment, }; -use frame_support::{assert_noop, assert_ok, pallet_prelude::*, traits::Currency}; +use frame_support::{assert_noop, assert_ok, pallet_prelude::*}; use pallet_broker::TaskId; use polkadot_primitives::{BlockNumber, Id as ParaId, SessionIndex, ValidationCode}; @@ -494,9 +494,9 @@ fn pop_assignment_for_core_works() { // Initialize the parathread, wait for it to be ready, then add an // on demand order to later pop with our Coretime assigner. 
schedule_blank_para(para_id, ParaKind::Parathread); - Balances::make_free_balance_be(&alice, amt); + on_demand::Credits::::insert(&alice, amt); run_to_block(1, |n| if n == 1 { Some(Default::default()) } else { None }); - assert_ok!(OnDemand::place_order_allow_death(RuntimeOrigin::signed(alice), amt, para_id)); + assert_ok!(OnDemand::place_order_with_credits(RuntimeOrigin::signed(alice), amt, para_id)); // Case 1: Assignment idle assert_ok!(CoretimeAssigner::assign_core( diff --git a/polkadot/runtime/parachains/src/coretime/benchmarking.rs b/polkadot/runtime/parachains/src/coretime/benchmarking.rs index 49e3d8a88c015..aaa4a4f9ee9a9 100644 --- a/polkadot/runtime/parachains/src/coretime/benchmarking.rs +++ b/polkadot/runtime/parachains/src/coretime/benchmarking.rs @@ -96,4 +96,14 @@ mod benchmarks { Some(BlockNumberFor::::from(20u32)), ) } + + #[benchmark] + fn credit_account() { + // Setup + let root_origin = ::RuntimeOrigin::root(); + let who: T::AccountId = whitelisted_caller(); + + #[extrinsic_call] + _(root_origin as ::RuntimeOrigin, who, 1_000_000u32.into()) + } } diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index 5656e92b90be0..e961d5fa76a8b 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -48,7 +48,7 @@ const LOG_TARGET: &str = "runtime::parachains::coretime"; pub trait WeightInfo { fn request_core_count() -> Weight; fn request_revenue_at() -> Weight; - //fn credit_account() -> Weight; + fn credit_account() -> Weight; fn assign_core(s: u32) -> Weight; } @@ -62,19 +62,18 @@ impl WeightInfo for TestWeightInfo { fn request_revenue_at() -> Weight { Weight::MAX } - // TODO: Add real benchmarking functionality for each of these to - // benchmarking.rs, then uncomment here and in trait definition. 
- //fn credit_account() -> Weight { - // Weight::MAX - //} + fn credit_account() -> Weight { + Weight::MAX + } fn assign_core(_s: u32) -> Weight { Weight::MAX } } /// Shorthand for the Balance type the runtime is using. -pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; +pub type BalanceOf = <::Currency as Currency< + ::AccountId, +>>::Balance; /// Broker pallet index on the coretime chain. Used to /// @@ -120,8 +119,6 @@ pub mod pallet { type RuntimeOrigin: From<::RuntimeOrigin> + Into::RuntimeOrigin>>; type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// The runtime's definition of a Currency. - type Currency: Currency; /// The ParaId of the coretime chain. #[pallet::constant] type BrokerId: Get; @@ -195,18 +192,19 @@ pub mod pallet { Self::notify_revenue(when) } - //// TODO Impl me! - ////#[pallet::weight(::WeightInfo::credit_account())] - //#[pallet::call_index(3)] - //pub fn credit_account( - // origin: OriginFor, - // _who: T::AccountId, - // _amount: BalanceOf, - //) -> DispatchResult { - // // Ignore requests not coming from the coretime chain or root. - // Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; - // Ok(()) - //} + #[pallet::weight(::WeightInfo::credit_account())] + #[pallet::call_index(3)] + pub fn credit_account( + origin: OriginFor, + who: T::AccountId, + amount: BalanceOf, + ) -> DispatchResult { + // Ignore requests not coming from the coretime chain or root. + Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; + + on_demand::Pallet::::credit_account(who, amount.saturated_into()); + Ok(()) + } /// Receive instructions from the `ExternalBrokerOrigin`, detailing how a specific core is /// to be used. 
diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index ee1990a7b618a..0ab2737640604 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -368,6 +368,9 @@ impl pallet_message_queue::WeightInfo for TestMessageQueueWeight { fn service_page_item() -> Weight { Weight::zero() } + fn set_service_head() -> Weight { + Weight::zero() + } fn bump_service_head() -> Weight { Weight::zero() } @@ -434,7 +437,6 @@ impl Get for BrokerPot { impl coretime::Config for Test { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; - type Currency = pallet_balances::Pallet; type BrokerId = BrokerId; type WeightInfo = crate::coretime::TestWeightInfo; type SendXcm = DummyXcmSender; diff --git a/polkadot/runtime/parachains/src/on_demand/benchmarking.rs b/polkadot/runtime/parachains/src/on_demand/benchmarking.rs index d494a77a5c4db..4a996848bb029 100644 --- a/polkadot/runtime/parachains/src/on_demand/benchmarking.rs +++ b/polkadot/runtime/parachains/src/on_demand/benchmarking.rs @@ -91,6 +91,20 @@ mod benchmarks { _(RawOrigin::Signed(caller.into()), BalanceOf::::max_value(), para_id) } + #[benchmark] + fn place_order_with_credits(s: Linear<1, MAX_FILL_BENCH>) { + // Setup + let caller: T::AccountId = whitelisted_caller(); + let para_id = ParaId::from(111u32); + init_parathread::(para_id); + Credits::::insert(&caller, BalanceOf::::max_value()); + + Pallet::::populate_queue(para_id, s); + + #[extrinsic_call] + _(RawOrigin::Signed(caller.into()), BalanceOf::::max_value(), para_id) + } + impl_benchmark_test_suite!( Pallet, crate::mock::new_test_ext( diff --git a/polkadot/runtime/parachains/src/on_demand/mod.rs b/polkadot/runtime/parachains/src/on_demand/mod.rs index 66400eb00fd9d..c8ff4b1ae4a5d 100644 --- a/polkadot/runtime/parachains/src/on_demand/mod.rs +++ b/polkadot/runtime/parachains/src/on_demand/mod.rs @@ -73,6 +73,7 @@ pub use pallet::*; pub trait WeightInfo { fn 
place_order_allow_death(s: u32) -> Weight; fn place_order_keep_alive(s: u32) -> Weight; + fn place_order_with_credits(s: u32) -> Weight; } /// A weight info that is only suitable for testing. @@ -86,6 +87,19 @@ impl WeightInfo for TestWeightInfo { fn place_order_keep_alive(_: u32) -> Weight { Weight::MAX } + + fn place_order_with_credits(_: u32) -> Weight { + Weight::MAX + } +} + +/// Defines how the account wants to pay for on-demand. +#[derive(Encode, Decode, TypeInfo, Debug, PartialEq, Clone, Eq)] +enum PaymentType { + /// Use credits to purchase on-demand coretime. + Credits, + /// Use account's free balance to purchase on-demand coretime. + Balance, } #[frame_support::pallet] @@ -169,6 +183,11 @@ pub mod pallet { pub type Revenue = StorageValue<_, BoundedVec, T::MaxHistoricalRevenue>, ValueQuery>; + /// Keeps track of credits owned by each account. + #[pallet::storage] + pub type Credits = + StorageMap<_, Blake2_128Concat, T::AccountId, BalanceOf, ValueQuery>; + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -176,6 +195,8 @@ pub mod pallet { OnDemandOrderPlaced { para_id: ParaId, spot_price: BalanceOf, ordered_by: T::AccountId }, /// The value of the spot price has likely changed SpotPriceSet { spot_price: BalanceOf }, + /// An account was given credits. + AccountCredited { who: T::AccountId, amount: BalanceOf }, } #[pallet::error] @@ -185,6 +206,8 @@ pub mod pallet { /// The current spot price is higher than the max amount specified in the `place_order` /// call, making it invalid. SpotPriceHigherThanMaxAmount, + /// The account doesn't have enough credits to purchase on-demand coretime. 
+ InsufficientCredits, } #[pallet::hooks] @@ -235,13 +258,21 @@ pub mod pallet { /// - `OnDemandOrderPlaced` #[pallet::call_index(0)] #[pallet::weight(::WeightInfo::place_order_allow_death(QueueStatus::::get().size()))] + #[allow(deprecated)] + #[deprecated(note = "This will be removed in favor of using `place_order_with_credits`")] pub fn place_order_allow_death( origin: OriginFor, max_amount: BalanceOf, para_id: ParaId, ) -> DispatchResult { let sender = ensure_signed(origin)?; - Pallet::::do_place_order(sender, max_amount, para_id, AllowDeath) + Pallet::::do_place_order( + sender, + max_amount, + para_id, + AllowDeath, + PaymentType::Balance, + ) } /// Same as the [`place_order_allow_death`](Self::place_order_allow_death) call , but with a @@ -261,13 +292,55 @@ pub mod pallet { /// - `OnDemandOrderPlaced` #[pallet::call_index(1)] #[pallet::weight(::WeightInfo::place_order_keep_alive(QueueStatus::::get().size()))] + #[allow(deprecated)] + #[deprecated(note = "This will be removed in favor of using `place_order_with_credits`")] pub fn place_order_keep_alive( origin: OriginFor, max_amount: BalanceOf, para_id: ParaId, ) -> DispatchResult { let sender = ensure_signed(origin)?; - Pallet::::do_place_order(sender, max_amount, para_id, KeepAlive) + Pallet::::do_place_order( + sender, + max_amount, + para_id, + KeepAlive, + PaymentType::Balance, + ) + } + + /// Create a single on demand core order with credits. + /// Will charge the owner's on-demand credit account the spot price for the current block. + /// + /// Parameters: + /// - `origin`: The sender of the call, on-demand credits will be withdrawn from this + /// account. + /// - `max_amount`: The maximum number of credits to spend from the origin to place an + /// order. + /// - `para_id`: A `ParaId` the origin wants to provide blockspace for. 
+ /// + /// Errors: + /// - `InsufficientCredits` + /// - `QueueFull` + /// - `SpotPriceHigherThanMaxAmount` + /// + /// Events: + /// - `OnDemandOrderPlaced` + #[pallet::call_index(2)] + #[pallet::weight(::WeightInfo::place_order_with_credits(QueueStatus::::get().size()))] + pub fn place_order_with_credits( + origin: OriginFor, + max_amount: BalanceOf, + para_id: ParaId, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + Pallet::::do_place_order( + sender, + max_amount, + para_id, + KeepAlive, + PaymentType::Credits, + ) } } } @@ -349,6 +422,18 @@ where }); } + /// Adds credits to the specified account. + /// + /// Parameters: + /// - `who`: Credit receiver. + /// - `amount`: The amount of new credits the account will receive. + pub fn credit_account(who: T::AccountId, amount: BalanceOf) { + Credits::::mutate(who.clone(), |credits| { + *credits = credits.saturating_add(amount); + }); + Pallet::::deposit_event(Event::::AccountCredited { who, amount }); + } + /// Helper function for `place_order_*` calls. Used to differentiate between placing orders /// with a keep alive check or to allow the account to be reaped. The amount charged is /// stored to the pallet account to be later paid out as revenue. @@ -358,6 +443,7 @@ where /// - `max_amount`: The maximum balance to withdraw from the origin to place an order. /// - `para_id`: A `ParaId` the origin wants to provide blockspace for. /// - `existence_requirement`: Whether or not to ensure that the account will not be reaped. + /// - `payment_type`: Defines how the user wants to pay for on-demand. /// /// Errors: /// - `InsufficientBalance`: from the Currency implementation @@ -371,6 +457,7 @@ where max_amount: BalanceOf, para_id: ParaId, existence_requirement: ExistenceRequirement, + payment_type: PaymentType, ) -> DispatchResult { let config = configuration::ActiveConfig::::get(); @@ -391,22 +478,39 @@ where Error::::QueueFull ); - // Charge the sending account the spot price. 
The amount will be teleported to the - // broker chain once it requests revenue information. - let amt = T::Currency::withdraw( - &sender, - spot_price, - WithdrawReasons::FEE, - existence_requirement, - )?; - - // Consume the negative imbalance and deposit it into the pallet account. Make sure the - // account preserves even without the existential deposit. - let pot = Self::account_id(); - if !System::::account_exists(&pot) { - System::::inc_providers(&pot); + match payment_type { + PaymentType::Balance => { + // Charge the sending account the spot price. The amount will be teleported to + // the broker chain once it requests revenue information. + let amt = T::Currency::withdraw( + &sender, + spot_price, + WithdrawReasons::FEE, + existence_requirement, + )?; + + // Consume the negative imbalance and deposit it into the pallet account. Make + // sure the account preserves even without the existential deposit. + let pot = Self::account_id(); + if !System::::account_exists(&pot) { + System::::inc_providers(&pot); + } + T::Currency::resolve_creating(&pot, amt); + }, + PaymentType::Credits => { + let credits = Credits::::get(&sender); + + // Charge the sending account the spot price in credits. + let new_credits_value = + credits.checked_sub(&spot_price).ok_or(Error::::InsufficientCredits)?; + + if new_credits_value.is_zero() { + Credits::::remove(&sender); + } else { + Credits::::insert(&sender, new_credits_value); + } + }, } - T::Currency::resolve_creating(&pot, amt); // Add the amount to the current block's (index 0) revenue information. Revenue::::mutate(|bounded_revenue| { @@ -619,7 +723,7 @@ where /// Increases the affinity of a `ParaId` to a specified `CoreIndex`. /// Adds to the count of the `CoreAffinityCount` if an entry is found and the core_index - /// matches. A non-existent entry will be initialized with a count of 1 and uses the supplied + /// matches. A non-existent entry will be initialized with a count of 1 and uses the supplied /// `CoreIndex`. 
fn increase_affinity(para_id: ParaId, core_index: CoreIndex) { ParaIdAffinity::::mutate(para_id, |maybe_affinity| match maybe_affinity { diff --git a/polkadot/runtime/parachains/src/on_demand/tests.rs b/polkadot/runtime/parachains/src/on_demand/tests.rs index 7da16942c7ad6..a435598d8f555 100644 --- a/polkadot/runtime/parachains/src/on_demand/tests.rs +++ b/polkadot/runtime/parachains/src/on_demand/tests.rs @@ -98,6 +98,7 @@ fn place_order_run_to_blocknumber(para_id: ParaId, blocknumber: Option::get(); + // Initialize the parathread and wait for it to be ready. schedule_blank_para(para_id, ParaKind::Parathread); Balances::make_free_balance_be(&alice, amt); @@ -327,6 +332,71 @@ fn place_order_keep_alive_keeps_alive() { OnDemand::place_order_keep_alive(RuntimeOrigin::signed(alice), max_amt, para_id), BalancesError::::InsufficientBalance ); + + Balances::make_free_balance_be(&alice, max_amt); + assert_ok!(OnDemand::place_order_keep_alive( + RuntimeOrigin::signed(alice), + max_amt, + para_id + ),); + + let queue_status = QueueStatus::::get(); + let spot_price = queue_status.traffic.saturating_mul_int( + config.scheduler_params.on_demand_base_fee.saturated_into::>(), + ); + assert_eq!(Balances::free_balance(&alice), max_amt.saturating_sub(spot_price)); + assert_eq!( + FreeEntries::::get().pop(), + Some(EnqueuedOrder::new(QueueIndex(0), para_id)) + ); + }); +} + +#[test] +fn place_order_with_credits() { + let alice = 1u64; + let initial_credit = 10_000_000u128; + let para_id = ParaId::from(111); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let config = configuration::ActiveConfig::::get(); + + // Initialize the parathread and wait for it to be ready. 
+ schedule_blank_para(para_id, ParaKind::Parathread); + OnDemand::credit_account(alice, initial_credit); + assert_eq!(Credits::::get(alice), initial_credit); + + assert!(!Paras::is_parathread(para_id)); + run_to_block(100, |n| if n == 100 { Some(Default::default()) } else { None }); + assert!(Paras::is_parathread(para_id)); + + let queue_status = QueueStatus::::get(); + let spot_price = queue_status.traffic.saturating_mul_int( + config.scheduler_params.on_demand_base_fee.saturated_into::>(), + ); + + // Create an order and pay for it with credits. + assert_ok!(OnDemand::place_order_with_credits( + RuntimeOrigin::signed(alice), + initial_credit, + para_id + )); + assert_eq!(Credits::::get(alice), initial_credit.saturating_sub(spot_price)); + assert_eq!( + FreeEntries::::get().pop(), + Some(EnqueuedOrder::new(QueueIndex(0), para_id)) + ); + + // Insufficient credits: + Credits::::insert(alice, 1u128); + assert_noop!( + OnDemand::place_order_with_credits( + RuntimeOrigin::signed(alice), + 1_000_000u128, + para_id + ), + Error::::InsufficientCredits + ); }); } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index f6729dd976257..054ec2aa4a931 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -344,6 +344,7 @@ impl pallet_scheduler::Config for Runtime { type WeightInfo = weights::pallet_scheduler::WeightInfo; type OriginPrivilegeCmp = OriginPrivilegeCmp; type Preimages = Preimage; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -1130,7 +1131,6 @@ impl Get for BrokerPot { impl coretime::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; - type Currency = Balances; type BrokerId = BrokerId; type BrokerPotLocation = BrokerPot; type WeightInfo = weights::polkadot_runtime_parachains_coretime::WeightInfo; @@ -2447,7 +2447,7 @@ sp_api::impl_runtime_apis! 
{ Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; @@ -2470,7 +2470,7 @@ sp_api::impl_runtime_apis! { alloc::string::String, > { use frame_support::traits::WhitelistedStorageKeys; - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; use frame_system_benchmarking::Pallet as SystemBench; use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; use frame_benchmarking::baseline::Pallet as Baseline; diff --git a/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs b/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs index 6ebfcd060b642..8956470fdd8a2 100644 --- a/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs +++ b/polkadot/runtime/rococo/src/weights/pallet_message_queue.rs @@ -17,27 +17,28 @@ //! Autogenerated weights for `pallet_message_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-02-29, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-bn-ce5rx-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 +//! HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// frame-omni-bencher +// v1 // benchmark // pallet -// --chain=rococo-dev +// --extrinsic=* +// --runtime=target/production/wbuild/rococo-runtime/rococo_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights +// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --heap-pages=4096 // --no-storage-info -// --no-median-slopes // --no-min-squares -// --pallet=pallet_message_queue -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --header=./polkadot/file_header.txt -// --output=./polkadot/runtime/rococo/src/weights/ +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -58,8 +59,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `281` // Estimated: `6050` - // Minimum execution time: 12_830_000 picoseconds. - Weight::from_parts(13_476_000, 0) + // Minimum execution time: 13_864_000 picoseconds. + Weight::from_parts(14_513_000, 0) .saturating_add(Weight::from_parts(0, 6050)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) @@ -72,8 +73,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `281` // Estimated: `6050` - // Minimum execution time: 11_583_000 picoseconds. - Weight::from_parts(11_902_000, 0) + // Minimum execution time: 12_517_000 picoseconds. + Weight::from_parts(13_107_000, 0) .saturating_add(Weight::from_parts(0, 6050)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) @@ -84,8 +85,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `42` // Estimated: `3520` - // Minimum execution time: 3_801_000 picoseconds. 
- Weight::from_parts(3_943_000, 0) + // Minimum execution time: 5_125_000 picoseconds. + Weight::from_parts(5_429_000, 0) .saturating_add(Weight::from_parts(0, 3520)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -96,8 +97,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `115` // Estimated: `36283` - // Minimum execution time: 5_517_000 picoseconds. - Weight::from_parts(5_861_000, 0) + // Minimum execution time: 7_203_000 picoseconds. + Weight::from_parts(7_737_000, 0) .saturating_add(Weight::from_parts(0, 36283)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -108,8 +109,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `115` // Estimated: `36283` - // Minimum execution time: 5_870_000 picoseconds. - Weight::from_parts(6_028_000, 0) + // Minimum execution time: 7_633_000 picoseconds. + Weight::from_parts(7_861_000, 0) .saturating_add(Weight::from_parts(0, 36283)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) @@ -122,8 +123,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 80_681_000 picoseconds. - Weight::from_parts(81_818_000, 0) + // Minimum execution time: 86_773_000 picoseconds. + Weight::from_parts(88_764_000, 0) .saturating_add(Weight::from_parts(0, 0)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -135,12 +136,26 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `220` // Estimated: `3520` - // Minimum execution time: 8_641_000 picoseconds. - Weight::from_parts(8_995_000, 0) + // Minimum execution time: 8_860_000 picoseconds. 
+ Weight::from_parts(9_403_000, 0) .saturating_add(Weight::from_parts(0, 3520)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `220` + // Estimated: `3520` + // Minimum execution time: 7_822_000 picoseconds. + Weight::from_parts(8_172_000, 0) + .saturating_add(Weight::from_parts(0, 3520)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::Pages` (r:1 w:1) @@ -153,8 +168,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 38_473_000 picoseconds. - Weight::from_parts(39_831_000, 0) + // Minimum execution time: 46_314_000 picoseconds. + Weight::from_parts(47_541_000, 0) .saturating_add(Weight::from_parts(0, 36283)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) @@ -171,8 +186,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 48_717_000 picoseconds. - Weight::from_parts(49_724_000, 0) + // Minimum execution time: 56_683_000 picoseconds. 
+ Weight::from_parts(58_600_000, 0) .saturating_add(Weight::from_parts(0, 36283)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) @@ -189,8 +204,8 @@ impl pallet_message_queue::WeightInfo for WeightInfo // Proof Size summary in bytes: // Measured: `32945` // Estimated: `36283` - // Minimum execution time: 72_718_000 picoseconds. - Weight::from_parts(74_081_000, 0) + // Minimum execution time: 82_400_000 picoseconds. + Weight::from_parts(84_090_000, 0) .saturating_add(Weight::from_parts(0, 36283)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_coretime.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_coretime.rs index b2329c098cead..94dc7a4e07508 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_coretime.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_coretime.rs @@ -86,6 +86,22 @@ impl polkadot_runtime_parachains::coretime::WeightInfo .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn credit_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_519_000 picoseconds. 
+ Weight::from_parts(7_803_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `CoretimeAssignmentProvider::CoreSchedules` (r:0 w:1) diff --git a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_on_demand.rs b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_on_demand.rs index 1dd62d129f9a0..f251ad5f6b86b 100644 --- a/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_on_demand.rs +++ b/polkadot/runtime/rococo/src/weights/polkadot_runtime_parachains_on_demand.rs @@ -102,4 +102,28 @@ impl polkadot_runtime_parachains::on_demand::WeightInfo .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } + /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 9999]`. 
+ fn place_order_with_credits(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `270 + s * (8 ±0)` + // Estimated: `3733 + s * (8 ±0)` + // Minimum execution time: 28_422_000 picoseconds. + Weight::from_parts(28_146_882, 0) + .saturating_add(Weight::from_parts(0, 3733)) + // Standard Error: 140 + .saturating_add(Weight::from_parts(21_283, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) + } } diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 1a19b637b798a..f592dc2b61df0 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -659,7 +659,6 @@ impl SendXcm for DummyXcmSender { impl coretime::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; - type Currency = pallet_balances::Pallet; type BrokerId = BrokerId; type WeightInfo = crate::coretime::TestWeightInfo; type SendXcm = DummyXcmSender; diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 7fd2ac53530ac..cade86e586690 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -250,6 +250,7 @@ impl pallet_scheduler::Config for Runtime { type WeightInfo = weights::pallet_scheduler::WeightInfo; type OriginPrivilegeCmp = frame_support::traits::EqualPrivilegeOnly; type Preimages = Preimage; + type BlockNumberProvider = frame_system::Pallet; } parameter_types! { @@ -1356,7 +1357,6 @@ impl Get for BrokerPot { impl coretime::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; type RuntimeEvent = RuntimeEvent; - type Currency = Balances; type BrokerId = BrokerId; type BrokerPotLocation = BrokerPot; type WeightInfo = weights::polkadot_runtime_parachains_coretime::WeightInfo; @@ -2611,7 +2611,7 @@ sp_api::impl_runtime_apis! 
{ Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use frame_support::traits::StorageInfoTrait; use pallet_session_benchmarking::Pallet as SessionBench; @@ -2639,7 +2639,7 @@ sp_api::impl_runtime_apis! { alloc::string::String, > { use frame_support::traits::WhitelistedStorageKeys; - use frame_benchmarking::{Benchmarking, BenchmarkBatch, BenchmarkError}; + use frame_benchmarking::{BenchmarkBatch, BenchmarkError}; use sp_storage::TrackedStorageKey; // Trying to add benchmarks directly to some pallets caused cyclic dependency issues. // To get around that, we separated the benchmarks into its own crate. diff --git a/polkadot/runtime/westend/src/weights/pallet_message_queue.rs b/polkadot/runtime/westend/src/weights/pallet_message_queue.rs index 17eff94878198..8679769eed247 100644 --- a/polkadot/runtime/westend/src/weights/pallet_message_queue.rs +++ b/polkadot/runtime/westend/src/weights/pallet_message_queue.rs @@ -16,28 +16,29 @@ //! Autogenerated weights for `pallet_message_queue` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner--ss9ysm1-project-163-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// frame-omni-bencher +// v1 // benchmark // pallet -// --chain=westend-dev +// --extrinsic=* +// --runtime=target/production/wbuild/westend-runtime/westend_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights +// --wasm-execution=compiled // --steps=50 // --repeat=20 +// --heap-pages=4096 // --no-storage-info -// --no-median-slopes // --no-min-squares -// --pallet=pallet_message_queue -// --extrinsic=* -// --execution=wasm -// --wasm-execution=compiled -// --header=./file_header.txt -// --output=./runtime/westend/src/weights/ +// --no-median-slopes #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,142 +51,161 @@ use core::marker::PhantomData; /// Weight functions for `pallet_message_queue`. pub struct WeightInfo(PhantomData); impl pallet_message_queue::WeightInfo for WeightInfo { - /// Storage: MessageQueue ServiceHead (r:1 w:0) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:0) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `248` + // Measured: `281` // Estimated: `6050` - // Minimum execution time: 12_154_000 picoseconds. - Weight::from_parts(12_559_000, 0) + // Minimum execution time: 17_599_000 picoseconds. 
+ Weight::from_parts(18_154_000, 0) .saturating_add(Weight::from_parts(0, 6050)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue BookStateFor (r:2 w:2) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:2 w:2) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `248` + // Measured: `281` // Estimated: `6050` - // Minimum execution time: 11_166_000 picoseconds. - Weight::from_parts(11_526_000, 0) + // Minimum execution time: 16_074_000 picoseconds. + Weight::from_parts(16_781_000, 0) .saturating_add(Weight::from_parts(0, 6050)) .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: // Measured: `42` // Estimated: `3520` - // Minimum execution time: 4_160_000 picoseconds. - Weight::from_parts(4_445_000, 0) + // Minimum execution time: 5_094_000 picoseconds. 
+ Weight::from_parts(5_356_000, 0) .saturating_add(Weight::from_parts(0, 3520)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(131122), added: 133597, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(131122), added: 133597, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: // Measured: `115` // Estimated: `134587` - // Minimum execution time: 5_872_000 picoseconds. - Weight::from_parts(6_105_000, 0) + // Minimum execution time: 10_998_000 picoseconds. + Weight::from_parts(11_430_000, 0) .saturating_add(Weight::from_parts(0, 134587)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(131122), added: 133597, mode: MaxEncodedLen) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(131122), added: 133597, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: // Measured: `115` // Estimated: `134587` - // Minimum execution time: 6_145_000 picoseconds. - Weight::from_parts(6_522_000, 0) + // Minimum execution time: 11_194_000 picoseconds. 
+ Weight::from_parts(11_628_000, 0) .saturating_add(Weight::from_parts(0, 134587)) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `MessageQueue::BookStateFor` (r:0 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:0 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(131122), added: 133597, mode: `MaxEncodedLen`) fn service_page_item() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 173_117_000 picoseconds. - Weight::from_parts(175_271_000, 0) + // Minimum execution time: 331_274_000 picoseconds. + Weight::from_parts(341_620_000, 0) .saturating_add(Weight::from_parts(0, 0)) + .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: MessageQueue ServiceHead (r:1 w:1) - /// Proof: MessageQueue ServiceHead (max_values: Some(1), max_size: Some(6), added: 501, mode: MaxEncodedLen) - /// Storage: MessageQueue BookStateFor (r:1 w:0) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) + /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `149` + // Measured: `220` // Estimated: `3520` - // Minimum execution time: 6_429_000 picoseconds. - Weight::from_parts(6_743_000, 0) + // Minimum execution time: 12_258_000 picoseconds. 
+ Weight::from_parts(12_885_000, 0) .saturating_add(Weight::from_parts(0, 3520)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(131122), added: 133597, mode: MaxEncodedLen) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(6), added: 501, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `220` + // Estimated: `3520` + // Minimum execution time: 11_292_000 picoseconds. 
+ Weight::from_parts(11_740_000, 0) + .saturating_add(Weight::from_parts(0, 3520)) + .saturating_add(T::DbWeight::get().reads(1)) + .saturating_add(T::DbWeight::get().writes(1)) + } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(131122), added: 133597, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn reap_page() -> Weight { // Proof Size summary in bytes: // Measured: `131252` // Estimated: `134587` - // Minimum execution time: 97_068_000 picoseconds. - Weight::from_parts(100_467_000, 0) + // Minimum execution time: 113_626_000 picoseconds. 
+ Weight::from_parts(116_218_000, 0) .saturating_add(Weight::from_parts(0, 134587)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(131122), added: 133597, mode: MaxEncodedLen) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(131122), added: 133597, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: // Measured: `131252` // Estimated: `134587` - // Minimum execution time: 126_674_000 picoseconds. - Weight::from_parts(134_114_000, 0) + // Minimum execution time: 146_573_000 picoseconds. 
+ Weight::from_parts(149_880_000, 0) .saturating_add(Weight::from_parts(0, 134587)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: MessageQueue BookStateFor (r:1 w:1) - /// Proof: MessageQueue BookStateFor (max_values: None, max_size: Some(55), added: 2530, mode: MaxEncodedLen) - /// Storage: MessageQueue Pages (r:1 w:1) - /// Proof: MessageQueue Pages (max_values: None, max_size: Some(131122), added: 133597, mode: MaxEncodedLen) - /// Storage: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Proof Skipped: unknown `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) - /// Storage: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) - /// Proof Skipped: unknown `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(55), added: 2530, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::Pages` (r:1 w:1) + /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(131122), added: 133597, mode: `MaxEncodedLen`) + /// Storage: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Proof: UNKNOWN KEY `0x3a72656c61795f64697370617463685f71756575655f72656d61696e696e675f` (r:0 w:1) + /// Storage: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) + /// Proof: UNKNOWN KEY `0xf5207f03cfdce586301014700e2c2593fad157e461d71fd4c1f936839a5f1f3e` (r:0 w:1) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: // Measured: `131252` // Estimated: `134587` - // Minimum execution time: 204_926_000 picoseconds. - Weight::from_parts(221_900_000, 0) + // Minimum execution time: 223_355_000 picoseconds. 
+ Weight::from_parts(232_747_000, 0) .saturating_add(Weight::from_parts(0, 134587)) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(4)) diff --git a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_coretime.rs b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_coretime.rs index 9df382875f5f1..a36fefb704deb 100644 --- a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_coretime.rs +++ b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_coretime.rs @@ -86,6 +86,22 @@ impl polkadot_runtime_parachains::coretime::WeightInfo .saturating_add(T::DbWeight::get().reads(3)) .saturating_add(T::DbWeight::get().writes(1)) } + /// Storage: `Configuration::PendingConfigs` (r:1 w:1) + /// Proof: `Configuration::PendingConfigs` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `Configuration::BypassConsistencyCheck` (r:1 w:0) + /// Proof: `Configuration::BypassConsistencyCheck` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `ParasShared::CurrentSessionIndex` (r:1 w:0) + /// Proof: `ParasShared::CurrentSessionIndex` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + fn credit_account() -> Weight { + // Proof Size summary in bytes: + // Measured: `151` + // Estimated: `1636` + // Minimum execution time: 7_519_000 picoseconds. 
+ Weight::from_parts(7_803_000, 0) + .saturating_add(Weight::from_parts(0, 1636)) + .saturating_add(T::DbWeight::get().reads(3)) + .saturating_add(T::DbWeight::get().writes(1)) + } /// Storage: `CoretimeAssignmentProvider::CoreDescriptors` (r:1 w:1) /// Proof: `CoretimeAssignmentProvider::CoreDescriptors` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `CoretimeAssignmentProvider::CoreSchedules` (r:0 w:1) diff --git a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_on_demand.rs b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_on_demand.rs index fc7efa6edfcf3..2e84319d0b628 100644 --- a/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_on_demand.rs +++ b/polkadot/runtime/westend/src/weights/polkadot_runtime_parachains_on_demand.rs @@ -96,4 +96,28 @@ impl polkadot_runtime_parachains::on_demand::WeightInfo .saturating_add(T::DbWeight::get().writes(3)) .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) } + /// Storage: `OnDemandAssignmentProvider::QueueStatus` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::QueueStatus` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:0) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `OnDemandAssignmentProvider::Revenue` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::Revenue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::ParaIdAffinity` (r:1 w:0) + /// Proof: `OnDemandAssignmentProvider::ParaIdAffinity` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `OnDemandAssignmentProvider::FreeEntries` (r:1 w:1) + /// Proof: `OnDemandAssignmentProvider::FreeEntries` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + /// The range of component `s` is `[1, 9999]`. 
+ fn place_order_with_credits(s: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `270 + s * (8 ±0)` + // Estimated: `3733 + s * (8 ±0)` + // Minimum execution time: 28_422_000 picoseconds. + Weight::from_parts(28_146_882, 0) + .saturating_add(Weight::from_parts(0, 3733)) + // Standard Error: 140 + .saturating_add(Weight::from_parts(21_283, 0).saturating_mul(s.into())) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) + .saturating_add(Weight::from_parts(0, 8).saturating_mul(s.into())) + } } diff --git a/polkadot/xcm/xcm-builder/src/fungible_adapter.rs b/polkadot/xcm/xcm-builder/src/fungible_adapter.rs index 25a705a39eb73..2da772deb0ed7 100644 --- a/polkadot/xcm/xcm-builder/src/fungible_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/fungible_adapter.rs @@ -20,11 +20,7 @@ use super::MintLocation; use core::{marker::PhantomData, result}; use frame_support::traits::{ tokens::{ - fungible, - Fortitude::Polite, - Precision::Exact, - Preservation::{Expendable, Preserve}, - Provenance::Minted, + fungible, Fortitude::Polite, Precision::Exact, Preservation::Expendable, Provenance::Minted, }, Get, }; @@ -64,7 +60,7 @@ impl< .ok_or(MatchError::AccountIdConversionFailed)?; let dest = AccountIdConverter::convert_location(to) .ok_or(MatchError::AccountIdConversionFailed)?; - Fungible::transfer(&source, &dest, amount, Preserve) + Fungible::transfer(&source, &dest, amount, Expendable) .map_err(|error| XcmError::FailedToTransactAsset(error.into()))?; Ok(what.clone().into()) } diff --git a/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs b/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs index a259afc6e6825..59b4ccb13d0c5 100644 --- a/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs +++ b/polkadot/xcm/xcm-builder/src/fungibles_adapter.rs @@ -19,10 +19,7 @@ use core::{marker::PhantomData, result}; use frame_support::traits::{ tokens::{ - fungibles, - Fortitude::Polite, - Precision::Exact, - Preservation::{Expendable, 
Preserve}, + fungibles, Fortitude::Polite, Precision::Exact, Preservation::Expendable, Provenance::Minted, }, Contains, Get, @@ -58,7 +55,7 @@ impl< .ok_or(MatchError::AccountIdConversionFailed)?; let dest = AccountIdConverter::convert_location(to) .ok_or(MatchError::AccountIdConversionFailed)?; - Assets::transfer(asset_id, &source, &dest, amount, Preserve) + Assets::transfer(asset_id, &source, &dest, amount, Expendable) .map_err(|e| XcmError::FailedToTransactAsset(e.into()))?; Ok(what.clone().into()) } diff --git a/polkadot/zombienet-sdk-tests/Cargo.toml b/polkadot/zombienet-sdk-tests/Cargo.toml index ba7517ddce663..69a1d0375aabc 100644 --- a/polkadot/zombienet-sdk-tests/Cargo.toml +++ b/polkadot/zombienet-sdk-tests/Cargo.toml @@ -18,6 +18,7 @@ serde_json = { workspace = true } subxt = { workspace = true, features = ["substrate-compat"] } subxt-signer = { workspace = true } tokio = { workspace = true, features = ["rt-multi-thread"] } +tokio-util = { workspace = true, features = ["time"] } zombienet-sdk = { workspace = true } [features] diff --git a/polkadot/zombienet-sdk-tests/tests/disabling/mod.rs b/polkadot/zombienet-sdk-tests/tests/disabling/mod.rs new file mode 100644 index 0000000000000..c1806e451bf65 --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/disabling/mod.rs @@ -0,0 +1,4 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +mod slashing; diff --git a/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs b/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs new file mode 100644 index 0000000000000..2983ace629c6d --- /dev/null +++ b/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs @@ -0,0 +1,171 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +//! Test past-session slashing when a malicious validator backs an invalid +//! candidate and a dispute concluding in a future session. We achieve that by +//! making some of the honest nodes go offline. 
+ +use anyhow::anyhow; + +use crate::helpers::{assert_blocks_are_being_finalized, assert_para_throughput}; +use polkadot_primitives::{BlockNumber, CandidateHash, DisputeState, Id as ParaId, SessionIndex}; +use serde_json::json; +use subxt::{OnlineClient, PolkadotConfig}; +use tokio::time::Duration; +use tokio_util::time::FutureExt; +use zombienet_sdk::NetworkConfigBuilder; + +#[tokio::test(flavor = "multi_thread")] +async fn dispute_past_session_slashing() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let images = zombienet_sdk::environment::get_images_from_env(); + + let config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + r.with_chain("westend-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![ + "--no-hardware-benchmarks".into(), + "-lparachain=debug,runtime=debug".into(), + ]) + .with_genesis_overrides(json!({ + "patch": { + "configuration": { + "config": { + "scheduler_params": { + "group_rotation_frequency": 3, + "max_validators_per_core": 1, + }, + "needed_approvals": 2 + } + } + } + })) + .with_node(|node| node.with_name("honest-validator-0")) + .with_node(|node| node.with_name("honest-validator-1")) + .with_node(|node| node.with_name("honest-flaky-validator-0")) + .with_node(|node| { + node.with_name("malicious-backer") + .with_image( + std::env::var("MALUS_IMAGE") + .unwrap_or("docker.io/paritypr/malus".to_string()) + .as_str(), + ) + .with_command("malus") + .with_subcommand("suggest-garbage-candidate") + .with_args(vec![ + "--no-hardware-benchmarks".into(), + "--insecure-validator-i-know-what-i-do".into(), + "-lMALUS=trace,parachain=debug".into(), + ]) + }) + }) + .with_parachain(|p| { + p.with_id(1337) + .with_default_command("polkadot-parachain") + .with_default_image(images.cumulus.as_str()) + .with_default_args(vec!["-lparachain=debug".into()]) + 
.with_collator(|n| n.with_name("collator-1337")) + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + })?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let malus = network.get_node("malicious-backer")?; + malus.pause().await?; + + let honest = network.get_node("honest-validator-0")?; + let relay_client: OnlineClient = honest.wait_client().await?; + + // Wait for some para blocks being produced + assert_para_throughput(&relay_client, 20, [(ParaId::from(1337), 10..20)].into_iter().collect()) + .await?; + + // Let's initiate a dispute + malus.resume().await?; + // Pause flaky nodes, so a dispute doesn't conclude + let flaky_0 = network.get_node("honest-flaky-validator-0")?; + flaky_0.pause().await?; + + // wait for a dispute to be initiated + let mut best_blocks = relay_client.blocks().subscribe_best().await?; + let mut dispute_session: u32 = u32::MAX; + while let Some(block) = best_blocks.next().await { + // NOTE: we can't use `at_latest` here, because it will utilize latest *finalized* block + // and finality is stalled... 
+ let disputes = relay_client + .runtime_api() + .at(block?.hash()) + .call_raw::)>>( + "ParachainHost_disputes", + None, + ) + .await?; + if let Some((session, _, _)) = disputes.first() { + dispute_session = *session; + break + } + } + + assert_ne!(dispute_session, u32::MAX, "dispute should be initiated"); + log::info!("Dispute initiated, now waiting for a new session"); + + while let Some(block) = best_blocks.next().await { + let current_session = relay_client + .runtime_api() + .at(block?.hash()) + .call_raw::("ParachainHost_session_index_for_child", None) + .await?; + if current_session > dispute_session { + break + } + } + + // We don't need malus anymore + malus.pause().await?; + + let concluded_dispute_metric = + "polkadot_parachain_candidate_dispute_concluded{validity=\"invalid\"}"; + + let timeout_secs: u64 = 120; + // with one offline honest node, dispute should not conclude + honest + .wait_metric_with_timeout(concluded_dispute_metric, |d| d < 1.0, timeout_secs) + .await?; + + // Now resume flaky validators + log::info!("Resuming flaky nodes - dispute should conclude"); + flaky_0.resume().await?; + + honest + .wait_metric_with_timeout(concluded_dispute_metric, |d| d > 0.0, timeout_secs) + .await?; + log::info!("A dispute has concluded"); + + honest + .wait_log_line_count_with_timeout( + "*Successfully reported pending slash*", + true, + 1, + timeout_secs, + ) + .await?; + + assert_blocks_are_being_finalized(&relay_client) + .timeout(Duration::from_secs(400)) // enough for the aggression to kick in + .await? 
+ .unwrap(); + + log::info!("Test finished successfully"); + + Ok(()) +} diff --git a/polkadot/zombienet-sdk-tests/tests/helpers/mod.rs b/polkadot/zombienet-sdk-tests/tests/helpers/mod.rs index 470345ca4d621..2b1de28f7b641 100644 --- a/polkadot/zombienet-sdk-tests/tests/helpers/mod.rs +++ b/polkadot/zombienet-sdk-tests/tests/helpers/mod.rs @@ -4,6 +4,7 @@ use polkadot_primitives::Id as ParaId; use std::{collections::HashMap, ops::Range}; use subxt::{OnlineClient, PolkadotConfig}; +use tokio::time::{sleep, Duration}; #[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")] pub mod rococo {} @@ -79,3 +80,25 @@ pub async fn assert_finalized_block_height( } Ok(()) } + +/// Assert that finality has not stalled. +pub async fn assert_blocks_are_being_finalized( + client: &OnlineClient, +) -> Result<(), anyhow::Error> { + let mut finalized_blocks = client.blocks().subscribe_finalized().await?; + let first_measurement = finalized_blocks + .next() + .await + .ok_or(anyhow::anyhow!("Can't get finalized block from stream"))?? + .number(); + sleep(Duration::from_secs(12)).await; + let second_measurement = finalized_blocks + .next() + .await + .ok_or(anyhow::anyhow!("Can't get finalized block from stream"))?? 
+ .number(); + + assert!(second_measurement > first_measurement); + + Ok(()) +} diff --git a/polkadot/zombienet-sdk-tests/tests/lib.rs b/polkadot/zombienet-sdk-tests/tests/lib.rs index 9feb9775e450e..d8548b9a23ea8 100644 --- a/polkadot/zombienet-sdk-tests/tests/lib.rs +++ b/polkadot/zombienet-sdk-tests/tests/lib.rs @@ -4,6 +4,8 @@ #[cfg(feature = "zombie-metadata")] mod helpers; +#[cfg(feature = "zombie-metadata")] +mod disabling; #[cfg(feature = "zombie-metadata")] mod elastic_scaling; #[cfg(feature = "zombie-metadata")] diff --git a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs index 59a71a83e01ec..daa65c81d8003 100644 --- a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs +++ b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_revenue.rs @@ -14,6 +14,9 @@ use anyhow::anyhow; #[subxt::subxt(runtime_metadata_path = "metadata-files/coretime-rococo-local.scale")] mod coretime_rococo {} +#[subxt::subxt(runtime_metadata_path = "metadata-files/rococo-local.scale")] +mod rococo {} + use crate::helpers::rococo::{ self as rococo_api, runtime_types::{ @@ -43,6 +46,7 @@ use coretime_rococo::{ sp_arithmetic::per_things::Perbill, }, }; +use rococo::on_demand_assignment_provider::events as on_demand_events; type CoretimeRuntimeCall = coretime_api::runtime_types::coretime_rococo_runtime::RuntimeCall; type CoretimeUtilityCall = coretime_api::runtime_types::pallet_utility::pallet::Call; @@ -87,7 +91,7 @@ async fn assert_total_issuance( assert_eq!(ti, actual_ti); } -type ParaEvents = Arc)>>>; +type EventOf = Arc)>>>; macro_rules! trace_event { ($event:ident : $mod:ident => $($ev:ident),*) => { @@ -101,7 +105,7 @@ macro_rules! 
trace_event { }; } -async fn para_watcher(api: OnlineClient, events: ParaEvents) +async fn para_watcher(api: OnlineClient, events: EventOf) where ::Number: Display, { @@ -129,8 +133,35 @@ where } } -async fn wait_for_para_event bool + Copy>( - events: ParaEvents, +async fn relay_watcher(api: OnlineClient, events: EventOf) +where + ::Number: Display, +{ + let mut blocks_sub = api.blocks().subscribe_finalized().await.unwrap(); + + log::debug!("Starting parachain watcher"); + while let Some(block) = blocks_sub.next().await { + let block = block.unwrap(); + log::debug!("Finalized parachain block {}", block.number()); + + for event in block.events().await.unwrap().iter() { + let event = event.unwrap(); + log::debug!("Got event: {} :: {}", event.pallet_name(), event.variant_name()); + { + events.write().await.push((block.number().into(), event.clone())); + } + + if event.pallet_name() == "OnDemandAssignmentProvider" { + trace_event!(event: on_demand_events => + AccountCredited, SpotPriceSet, OnDemandOrderPlaced + ); + } + } + } +} + +async fn wait_for_event bool + Copy>( + events: EventOf, pallet: &'static str, variant: &'static str, predicate: P, @@ -230,14 +261,22 @@ async fn coretime_revenue_test() -> Result<(), anyhow::Error> { let bob = dev::bob(); - let para_events: ParaEvents = Arc::new(RwLock::new(Vec::new())); + let para_events: EventOf = Arc::new(RwLock::new(Vec::new())); let p_api = para_node.wait_client().await?; let p_events = para_events.clone(); - let _subscriber = tokio::spawn(async move { + let _subscriber1 = tokio::spawn(async move { para_watcher(p_api, p_events).await; }); + let relay_events: EventOf = Arc::new(RwLock::new(Vec::new())); + let r_api = relay_node.wait_client().await?; + let r_events = relay_events.clone(); + + let _subscriber2 = tokio::spawn(async move { + relay_watcher(r_api, r_events).await; + }); + let api: OnlineClient = para_node.wait_client().await?; let _s1 = tokio::spawn(async move { ti_watcher(api, "PARA").await; @@ -276,7 
+315,7 @@ async fn coretime_revenue_test() -> Result<(), anyhow::Error> { ) .await?; - wait_for_para_event( + wait_for_event( para_events.clone(), "Balances", "Minted", @@ -328,16 +367,16 @@ async fn coretime_revenue_test() -> Result<(), anyhow::Error> { log::info!("Waiting for a full-length sale to begin"); - // Skip the first sale completeley as it may be a short one. Also, `request_code_count` requires + // Skip the first sale completeley as it may be a short one. Also, `request_core_count` requires // two session boundaries to propagate. Given that the `fast-runtime` session is 10 blocks and // the timeslice is 20 blocks, we should be just in time. let _: coretime_api::broker::events::SaleInitialized = - wait_for_para_event(para_events.clone(), "Broker", "SaleInitialized", |_| true).await; + wait_for_event(para_events.clone(), "Broker", "SaleInitialized", |_| true).await; log::info!("Skipped short sale"); let sale: coretime_api::broker::events::SaleInitialized = - wait_for_para_event(para_events.clone(), "Broker", "SaleInitialized", |_| true).await; + wait_for_event(para_events.clone(), "Broker", "SaleInitialized", |_| true).await; log::info!("{:?}", sale); // Alice buys a region @@ -349,7 +388,7 @@ async fn coretime_revenue_test() -> Result<(), anyhow::Error> { .sign_and_submit_default(&coretime_api::tx().broker().purchase(1_000_000_000), &alice) .await?; - let purchase = wait_for_para_event( + let purchase = wait_for_event( para_events.clone(), "Broker", "Purchased", @@ -381,19 +420,17 @@ async fn coretime_revenue_test() -> Result<(), anyhow::Error> { ) .await?; - let pooled = wait_for_para_event( - para_events.clone(), - "Broker", - "Pooled", - |e: &broker_events::Pooled| e.region_id.begin == region_begin, - ) - .await; + let pooled = + wait_for_event(para_events.clone(), "Broker", "Pooled", |e: &broker_events::Pooled| { + e.region_id.begin == region_begin + }) + .await; // Wait until the beginning of the timeslice where the region belongs to 
log::info!("Waiting for the region to begin"); - let hist = wait_for_para_event( + let hist = wait_for_event( para_events.clone(), "Broker", "HistoryInitialized", @@ -443,7 +480,7 @@ async fn coretime_revenue_test() -> Result<(), anyhow::Error> { log::info!("Waiting for Alice's revenue to be ready to claim"); - let claims_ready = wait_for_para_event( + let claims_ready = wait_for_event( para_events.clone(), "Broker", "ClaimsReady", @@ -460,6 +497,54 @@ async fn coretime_revenue_test() -> Result<(), anyhow::Error> { assert_total_issuance(relay_client.clone(), para_client.clone(), total_issuance).await; + // Try purchasing on-demand with credits: + + log::info!("Bob is going to buy on-demand credits for alice"); + + let r = para_client + .tx() + .sign_and_submit_then_watch_default( + &coretime_api::tx().broker().purchase_credit(100_000_000, alice_acc.clone()), + &bob, + ) + .await? + .wait_for_finalized_success() + .await?; + + assert!(r.find_first::()?.is_some()); + + let _account_credited = wait_for_event( + relay_events.clone(), + "OnDemandAssignmentProvider", + "AccountCredited", + |e: &on_demand_events::AccountCredited| e.who == alice_acc && e.amount == 100_000_000, + ) + .await; + + // Once the account is credit we can place an on-demand order using credits + log::info!("Alice is going to place an on-demand order using credits"); + + let r = relay_client + .tx() + .sign_and_submit_then_watch_default( + &rococo_api::tx() + .on_demand_assignment_provider() + .place_order_with_credits(100_000_000, primitives::Id(100)), + &alice, + ) + .await? + .wait_for_finalized_success() + .await?; + + let order = r + .find_first::()? + .unwrap(); + + assert_eq!(order.spot_price, ON_DEMAND_BASE_FEE); + + // NOTE: Purchasing on-demand with credits doesn't affect the total issuance, as the credits are + // purchased on the PC. Therefore we don't check for total issuance changes. 
+ // Alice claims her revenue log::info!("Alice is going to claim her revenue"); @@ -472,7 +557,7 @@ async fn coretime_revenue_test() -> Result<(), anyhow::Error> { ) .await?; - let claim_paid = wait_for_para_event( + let claim_paid = wait_for_event( para_events.clone(), "Broker", "RevenueClaimPaid", @@ -490,16 +575,18 @@ async fn coretime_revenue_test() -> Result<(), anyhow::Error> { // between. let _: coretime_api::broker::events::SaleInitialized = - wait_for_para_event(para_events.clone(), "Broker", "SaleInitialized", |_| true).await; + wait_for_event(para_events.clone(), "Broker", "SaleInitialized", |_| true).await; total_issuance.0 -= ON_DEMAND_BASE_FEE / 2; total_issuance.1 -= ON_DEMAND_BASE_FEE / 2; let _: coretime_api::broker::events::SaleInitialized = - wait_for_para_event(para_events.clone(), "Broker", "SaleInitialized", |_| true).await; + wait_for_event(para_events.clone(), "Broker", "SaleInitialized", |_| true).await; assert_total_issuance(relay_client.clone(), para_client.clone(), total_issuance).await; + assert_eq!(order.spot_price, ON_DEMAND_BASE_FEE); + log::info!("Test finished successfully"); Ok(()) diff --git a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml deleted file mode 100644 index a3bbc82e74ba6..0000000000000 --- a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.toml +++ /dev/null @@ -1,47 +0,0 @@ -[settings] -timeout = 1000 -bootnode = true - -[relaychain.genesis.runtimeGenesis.patch.configuration.config] - needed_approvals = 2 - -[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params] - max_validators_per_core = 1 - group_rotation_frequency = 2 - -[relaychain] -default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" -chain = "westend-local" # using westend-local to enable slashing -default_command = "polkadot" - -[relaychain.default_resources] -limits = { memory = "4G", cpu = 
"2" } -requests = { memory = "2G", cpu = "1" } - - [[relaychain.node_groups]] - name = "honest-flaky-validator" - invulnerable = true # it will go offline, we don't want to disable it - count = 2 - args = ["-lparachain=debug"] - - [[relaychain.node_groups]] - name = "honest-validator" - count = 1 - args = ["-lparachain=debug"] - - [[relaychain.node_groups]] - image = "{{MALUS_IMAGE}}" - name = "malus-validator" - command = "malus suggest-garbage-candidate" - args = ["-lMALUS=trace"] - count = 1 - -[[parachains]] -id = 1000 -cumulus_based = true - - [parachains.collator] - name = "alice" - command = "polkadot-parachain" - image = "{{CUMULUS_IMAGE}}" - args = ["-lparachain=debug"] diff --git a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.zndsl b/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.zndsl deleted file mode 100644 index d92820391d53d..0000000000000 --- a/polkadot/zombienet_tests/functional/0005-parachains-disputes-past-session.zndsl +++ /dev/null @@ -1,57 +0,0 @@ -Description: Past-session dispute slashing -Network: ./0005-parachains-disputes-past-session.toml -Creds: config - -# Ensure nodes are up and running -honest-flaky-validator: reports node_roles is 4 - -# Stop issuing disputes for now -malus-validator: pause - -# Ensure parachain is registered -honest-validator: parachain 1000 is registered within 100 seconds - -# Ensure parachain made progress -honest-validator: parachain 1000 block height is at least 1 within 300 seconds - -# Start issuing disputes -malus-validator: resume - -# Wait for malus to back garbage candidate -malus-validator: log line matches "Suggesting malicious candidate" within 200 seconds - -# Pause first flaky node -# Availability will continue with 3/4 nodes online (incl. 
malus) -honest-flaky-validator-0: pause - -# Wait for the dispute -honest-flaky-validator-1: reports parachain_candidate_disputes_total is at least 1 within 60 seconds - -# Pause second flaky node so that we do not revert blocks due to f+1 invalid votes -# Availability and finality will stop -honest-flaky-validator-1: pause - -# Wait for 1 full session to pass after the last unconcluded dispute. -# -# TODO: replace with assertion for "New session detected" in logs. I think that -# would match on previous log lines, so we may need to programmatically wait for -# a specific session, requiring zombienet v2. -sleep 110 seconds - -# Now resume flaky validators -honest-flaky-validator: resume - -# Stop issuing disputes -malus-validator: pause - -# Disputes should start concluding now -honest-validator: reports polkadot_parachain_candidate_dispute_concluded{validity="invalid"} is at least 1 within 200 seconds - -# Disputes should always end as "invalid" -honest-validator: reports polkadot_parachain_candidate_dispute_concluded{validity="valid"} is 0 - -# Check an unsigned extrinsic is submitted -honest-validator: log line contains "Successfully reported pending slash" within 180 seconds - -# Finality should not lag after dispute resolution -honest-validator: reports block height minus finalised block is lower than 8 within 100 seconds diff --git a/prdoc/pr_5990.prdoc b/prdoc/pr_5990.prdoc new file mode 100644 index 0000000000000..ee13ad634dcf5 --- /dev/null +++ b/prdoc/pr_5990.prdoc @@ -0,0 +1,30 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: On-demand credits + +doc: + - audience: [ Runtime User, Runtime Dev ] + description: | + The PR implements functionality on the relay chain for purchasing on-demand + Coretime using credits. 
This means on-demand Coretime should no longer be + purchased with the relay chain balance but rather with credits acquired + on the Coretime chain. The extrinsic to use for purchasing Coretime is + `place_order_with_credits`. It is worth noting that the PR also introduces + a minimum credit purchase requirement to prevent potential attacks. + +crates: + - name: pallet-broker + bump: major + - name: polkadot-runtime-parachains + bump: major + - name: rococo-runtime + bump: patch + - name: westend-runtime + bump: patch + - name: polkadot-test-runtime + bump: patch + - name: coretime-rococo-runtime + bump: major + - name: coretime-westend-runtime + bump: major diff --git a/prdoc/pr_6059.prdoc b/prdoc/pr_6059.prdoc new file mode 100644 index 0000000000000..9717654fefbb4 --- /dev/null +++ b/prdoc/pr_6059.prdoc @@ -0,0 +1,56 @@ +title: '[mq pallet] Custom next queue selectors' +doc: +- audience: Runtime Dev + description: |- + Changes: + - Expose a `force_set_head` function from the `MessageQueue` pallet via a new trait: `ForceSetHead`. This can be used to force the MQ pallet to process this queue next. + - The change only exposes an internal function through a trait, no audit is required. + + ## Context + + For the Asset Hub Migration (AHM) we need a mechanism to prioritize the inbound upward messages and the inbound downward messages on the AH. To achieve this, a minimal (and no breaking) change is done to the MQ pallet in the form of adding the `force_set_head` function. + + An example use of how to achieve prioritization is then demonstrated in `integration_test.rs::AhmPrioritizer`. Normally, all queues are scheduled round-robin like this: + + `| Relay | Para(1) | Para(2) | ... | Relay | ... 
` + + The prioritizer listens to changes to its queue and triggers if either: + - The queue processed in the last block (to keep the general round-robin scheduling) + - The queue did not process since `n` blocks (to prevent starvation if there are too many other queues) + + In either situation, it schedules the queue for a streak of three consecutive blocks, such that it would become: + + `| Relay | Relay | Relay | Para(1) | Para(2) | ... | Relay | Relay | Relay | ... ` + + It basically transforms the round-robin into an elongated round robin. Although different strategies can be injected into the pallet at runtime, this one seems to strike a good balance between general service level and prioritization. +crates: +- name: pallet-message-queue + bump: major +- name: rococo-runtime + bump: minor +- name: westend-runtime + bump: minor +- name: contracts-rococo-runtime + bump: minor +- name: coretime-rococo-runtime + bump: minor +- name: polkadot-runtime-parachains + bump: minor +- name: asset-hub-rococo-runtime + bump: minor +- name: asset-hub-westend-runtime + bump: minor +- name: bridge-hub-rococo-runtime + bump: minor +- name: bridge-hub-westend-runtime + bump: minor +- name: collectives-westend-runtime + bump: minor +- name: coretime-westend-runtime + bump: minor +- name: glutton-westend-runtime + bump: minor +- name: people-rococo-runtime + bump: minor +- name: people-westend-runtime + bump: minor diff --git a/prdoc/pr_7167.prdoc b/prdoc/pr_7167.prdoc new file mode 100644 index 0000000000000..8faae9f8af345 --- /dev/null +++ b/prdoc/pr_7167.prdoc @@ -0,0 +1,16 @@ +title: '[pallet-revive] Add tracing support (2/2)' +doc: +- audience: Runtime Dev + description: |- + - Add debug endpoint to eth-rpc for capturing a block or a single transaction traces + - Use in-memory DB for non-archive node + + See: + - PR #7166 +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor +- name: asset-hub-westend-runtime + bump: minor diff --git 
a/prdoc/pr_7243.prdoc b/prdoc/pr_7243.prdoc new file mode 100644 index 0000000000000..c04017c29ce43 --- /dev/null +++ b/prdoc/pr_7243.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: 'transfer function Preservation is changed to Expendable in fungible and fungibles adapter' + +doc: + - audience: Runtime Dev + description: | + The Preservation of transfer method of fungible and fungibles adapters is changed from Preserve to Expendable. So the behavior of the TransferAsset will be consistent with the WithdrawAsset function, as in fungible and fungibles adapter. + +crates: +- name: staging-xcm-builder + bump: patch \ No newline at end of file diff --git a/prdoc/pr_7353.prdoc b/prdoc/pr_7353.prdoc new file mode 100644 index 0000000000000..8b1f272525d66 --- /dev/null +++ b/prdoc/pr_7353.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Shorter availability data retention period for testnets + +doc: + - audience: Node Operator + description: | + Allows specifying a shorter availability data retention period for testnets. 
+ +crates: +- name: polkadot-service + bump: patch diff --git a/prdoc/pr_7418.prdoc b/prdoc/pr_7418.prdoc new file mode 100644 index 0000000000000..15e47e2525c1a --- /dev/null +++ b/prdoc/pr_7418.prdoc @@ -0,0 +1,7 @@ +title: Refactor #[benchmarks] macro to avoid defining trait bounds twice +doc: +- audience: Runtime Dev + description: 'This PR contains a small refactor in the logic of #[benchmarks] so if a where clause is included the expanded code sets the bound T:Config inside the where clause' +crates: +- name: frame-support-procedural + bump: patch diff --git a/prdoc/pr_7430.prdoc b/prdoc/pr_7430.prdoc new file mode 100644 index 0000000000000..a3258a72c9396 --- /dev/null +++ b/prdoc/pr_7430.prdoc @@ -0,0 +1,28 @@ +title: '[pallet-revive] fix tracing gas used' +doc: +- audience: Runtime Dev + description: |- + - Charge the nested gas meter for loading the code of the child contract, so that we can properly associate the gas cost to the child call frame. + - Move the enter_child_span and exit_child_span around the do_transaction closure to properly capture all failures + - Add missing trace capture for call transfer +crates: +- name: pallet-revive-fixtures + bump: minor +- name: pallet-revive + bump: minor +- name: pallet-revive-uapi + bump: minor +- name: asset-hub-westend-runtime + bump: minor +- name: pallet-migrations + bump: minor +- name: frame-support + bump: minor +- name: people-rococo-runtime + bump: minor +- name: people-westend-runtime + bump: minor +- name: rococo-runtime + bump: minor +- name: westend-runtime + bump: minor diff --git a/prdoc/pr_7441.prdoc b/prdoc/pr_7441.prdoc new file mode 100644 index 0000000000000..ef956ff4bebfe --- /dev/null +++ b/prdoc/pr_7441.prdoc @@ -0,0 +1,25 @@ +title: 'Update Scheduler to have a configurable block number provider' +doc: +- audience: Runtime Dev + description: |- + This PR makes `pallet_scheduler` configurable by introducing `BlockNumberProvider` in + `pallet_scheduler::Config`.
Instead of relying solely on + `frame_system::Pallet::::block_number()`, the scheduler can now use any block number source, + including external providers like the relay chain. + + Parachains can continue using `frame_system::Pallet::` without issue. To retain the + previous behavior, set `BlockNumberProvider` to `frame_system::Pallet::`. + +crates: +- name: collectives-westend-runtime + bump: patch +- name: rococo-runtime + bump: patch +- name: westend-runtime + bump: patch +- name: pallet-democracy + bump: patch +- name: pallet-referenda + bump: patch +- name: pallet-scheduler + bump: major diff --git a/prdoc/pr_7505.prdoc b/prdoc/pr_7505.prdoc new file mode 100644 index 0000000000000..da9f44dcf4060 --- /dev/null +++ b/prdoc/pr_7505.prdoc @@ -0,0 +1,14 @@ +title: '`fatxpool`: transaction statuses metrics added' +doc: +- audience: Node Dev + description: |- + This PR introduces a new mechanism to capture and report Prometheus metrics related to timings of transaction + lifecycle events, which are currently not available. By exposing these timings, we aim to augment transaction-pool + reliability dashboards and extend existing Grafana boards. + + A new `unknown_from_block_import_txs` metric is also introduced. It provides the number of transactions in imported + block which are not known to the node's transaction pool. It allows to monitor alignment of transaction pools + across the nodes in the network. 
+crates: +- name: sc-transaction-pool + bump: minor diff --git a/prdoc/pr_7506.prdoc b/prdoc/pr_7506.prdoc new file mode 100644 index 0000000000000..7e41afbb3a5e8 --- /dev/null +++ b/prdoc/pr_7506.prdoc @@ -0,0 +1,9 @@ +title: '[pallet-revive] Add eth_get_logs' +doc: +- audience: Runtime Dev + description: "Add support for eth_get_logs rpc method" +crates: +- name: pallet-revive-eth-rpc + bump: minor +- name: pallet-revive + bump: minor diff --git a/prdoc/pr_7554.prdoc b/prdoc/pr_7554.prdoc new file mode 100644 index 0000000000000..4978d01d4060f --- /dev/null +++ b/prdoc/pr_7554.prdoc @@ -0,0 +1,8 @@ +title: 'sc-informant: Print full hash when debug logging is enabled' +doc: +- audience: Node Dev + description: |- + When debugging stuff, it is useful to see the full hashes and not only the "short form". This makes it easier to read logs and follow blocks. +crates: +- name: sc-informant + bump: patch diff --git a/prdoc/pr_7562.prdoc b/prdoc/pr_7562.prdoc new file mode 100644 index 0000000000000..fa0ed68c1e9bf --- /dev/null +++ b/prdoc/pr_7562.prdoc @@ -0,0 +1,10 @@ +title: 'pallet-revive: Add env var to allow skipping of validation for testing' +doc: +- audience: Runtime Dev + description: |- + When trying to reproduce bugs we sometimes need to deploy code that wouldn't pass validation. This PR adds a new environment variable `REVIVE_SKIP_VALIDATION` that when set will skip all validation except the contract blob size limit. + + Please note that this only applies to when the pallet is compiled for `std` and hence will never be part of on-chain. 
+crates: +- name: pallet-revive + bump: patch diff --git a/prdoc/pr_7570.prdoc b/prdoc/pr_7570.prdoc new file mode 100644 index 0000000000000..c38107f2de326 --- /dev/null +++ b/prdoc/pr_7570.prdoc @@ -0,0 +1,7 @@ +title: '[pallet-revive] fix subxt version' +doc: +- audience: Runtime Dev + description: |- + Cargo.lock change to subxt were rollback + Fixing it and updating it in Cargo.toml so it does not happen again +crates: [] diff --git a/prdoc/pr_7571.prdoc b/prdoc/pr_7571.prdoc new file mode 100644 index 0000000000000..48434a612adf1 --- /dev/null +++ b/prdoc/pr_7571.prdoc @@ -0,0 +1,7 @@ +title: 'frame-benchmarking: Improve macro hygiene' +doc: +- audience: Runtime Dev + description: Improve macro hygiene of benchmarking macros. +crates: +- name: frame-benchmarking + bump: patch diff --git a/substrate/.maintain/frame-umbrella-weight-template.hbs b/substrate/.maintain/frame-umbrella-weight-template.hbs index 050e74a16d7e7..c99758c41d9d7 100644 --- a/substrate/.maintain/frame-umbrella-weight-template.hbs +++ b/substrate/.maintain/frame-umbrella-weight-template.hbs @@ -16,7 +16,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] #![allow(missing_docs)] -#[allow(dead_code)] +#![allow(dead_code)] use frame::weights_prelude::*; diff --git a/substrate/.maintain/frame-weight-template.hbs b/substrate/.maintain/frame-weight-template.hbs index 541f064850a71..624fc57aa3295 100644 --- a/substrate/.maintain/frame-weight-template.hbs +++ b/substrate/.maintain/frame-weight-template.hbs @@ -16,7 +16,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] #![allow(missing_docs)] -#[allow(dead_code)] +#![allow(dead_code)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 8986d61da6bed..3cbc8aa7115d6 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -504,6 +504,7 @@ impl 
pallet_scheduler::Config for Runtime { type WeightInfo = pallet_scheduler::weights::SubstrateWeight; type OriginPrivilegeCmp = EqualPrivilegeOnly; type Preimages = Preimage; + type BlockNumberProvider = frame_system::Pallet; } impl pallet_glutton::Config for Runtime { @@ -2316,6 +2317,7 @@ impl pallet_migrations::Config for Runtime { parameter_types! { pub const BrokerPalletId: PalletId = PalletId(*b"py/broke"); + pub const MinimumCreditPurchase: Balance = 100 * MILLICENTS; } pub struct IntoAuthor; @@ -2368,6 +2370,7 @@ impl pallet_broker::Config for Runtime { type SovereignAccountOf = SovereignAccountOf; type MaxAutoRenewals = ConstU32<10>; type PriceAdapter = pallet_broker::CenterTargetPrice; + type MinimumCreditPurchase = MinimumCreditPurchase; } parameter_types! { @@ -3406,6 +3409,67 @@ impl_runtime_apis! { key ) } + + fn trace_block( + block: Block, + config: pallet_revive::evm::TracerConfig + ) -> Vec<(u32, pallet_revive::evm::CallTrace)> { + use pallet_revive::tracing::trace; + let mut tracer = config.build(Revive::evm_gas_from_weight); + let mut traces = vec![]; + let (header, extrinsics) = block.deconstruct(); + + Executive::initialize_block(&header); + for (index, ext) in extrinsics.into_iter().enumerate() { + trace(&mut tracer, || { + let _ = Executive::apply_extrinsic(ext); + }); + + if let Some(tx_trace) = tracer.collect_traces().pop() { + traces.push((index as u32, tx_trace)); + } + } + + traces + } + + fn trace_tx( + block: Block, + tx_index: u32, + config: pallet_revive::evm::TracerConfig + ) -> Option { + use pallet_revive::tracing::trace; + let mut tracer = config.build(Revive::evm_gas_from_weight); + let (header, extrinsics) = block.deconstruct(); + + Executive::initialize_block(&header); + for (index, ext) in extrinsics.into_iter().enumerate() { + if index as u32 == tx_index { + trace(&mut tracer, || { + let _ = Executive::apply_extrinsic(ext); + }); + break; + } else { + let _ = Executive::apply_extrinsic(ext); + } + } + + 
tracer.collect_traces().pop() + } + + fn trace_call( + tx: pallet_revive::evm::GenericTransaction, + config: pallet_revive::evm::TracerConfig) + -> Result + { + use pallet_revive::tracing::trace; + let mut tracer = config.build(Revive::evm_gas_from_weight); + trace(&mut tracer, || { + Self::eth_transact(tx) + })?; + + Ok(tracer.collect_traces().pop().expect("eth_transact succeeded, trace must exist, qed")) + } } impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< @@ -3692,7 +3756,7 @@ impl_runtime_apis! { Vec, Vec, ) { - use frame_benchmarking::{baseline, Benchmarking, BenchmarkList}; + use frame_benchmarking::{baseline, BenchmarkList}; use frame_support::traits::StorageInfoTrait; // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency @@ -3717,7 +3781,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch}; + use frame_benchmarking::{baseline, BenchmarkBatch}; use sp_storage::TrackedStorageKey; // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency diff --git a/substrate/client/informant/src/display.rs b/substrate/client/informant/src/display.rs index 2decd76747827..8b3e2730584ff 100644 --- a/substrate/client/informant/src/display.rs +++ b/substrate/client/informant/src/display.rs @@ -24,6 +24,8 @@ use sc_network_sync::{SyncState, SyncStatus, WarpSyncPhase, WarpSyncProgress}; use sp_runtime::traits::{Block as BlockT, CheckedDiv, NumberFor, Saturating, Zero}; use std::{fmt, time::Instant}; +use crate::PrintFullHashOnDebugLogging; + /// State of the informant display system. 
/// /// This is the system that handles the line that gets regularly printed and that looks something @@ -138,9 +140,9 @@ impl InformantDisplay { target, style(num_connected_peers).white().bold(), style(best_number).white().bold(), - best_hash, + PrintFullHashOnDebugLogging(&best_hash), style(finalized_number).white().bold(), - info.chain.finalized_hash, + PrintFullHashOnDebugLogging(&info.chain.finalized_hash), style(TransferRateFormat(avg_bytes_per_sec_inbound)).green(), style(TransferRateFormat(avg_bytes_per_sec_outbound)).red(), ) diff --git a/substrate/client/informant/src/lib.rs b/substrate/client/informant/src/lib.rs index 0b0e13dc08bbb..88da105edac72 100644 --- a/substrate/client/informant/src/lib.rs +++ b/substrate/client/informant/src/lib.rs @@ -21,13 +21,18 @@ use console::style; use futures::prelude::*; use futures_timer::Delay; -use log::{debug, info, trace}; +use log::{debug, info, log_enabled, trace}; use sc_client_api::{BlockchainEvents, UsageProvider}; use sc_network::NetworkStatusProvider; use sc_network_sync::{SyncStatusProvider, SyncingService}; use sp_blockchain::HeaderMetadata; use sp_runtime::traits::{Block as BlockT, Header}; -use std::{collections::VecDeque, fmt::Display, sync::Arc, time::Duration}; +use std::{ + collections::VecDeque, + fmt::{Debug, Display}, + sync::Arc, + time::Duration, +}; mod display; @@ -78,7 +83,20 @@ where }; } -fn display_block_import(client: Arc) -> impl Future +/// Print the full hash when debug logging is enabled. +struct PrintFullHashOnDebugLogging<'a, H>(&'a H); + +impl Display for PrintFullHashOnDebugLogging<'_, H> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if log_enabled!(log::Level::Debug) { + Debug::fmt(&self.0, f) + } else { + Display::fmt(&self.0, f) + } + } +} + +async fn display_block_import(client: Arc) where C: UsageProvider + HeaderMetadata + BlockchainEvents, >::Error: Display, @@ -91,8 +109,9 @@ where // Hashes of the last blocks we have seen at import. 
let mut last_blocks = VecDeque::new(); let max_blocks_to_track = 100; + let mut notifications = client.import_notification_stream(); - client.import_notification_stream().for_each(move |n| { + while let Some(n) = notifications.next().await { // detect and log reorganizations. if let Some((ref last_num, ref last_hash)) = last_best { if n.header.parent_hash() != last_hash && n.is_new_best { @@ -103,9 +122,9 @@ where Ok(ref ancestor) if ancestor.hash != *last_hash => info!( "♻️ Reorg on #{},{} to #{},{}, common ancestor #{},{}", style(last_num).red().bold(), - last_hash, + PrintFullHashOnDebugLogging(&last_hash), style(n.header.number()).green().bold(), - n.hash, + PrintFullHashOnDebugLogging(&n.hash), style(ancestor.number).white().bold(), ancestor.hash, ), @@ -133,11 +152,9 @@ where target: "substrate", "{best_indicator} Imported #{} ({} → {})", style(n.header.number()).white().bold(), - n.header.parent_hash(), - n.hash, + PrintFullHashOnDebugLogging(n.header.parent_hash()), + PrintFullHashOnDebugLogging(&n.hash), ); } - - future::ready(()) - }) + } } diff --git a/substrate/client/network/test/Cargo.toml b/substrate/client/network/test/Cargo.toml index 783d47f21fa76..c077fb78e24dd 100644 --- a/substrate/client/network/test/Cargo.toml +++ b/substrate/client/network/test/Cargo.toml @@ -16,6 +16,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +async-channel = { workspace = true } async-trait = { workspace = true } futures = { workspace = true } futures-timer = { workspace = true } diff --git a/substrate/client/network/test/src/conformance/high_level.rs b/substrate/client/network/test/src/conformance/high_level.rs new file mode 100644 index 0000000000000..90ab78e5c076e --- /dev/null +++ b/substrate/client/network/test/src/conformance/high_level.rs @@ -0,0 +1,236 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::conformance::setup::{ + connect_backends, connect_notifications, create_network_backend, NetworkBackendClient, +}; + +use sc_network::{ + request_responses::OutgoingResponse, service::traits::NotificationEvent, IfDisconnected, + Litep2pNetworkBackend, NetworkWorker, +}; + +#[tokio::test] +async fn check_connectivity() { + // Libp2p dials litep2p. + connect_backends( + &create_network_backend::>(), + &create_network_backend::(), + ) + .await; + + // Litep2p dials libp2p. 
+ connect_backends( + &create_network_backend::(), + &create_network_backend::>(), + ) + .await; +} + +#[tokio::test] +async fn check_request_response() { + async fn inner_check_request_response(left: NetworkBackendClient, right: NetworkBackendClient) { + connect_backends(&left, &right).await; + + let rx = right.receiver.clone(); + tokio::spawn(async move { + while let Ok(request) = rx.recv().await { + request + .pending_response + .send(OutgoingResponse { + result: Ok(request.payload), + reputation_changes: vec![], + sent_feedback: None, + }) + .expect("Valid response; qed"); + } + }); + + let channels = (0..32) + .map(|i| { + let (tx, rx) = futures::channel::oneshot::channel(); + left.network_service.start_request( + right.network_service.local_peer_id().into(), + "/request-response/1".into(), + vec![1, 2, 3, i], + None, + tx, + IfDisconnected::ImmediateError, + ); + + (i, rx) + }) + .collect::>(); + + for (id, channel) in channels { + let response = channel + .await + .expect("Channel should not be closed") + .expect(format!("Channel {} should have a response", id).as_str()); + assert_eq!(response.0, vec![1, 2, 3, id]); + } + } + + inner_check_request_response( + create_network_backend::>(), + create_network_backend::(), + ) + .await; + + inner_check_request_response( + create_network_backend::(), + create_network_backend::>(), + ) + .await; +} + +#[tokio::test] +async fn check_notifications() { + async fn inner_check_notifications(left: NetworkBackendClient, right: NetworkBackendClient) { + const MAX_NOTIFICATIONS: usize = 128; + connect_notifications(&left, &right).await; + + let right_peer = right.network_service.local_peer_id(); + let (tx, rx) = async_channel::bounded(1); + + tokio::spawn(async move { + let mut notifications_left = left.notification_service.lock().await; + for _ in 0..MAX_NOTIFICATIONS { + notifications_left + .send_async_notification(&right_peer, vec![1, 2, 3]) + .await + .expect("qed; cannot fail"); + } + let _ = rx.recv().await; + }); + + 
let mut notifications_right = right.notification_service.lock().await; + let mut notification_index = 0; + while let Some(event) = notifications_right.next_event().await { + match event { + NotificationEvent::NotificationReceived { notification, .. } => { + notification_index += 1; + + if notification_index >= MAX_NOTIFICATIONS { + let _ = tx.send(()).await; + break; + } + + assert_eq!(notification, vec![1, 2, 3]); + }, + _ => {}, + } + } + } + + // Check libp2p -> litep2p. + inner_check_notifications( + create_network_backend::>(), + create_network_backend::(), + ) + .await; + + // Check litep2p -> libp2p. + inner_check_notifications( + create_network_backend::(), + create_network_backend::>(), + ) + .await; +} + +#[tokio::test] +async fn check_notifications_ping_pong() { + async fn inner_check_notifications_ping_pong( + left: NetworkBackendClient, + right: NetworkBackendClient, + ) { + const MAX_NOTIFICATIONS: usize = 128; + connect_notifications(&left, &right).await; + + let left_peer = left.network_service.local_peer_id(); + let right_peer = right.network_service.local_peer_id(); + + let mut notification_index = 0; + tokio::spawn(async move { + let mut notifications_left = left.notification_service.lock().await; + + notifications_left + .send_async_notification(&right_peer, vec![1, 2, 3]) + .await + .expect("qed; cannot fail"); + + while let Some(event) = notifications_left.next_event().await { + match event { + NotificationEvent::NotificationReceived { notification, .. 
} => { + assert_eq!(notification, vec![1, 2, 3, 4, 5]); + + notification_index += 1; + + if notification_index >= MAX_NOTIFICATIONS { + break; + } + + notifications_left + .send_async_notification(&right_peer, vec![1, 2, 3]) + .await + .expect("qed; cannot fail"); + }, + _ => {}, + } + } + + for _ in 0..MAX_NOTIFICATIONS {} + }); + + let mut notifications_right = right.notification_service.lock().await; + let mut notification_index = 0; + while let Some(event) = notifications_right.next_event().await { + match event { + NotificationEvent::NotificationReceived { notification, .. } => { + assert_eq!(notification, vec![1, 2, 3]); + + notification_index += 1; + + if notification_index >= MAX_NOTIFICATIONS { + break; + } + + notifications_right + .send_async_notification(&left_peer, vec![1, 2, 3, 4, 5]) + .await + .expect("qed; cannot fail"); + }, + _ => {}, + } + } + } + + // Check libp2p -> litep2p. + inner_check_notifications_ping_pong( + create_network_backend::>(), + create_network_backend::(), + ) + .await; + + // Check litep2p -> libp2p. + inner_check_notifications_ping_pong( + create_network_backend::(), + create_network_backend::>(), + ) + .await; +} diff --git a/substrate/client/network/test/src/conformance/mod.rs b/substrate/client/network/test/src/conformance/mod.rs new file mode 100644 index 0000000000000..e24a7e1cc442c --- /dev/null +++ b/substrate/client/network/test/src/conformance/mod.rs @@ -0,0 +1,20 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +mod high_level; +mod setup; diff --git a/substrate/client/network/test/src/conformance/setup.rs b/substrate/client/network/test/src/conformance/setup.rs new file mode 100644 index 0000000000000..de7651a418a92 --- /dev/null +++ b/substrate/client/network/test/src/conformance/setup.rs @@ -0,0 +1,225 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use sc_network::{ + config::{ + FullNetworkConfiguration, IncomingRequest, MultiaddrWithPeerId, NetworkConfiguration, + NonReservedPeerMode, NotificationHandshake, OutgoingResponse, Params, ProtocolId, Role, + SetConfig, + }, + service::traits::{NetworkService, NotificationEvent}, + IfDisconnected, NetworkBackend, NetworkRequest, NotificationMetrics, NotificationService, + Roles, +}; + +use sc_network_common::sync::message::BlockAnnouncesHandshake; +use sp_runtime::traits::Zero; +use std::{sync::Arc, time::Duration}; +use substrate_test_runtime_client::runtime; +use tokio::sync::Mutex; + +/// High level network backend (litep2p or libp2p) test client. +pub struct NetworkBackendClient { + pub network_service: Arc, + pub notification_service: Arc>>, + pub receiver: async_channel::Receiver, +} + +/// Configure the network backend client for tests based on the given service. +/// +/// This will setup: +/// - `/request-response/1` request response protocol with bounded channel of 32 requests +/// - `/block-announces/1` notification protocol +pub fn create_network_backend() -> NetworkBackendClient +where + N: NetworkBackend, +{ + let (tx, rx) = async_channel::bounded(32); + let request_response_config = N::request_response_config( + "/request-response/1".into(), + vec![], + 1024, + 1024, + Duration::from_secs(2), + Some(tx), + ); + + let role = Role::Full; + let net_conf = NetworkConfiguration::new_local(); + let mut network_config = FullNetworkConfiguration::new(&net_conf, None); + network_config.add_request_response_protocol(request_response_config); + let genesis_hash = runtime::Hash::zero(); + let (block_announce_config, notification_service) = N::notification_config( + "/block-announces/1".into(), + vec![], + 1024, + Some(NotificationHandshake::new(BlockAnnouncesHandshake::::build( + Roles::from(&Role::Full), + Zero::zero(), + genesis_hash, + genesis_hash, + ))), + SetConfig { + in_peers: 1, + out_peers: 1, + reserved_nodes: vec![], + non_reserved_mode: 
NonReservedPeerMode::Accept, + }, + NotificationMetrics::new(None), + network_config.peer_store_handle(), + ); + let worker = N::new(Params:: { + block_announce_config, + role, + executor: Box::new(|f| { + tokio::spawn(f); + }), + genesis_hash: runtime::Hash::zero(), + network_config, + protocol_id: ProtocolId::from("test"), + fork_id: None, + metrics_registry: None, + bitswap_config: None, + notification_metrics: NotificationMetrics::new(None), + }) + .unwrap(); + let network_service = worker.network_service(); + + // Run the worker in the backend. + tokio::spawn(worker.run()); + + NetworkBackendClient { + network_service, + notification_service: Arc::new(Mutex::new(notification_service)), + receiver: rx, + } +} + +/// Connect two backends together and submit one request with `IfDisconnected::TryConnect` option +/// expecting the left backend to dial the right one. +pub async fn connect_backends(left: &NetworkBackendClient, right: &NetworkBackendClient) { + let right_peer_id = right.network_service.local_peer_id(); + + // Ensure the right backend responds to a first request + let rx = right.receiver.clone(); + tokio::spawn(async move { + let request = rx.recv().await.expect("Left backend should receive a request"); + assert_eq!(request.payload, vec![1, 2, 3]); + request + .pending_response + .send(OutgoingResponse { + result: Ok(vec![4, 5, 6]), + reputation_changes: vec![], + sent_feedback: None, + }) + .expect("Left backend should send a response"); + }); + + // Connect the two backends + while left.network_service.listen_addresses().is_empty() { + tokio::time::sleep(Duration::from_millis(10)).await; + } + while right.network_service.listen_addresses().is_empty() { + tokio::time::sleep(Duration::from_millis(10)).await; + } + let right_listen_address = right + .network_service + .listen_addresses() + .first() + .expect("qed; non empty") + .clone(); + + left.network_service + .add_known_address(right_peer_id, right_listen_address.clone().into()); + + let result = 
left + .network_service + .request( + right_peer_id, + "/request-response/1".into(), + vec![1, 2, 3], + None, + IfDisconnected::TryConnect, + ) + .await + .expect("Left backend should send a request"); + assert_eq!(result.0, vec![4, 5, 6]); + assert_eq!(result.1, "/request-response/1".into()); +} + +/// Ensure connectivity on the notification protocol level. +pub async fn connect_notifications(left: &NetworkBackendClient, right: &NetworkBackendClient) { + let right_peer_id = right.network_service.local_peer_id(); + + while left.network_service.listen_addresses().is_empty() { + tokio::time::sleep(Duration::from_millis(10)).await; + } + while right.network_service.listen_addresses().is_empty() { + tokio::time::sleep(Duration::from_millis(10)).await; + } + + let right_listen_address = right + .network_service + .listen_addresses() + .first() + .expect("qed; non empty") + .clone(); + + left.network_service + .add_reserved_peer(MultiaddrWithPeerId { + multiaddr: right_listen_address.into(), + peer_id: right_peer_id, + }) + .unwrap(); + + let mut notifications_left = left.notification_service.lock().await; + let mut notifications_right = right.notification_service.lock().await; + let mut opened = 0; + loop { + tokio::select! { + Some(event) = notifications_left.next_event() => { + match event { + NotificationEvent::NotificationStreamOpened { .. } => { + opened += 1; + if opened >= 2 { + break; + } + }, + NotificationEvent::ValidateInboundSubstream { result_tx, .. } => { + result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); + }, + _ => {}, + }; + }, + Some(event) = notifications_right.next_event() => { + match event { + NotificationEvent::ValidateInboundSubstream { result_tx, .. } => { + result_tx.send(sc_network::service::traits::ValidationResult::Accept).unwrap(); + }, + NotificationEvent::NotificationStreamOpened { .. 
} => { + opened += 1; + if opened >= 2 { + break; + } + }, + _ => {} + } + }, + } + } +} diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 3cdf211e07f68..1a810f66494d4 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -20,6 +20,8 @@ #[cfg(test)] mod block_import; #[cfg(test)] +mod conformance; +#[cfg(test)] mod fuzz; #[cfg(test)] mod service; diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs index ffe6c20d92b72..6195cf53b6072 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/fork_aware_txpool.rs @@ -21,7 +21,7 @@ use super::{ dropped_watcher::{MultiViewDroppedWatcherController, StreamOfDropped}, import_notification_sink::MultiViewImportNotificationSink, - metrics::MetricsLink as PrometheusMetrics, + metrics::{EventsMetricsCollector, MetricsLink as PrometheusMetrics}, multi_view_listener::MultiViewListener, tx_mem_pool::{InsertionInfo, TxMemPool, TXMEMPOOL_TRANSACTION_LIMIT_MULTIPLIER}, view::View, @@ -143,6 +143,9 @@ where /// Prometheus's metrics endpoint. metrics: PrometheusMetrics, + /// Collector of transaction statuses updates, reports transaction events metrics. + events_metrics_collector: EventsMetricsCollector, + /// Util tracking best and finalized block. 
enactment_state: Arc>>, @@ -193,7 +196,7 @@ where future_limits: crate::PoolLimit, mempool_max_transactions_count: usize, ) -> (Self, ForkAwareTxPoolTask) { - let (listener, listener_task) = MultiViewListener::new_with_worker(); + let (listener, listener_task) = MultiViewListener::new_with_worker(Default::default()); let listener = Arc::new(listener); let (import_notification_sink, import_notification_sink_task) = @@ -246,6 +249,7 @@ where options, is_validator: false.into(), metrics: Default::default(), + events_metrics_collector: EventsMetricsCollector::default(), }, combined_tasks, ) @@ -314,8 +318,11 @@ where finalized_hash: Block::Hash, ) -> Self { let metrics = PrometheusMetrics::new(prometheus); + let (events_metrics_collector, event_metrics_task) = + EventsMetricsCollector::::new_with_worker(metrics.clone()); - let (listener, listener_task) = MultiViewListener::new_with_worker(); + let (listener, listener_task) = + MultiViewListener::new_with_worker(events_metrics_collector.clone()); let listener = Arc::new(listener); let (revalidation_queue, revalidation_task) = @@ -337,6 +344,7 @@ where let view_store = Arc::new(ViewStore::new(pool_api.clone(), listener, dropped_stream_controller)); + let dropped_monitor_task = Self::dropped_monitor_task( dropped_stream, mempool.clone(), @@ -350,6 +358,7 @@ where _ = revalidation_task => {}, _ = import_notification_sink_task => {}, _ = dropped_monitor_task => {} + _ = event_metrics_task => {}, } } .boxed(); @@ -368,6 +377,7 @@ where import_notification_sink, options, metrics, + events_metrics_collector, is_validator, } } @@ -721,7 +731,10 @@ where .iter() .zip(xts) .filter_map(|(result, xt)| { - result.as_ref().ok().map(|insertion| (insertion.source.clone(), xt)) + result.as_ref().ok().map(|insertion| { + self.events_metrics_collector.report_submitted(&insertion); + (insertion.source.clone(), xt) + }) }) .collect::>(); @@ -812,21 +825,21 @@ where ); let xt = Arc::from(xt); - let InsertionInfo { hash: xt_hash, source: 
timed_source, .. } = - match self.mempool.push_watched(source, xt.clone()) { - Ok(result) => result, - Err(TxPoolApiError::ImmediatelyDropped) => - self.attempt_transaction_replacement(source, true, xt.clone()).await?, - Err(e) => return Err(e.into()), - }; + let insertion = match self.mempool.push_watched(source, xt.clone()) { + Ok(result) => result, + Err(TxPoolApiError::ImmediatelyDropped) => + self.attempt_transaction_replacement(source, true, xt.clone()).await?, + Err(e) => return Err(e.into()), + }; self.metrics.report(|metrics| metrics.submitted_transactions.inc()); + self.events_metrics_collector.report_submitted(&insertion); self.view_store - .submit_and_watch(at, timed_source, xt) + .submit_and_watch(at, insertion.source, xt) .await .inspect_err(|_| { - self.mempool.remove_transaction(&xt_hash); + self.mempool.remove_transaction(&insertion.hash); }) .map(|mut outcome| { self.mempool.update_transaction_priority(&outcome); @@ -1272,6 +1285,12 @@ where pruned_log.extend(enacted_log); }); + self.metrics.report(|metrics| { + metrics + .unknown_from_block_import_txs + .inc_by(self.mempool.count_unknown_transactions(pruned_log.iter()) as _) + }); + //resubmit { let mut resubmit_transactions = Vec::new(); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/metrics.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/metrics.rs index 73d45ac430519..c04741e1c1d9e 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/metrics.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/metrics.rs @@ -18,11 +18,26 @@ //! Prometheus's metrics for a fork-aware transaction pool. 
-use crate::common::metrics::{GenericMetricsLink, MetricsRegistrant}; +use super::tx_mem_pool::InsertionInfo; +use crate::{ + common::metrics::{GenericMetricsLink, MetricsRegistrant}, + graph::{self, BlockHash, ExtrinsicHash}, + LOG_TARGET, +}; +use futures::{FutureExt, StreamExt}; use prometheus_endpoint::{ - histogram_opts, linear_buckets, register, Counter, Gauge, Histogram, PrometheusError, Registry, - U64, + exponential_buckets, histogram_opts, linear_buckets, register, Counter, Gauge, Histogram, + PrometheusError, Registry, U64, +}; +use sc_transaction_pool_api::TransactionStatus; +use sc_utils::mpsc; +use std::{ + collections::{hash_map::Entry, HashMap}, + future::Future, + pin::Pin, + time::{Duration, Instant}, }; +use tracing::trace; /// A helper alias for the Prometheus's metrics endpoint. pub type MetricsLink = GenericMetricsLink; @@ -41,6 +56,8 @@ pub struct Metrics { pub unwatched_txs: Gauge, /// Total number of transactions reported as invalid. pub removed_invalid_txs: Counter, + /// Total number of transactions from imported blocks that are unknown to the pool. + pub unknown_from_block_import_txs: Counter, /// Total number of finalized transactions. pub finalized_txs: Counter, /// Histogram of maintain durations. @@ -59,6 +76,145 @@ pub struct Metrics { pub view_revalidation_duration: Histogram, /// Total number of the views created w/o cloning existing view. pub non_cloned_views: Counter, + /// Histograms to track the timing distribution of individual transaction pool events. + pub events_histograms: EventsHistograms, +} + +/// Represents a collection of histogram timings for different transaction statuses. 
+pub struct EventsHistograms { + /// Histogram of timings for reporting `TransactionStatus::Future` event + pub future: Histogram, + /// Histogram of timings for reporting `TransactionStatus::Ready` event + pub ready: Histogram, + /// Histogram of timings for reporting `TransactionStatus::Broadcast` event + pub broadcast: Histogram, + /// Histogram of timings for reporting `TransactionStatus::InBlock` event + pub in_block: Histogram, + /// Histogram of timings for reporting `TransactionStatus::Retracted` event + pub retracted: Histogram, + /// Histogram of timings for reporting `TransactionStatus::FinalityTimeout` event + pub finality_timeout: Histogram, + /// Histogram of timings for reporting `TransactionStatus::Finalized` event + pub finalized: Histogram, + /// Histogram of timings for reporting `TransactionStatus::Usurped(Hash)` event + pub usurped: Histogram, + /// Histogram of timings for reporting `TransactionStatus::Dropped` event + pub dropped: Histogram, + /// Histogram of timings for reporting `TransactionStatus::Invalid` event + pub invalid: Histogram, +} + +impl EventsHistograms { + fn register(registry: &Registry) -> Result { + Ok(Self { + future: register( + Histogram::with_opts(histogram_opts!( + "substrate_sub_txpool_timing_event_future", + "Histogram of timings for reporting Future event", + exponential_buckets(0.01, 2.0, 16).unwrap() + ))?, + registry, + )?, + ready: register( + Histogram::with_opts(histogram_opts!( + "substrate_sub_txpool_timing_event_ready", + "Histogram of timings for reporting Ready event", + exponential_buckets(0.01, 2.0, 16).unwrap() + ))?, + registry, + )?, + broadcast: register( + Histogram::with_opts(histogram_opts!( + "substrate_sub_txpool_timing_event_broadcast", + "Histogram of timings for reporting Broadcast event", + linear_buckets(0.01, 0.25, 16).unwrap() + ))?, + registry, + )?, + in_block: register( + Histogram::with_opts(histogram_opts!( + "substrate_sub_txpool_timing_event_in_block", + "Histogram of timings for 
reporting InBlock event", + linear_buckets(0.0, 3.0, 20).unwrap() + ))?, + registry, + )?, + retracted: register( + Histogram::with_opts(histogram_opts!( + "substrate_sub_txpool_timing_event_retracted", + "Histogram of timings for reporting Retracted event", + linear_buckets(0.0, 3.0, 20).unwrap() + ))?, + registry, + )?, + finality_timeout: register( + Histogram::with_opts(histogram_opts!( + "substrate_sub_txpool_timing_event_finality_timeout", + "Histogram of timings for reporting FinalityTimeout event", + linear_buckets(0.0, 40.0, 20).unwrap() + ))?, + registry, + )?, + finalized: register( + Histogram::with_opts(histogram_opts!( + "substrate_sub_txpool_timing_event_finalized", + "Histogram of timings for reporting Finalized event", + linear_buckets(0.0, 40.0, 20).unwrap() + ))?, + registry, + )?, + usurped: register( + Histogram::with_opts(histogram_opts!( + "substrate_sub_txpool_timing_event_usurped", + "Histogram of timings for reporting Usurped event", + linear_buckets(0.0, 3.0, 20).unwrap() + ))?, + registry, + )?, + dropped: register( + Histogram::with_opts(histogram_opts!( + "substrate_sub_txpool_timing_event_dropped", + "Histogram of timings for reporting Dropped event", + linear_buckets(0.0, 3.0, 20).unwrap() + ))?, + registry, + )?, + invalid: register( + Histogram::with_opts(histogram_opts!( + "substrate_sub_txpool_timing_event_invalid", + "Histogram of timings for reporting Invalid event", + linear_buckets(0.0, 3.0, 20).unwrap() + ))?, + registry, + )?, + }) + } + + /// Records the timing for a given transaction status. + /// + /// This method records the duration, representing the time elapsed since the + /// transaction was submitted until the event was reported. Based on the + /// transaction status, it utilizes the appropriate histogram to log this duration. 
+ pub fn observe( + &self, + status: TransactionStatus, + duration: Duration, + ) { + let duration = duration.as_secs_f64(); + let histogram = match status { + TransactionStatus::Future => &self.future, + TransactionStatus::Ready => &self.ready, + TransactionStatus::Broadcast(..) => &self.broadcast, + TransactionStatus::InBlock(..) => &self.in_block, + TransactionStatus::Retracted(..) => &self.retracted, + TransactionStatus::FinalityTimeout(..) => &self.finality_timeout, + TransactionStatus::Finalized(..) => &self.finalized, + TransactionStatus::Usurped(..) => &self.usurped, + TransactionStatus::Dropped => &self.dropped, + TransactionStatus::Invalid => &self.invalid, + }; + histogram.observe(duration); + } } impl MetricsRegistrant for Metrics { @@ -106,6 +262,13 @@ impl MetricsRegistrant for Metrics { )?, registry, )?, + unknown_from_block_import_txs: register( + Counter::new( + "substrate_sub_txpool_unknown_from_block_import_txs_total", + "Total number of transactions from imported blocks that are unknown to the pool.", + )?, + registry, + )?, finalized_txs: register( Counter::new( "substrate_sub_txpool_finalized_txs_total", @@ -171,6 +334,222 @@ impl MetricsRegistrant for Metrics { )?, registry, )?, + events_histograms: EventsHistograms::register(registry)?, })) } } + +/// Messages used to report and compute event metrics. +enum EventMetricsMessage { + /// Message indicating a transaction has been submitted, including the timestamp + /// and its hash. + Submitted(Instant, Hash), + /// Message indicating the new status of a transaction, including the timestamp and transaction + /// hash. + Status(Instant, Hash, TransactionStatus), +} + +/// Collects metrics related to transaction events. +pub struct EventsMetricsCollector { + /// Optional channel for sending event metrics messages. + /// + /// If `None` no event metrics are collected (e.g. in tests). 
+ metrics_message_sink: Option, BlockHash>>, +} + +impl Default for EventsMetricsCollector { + fn default() -> Self { + Self { metrics_message_sink: None } + } +} + +impl Clone for EventsMetricsCollector { + fn clone(&self) -> Self { + Self { metrics_message_sink: self.metrics_message_sink.clone() } + } +} + +impl EventsMetricsCollector { + /// Reports the status of a transaction. + /// + /// Takes a transaction hash and status, and attempts to send a status + /// message to the metrics messages processing task. + pub fn report_status( + &self, + tx_hash: ExtrinsicHash, + status: TransactionStatus, ExtrinsicHash>, + ) { + self.metrics_message_sink.as_ref().map(|sink| { + if let Err(error) = + sink.unbounded_send(EventMetricsMessage::Status(Instant::now(), tx_hash, status)) + { + trace!(target: LOG_TARGET, %error, "tx status metrics message send failed") + } + }); + } + + /// Reports that a transaction has been submitted. + /// + /// Takes a transaction hash and its submission timestamp, and attempts to + /// send a submission message to the metrics messages processing task. + pub fn report_submitted(&self, insertion_info: &InsertionInfo>) { + self.metrics_message_sink.as_ref().map(|sink| { + if let Err(error) = sink.unbounded_send(EventMetricsMessage::Submitted( + insertion_info + .source + .timestamp + .expect("timestamp is set in fork-aware pool. qed"), + insertion_info.hash, + )) { + trace!(target: LOG_TARGET, %error, "tx status metrics message send failed") + } + }); + } +} + +/// A type alias for a asynchronous task that collects metrics related to events. +pub type EventsMetricsCollectorTask = Pin + Send>>; + +/// Sink type for sending event metrics messages. +type MessageSink = + mpsc::TracingUnboundedSender>; + +/// Receiver type for receiving event metrics messages. 
+type MessageReceiver = + mpsc::TracingUnboundedReceiver>; + +/// Holds data relevant to transaction event metrics, allowing de-duplication +/// of certain transaction statuses, and compute the timings of events. +struct TransactionEventMetricsData { + /// Flag indicating if the transaction was seen as `Ready`. + ready_seen: bool, + /// Flag indicating if the transaction was seen as `Broadcast`. + broadcast_seen: bool, + /// Flag indicating if the transaction was seen as `Future`. + future_seen: bool, + /// Flag indicating if the transaction was seen as `InBlock`. + in_block_seen: bool, + /// Flag indicating if the transaction was seen as `Retracted`. + retracted_seen: bool, + /// Timestamp when the transaction was submitted. + /// + /// Used to compute a time elapsed until events are reported. + submit_timestamp: Instant, +} + +impl TransactionEventMetricsData { + /// Creates a new `TransactionEventMetricsData` with the given timestamp. + fn new(submit_timestamp: Instant) -> Self { + Self { + submit_timestamp, + future_seen: false, + ready_seen: false, + broadcast_seen: false, + in_block_seen: false, + retracted_seen: false, + } + } + + /// Sets flag to true once. + /// + /// Return true if flag was toggled. + fn set_true_once(flag: &mut bool) -> bool { + if *flag { + false + } else { + *flag = true; + true + } + } + + /// Updates the status flags based on the given transaction status. + /// + /// Returns the submit timestamp if given status was not seen yet, `None` otherwise. + fn update( + &mut self, + status: &TransactionStatus, + ) -> Option { + let flag = match *status { + TransactionStatus::Ready => &mut self.ready_seen, + TransactionStatus::Future => &mut self.future_seen, + TransactionStatus::Broadcast(..) => &mut self.broadcast_seen, + TransactionStatus::InBlock(..) => &mut self.in_block_seen, + TransactionStatus::Retracted(..) 
=> &mut self.retracted_seen, + _ => return Some(self.submit_timestamp), + }; + Self::set_true_once(flag).then_some(self.submit_timestamp) + } +} + +impl EventsMetricsCollector +where + ChainApi: graph::ChainApi + 'static, +{ + /// Handles the status event. + /// + /// Updates the metrics by observing the time taken for a transaction's status update + /// from its submission time. + fn handle_status( + hash: ExtrinsicHash, + status: TransactionStatus, BlockHash>, + timestamp: Instant, + submitted_timestamp_map: &mut HashMap, TransactionEventMetricsData>, + metrics: &MetricsLink, + ) { + let Entry::Occupied(mut entry) = submitted_timestamp_map.entry(hash) else { return }; + let remove = status.is_final(); + if let Some(submit_timestamp) = entry.get_mut().update(&status) { + metrics.report(|metrics| { + metrics + .events_histograms + .observe(status, timestamp.duration_since(submit_timestamp)) + }); + } + remove.then(|| entry.remove()); + } + + /// Asynchronous task to process received messages and compute relevant event metrics. + /// + /// Runs indefinitely, handling arriving messages and updating metrics + /// based on the recorded submission times and timestamps of current event statuses. + async fn task( + mut rx: MessageReceiver, BlockHash>, + metrics: MetricsLink, + ) { + let mut submitted_timestamp_map = + HashMap::, TransactionEventMetricsData>::default(); + + loop { + match rx.next().await { + Some(EventMetricsMessage::Submitted(timestamp, hash)) => { + submitted_timestamp_map + .insert(hash, TransactionEventMetricsData::new(timestamp)); + }, + Some(EventMetricsMessage::Status(timestamp, hash, status)) => { + Self::handle_status( + hash, + status, + timestamp, + &mut submitted_timestamp_map, + &metrics, + ); + }, + None => { + return /* ? */ + }, + }; + } + } + + /// Constructs a new `EventsMetricsCollector` and its associated worker task. + /// + /// Returns the collector alongside an asynchronous task. The task shall be polled by caller. 
+ pub fn new_with_worker(metrics: MetricsLink) -> (Self, EventsMetricsCollectorTask) { + const QUEUE_WARN_SIZE: usize = 100_000; + let (metrics_message_sink, rx) = + mpsc::tracing_unbounded("txpool-event-metrics-collector", QUEUE_WARN_SIZE); + let task = Self::task(rx, metrics); + + (Self { metrics_message_sink: Some(metrics_message_sink) }, task.boxed()) + } +} diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs index 107c2941ec183..959df2ffe9784 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/multi_view_listener.rs @@ -39,7 +39,10 @@ use std::{ use tokio_stream::StreamMap; use tracing::trace; -use super::dropped_watcher::{DroppedReason, DroppedTransaction}; +use super::{ + dropped_watcher::{DroppedReason, DroppedTransaction}, + metrics::EventsMetricsCollector, +}; /// A side channel allowing to control the external stream instance (one per transaction) with /// [`ControllerCommand`]. 
@@ -113,6 +116,26 @@ where } } +impl Into, BlockHash>> + for &TransactionStatusUpdate +where + ChainApi: graph::ChainApi, +{ + fn into(self) -> TransactionStatus, BlockHash> { + match self { + TransactionStatusUpdate::TransactionInvalidated(_) => TransactionStatus::Invalid, + TransactionStatusUpdate::TransactionFinalized(_, hash, index) => + TransactionStatus::Finalized((*hash, *index)), + TransactionStatusUpdate::TransactionBroadcasted(_, peers) => + TransactionStatus::Broadcast(peers.clone()), + TransactionStatusUpdate::TransactionDropped(_, DroppedReason::Usurped(by)) => + TransactionStatus::Usurped(*by), + TransactionStatusUpdate::TransactionDropped(_, DroppedReason::LimitsEnforced) => + TransactionStatus::Dropped, + } + } +} + impl std::fmt::Debug for TransactionStatusUpdate where ChainApi: graph::ChainApi, @@ -451,12 +474,15 @@ where /// - transaction commands, /// to multiple individual per-transaction external watcher contexts. /// - /// The future shall be polled by instantiator of `MultiViewListener`. + /// It also reports transactions statuses updates to the provided `events_metrics_collector`. + /// + /// The returned future shall be polled by instantiator of `MultiViewListener`. async fn task( external_watchers_tx_hash_map: Arc< RwLock, Controller>>>, >, mut command_receiver: CommandReceiver>, + events_metrics_collector: EventsMetricsCollector, ) { let mut aggregated_streams_map: StreamMap, ViewStatusStream> = Default::default(); @@ -465,6 +491,7 @@ where tokio::select! 
{ biased; Some((view_hash, (tx_hash, status))) = next_event(&mut aggregated_streams_map) => { + events_metrics_collector.report_status(tx_hash, status.clone()); if let Entry::Occupied(mut ctrl) = external_watchers_tx_hash_map.write().entry(tx_hash) { log::trace!( target: LOG_TARGET, @@ -510,6 +537,7 @@ where Some(ControllerCommand::TransactionStatusRequest(request)) => { let tx_hash = request.hash(); + events_metrics_collector.report_status(tx_hash, (&request).into()); if let Entry::Occupied(mut ctrl) = external_watchers_tx_hash_map.write().entry(tx_hash) { if let Err(e) = ctrl .get_mut() @@ -529,12 +557,19 @@ where /// Creates a new [`MultiViewListener`] instance along with its associated worker task. /// - /// This function instansiates the new `MultiViewListener` and provides the worker task that + /// This function instantiates the new `MultiViewListener` and provides the worker task that /// relays messages to the external transactions listeners. The task shall be polled by caller. /// + /// The `events_metrics_collector` is an instance of `EventsMetricsCollector` that is + /// responsible for collecting and managing metrics related to transaction events. Newly + /// created instance of `MultiViewListener` will report transaction status updates and its + /// timestamps to the given metrics collector. + /// /// Returns a tuple containing the [`MultiViewListener`] and the /// [`MultiViewListenerTask`]. 
- pub fn new_with_worker() -> (Self, MultiViewListenerTask) { + pub fn new_with_worker( + events_metrics_collector: EventsMetricsCollector, + ) -> (Self, MultiViewListenerTask) { let external_controllers = Arc::from(RwLock::from(HashMap::< ExtrinsicHash, Controller>, @@ -545,7 +580,7 @@ where "txpool-multi-view-listener-task-controller", CONTROLLER_QUEUE_WARN_SIZE, ); - let task = Self::task(external_controllers.clone(), rx); + let task = Self::task(external_controllers.clone(), rx, events_metrics_collector); (Self { external_controllers, controller: tx }, task.boxed()) } @@ -557,6 +592,9 @@ where /// (meaning that it can be exposed to [`sc_transaction_pool_api::TransactionPool`] API client /// e.g. rpc) stream of transaction status events. If an external watcher is already present for /// the given transaction, it returns `None`. + /// + /// The `submit_timestamp` indicates the time at which a transaction is submitted. + /// It is primarily used to calculate event timings for metric collection. 
pub(crate) fn create_external_watcher_for_tx( &self, tx_hash: ExtrinsicHash, @@ -779,7 +817,7 @@ mod tests { fn create_multi_view_listener( ) -> (MultiViewListener, tokio::sync::oneshot::Sender<()>, JoinHandle<()>) { - let (listener, listener_task) = MultiViewListener::new_with_worker(); + let (listener, listener_task) = MultiViewListener::new_with_worker(Default::default()); let (tx, rx) = tokio::sync::oneshot::channel(); diff --git a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs index e141016ccb28b..d64d80d434308 100644 --- a/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs +++ b/substrate/client/transaction-pool/src/fork_aware_txpool/tx_mem_pool.rs @@ -276,7 +276,7 @@ where ) -> Self { Self { api, - listener: Arc::from(MultiViewListener::new_with_worker().0), + listener: Arc::from(MultiViewListener::new_with_worker(Default::default()).0), transactions: Default::default(), metrics: Default::default(), max_transactions_count, @@ -599,6 +599,16 @@ where .map(|p| *p.priority.write() = Some(priority)) }); } + + /// Counts the number of transactions in the provided iterator of hashes + /// that are not known to the pool. 
+ pub(super) fn count_unknown_transactions<'a>( + &self, + hashes: impl Iterator>, + ) -> usize { + let transactions = self.transactions.read(); + hashes.filter(|tx_hash| !transactions.contains_key(tx_hash)).count() + } } #[cfg(test)] diff --git a/substrate/frame/alliance/Cargo.toml b/substrate/frame/alliance/Cargo.toml index 9d21b9e964c9a..99eb8ba3a864a 100644 --- a/substrate/frame/alliance/Cargo.toml +++ b/substrate/frame/alliance/Cargo.toml @@ -4,7 +4,7 @@ version = "27.0.0" authors.workspace = true edition.workspace = true license = "Apache-2.0" -homepage = "https://docs.substrate.io/" +homepage.workspace = true repository.workspace = true description = "The Alliance pallet provides a collective for standard-setting industry collaboration." readme = "README.md" diff --git a/substrate/frame/benchmarking/src/v1.rs b/substrate/frame/benchmarking/src/v1.rs index 99aad0301c127..994b52bb9fd07 100644 --- a/substrate/frame/benchmarking/src/v1.rs +++ b/substrate/frame/benchmarking/src/v1.rs @@ -1828,7 +1828,7 @@ macro_rules! add_benchmark { internal_repeats, } = config; if &pallet[..] == &name_string[..] && &instance[..] == &instance_string[..] { - let benchmark_result = <$location>::run_benchmark( + let benchmark_result = <$location as $crate::Benchmarking>::run_benchmark( &benchmark[..], &selected_components[..], whitelist, @@ -1915,8 +1915,8 @@ macro_rules! 
list_benchmark { ( $list:ident, $extra:ident, $name:path, $location:ty ) => { let pallet_string = stringify!($name).as_bytes(); let instance_string = stringify!($location).as_bytes(); - let benchmarks = <$location>::benchmarks($extra); - let pallet_benchmarks = BenchmarkList { + let benchmarks = <$location as $crate::Benchmarking>::benchmarks($extra); + let pallet_benchmarks = $crate::BenchmarkList { pallet: pallet_string.to_vec(), instance: instance_string.to_vec(), benchmarks: benchmarks.to_vec(), diff --git a/substrate/frame/broker/src/benchmarking.rs b/substrate/frame/broker/src/benchmarking.rs index 516518740f7d0..49003afcdd8bc 100644 --- a/substrate/frame/broker/src/benchmarking.rs +++ b/substrate/frame/broker/src/benchmarking.rs @@ -543,26 +543,22 @@ mod benches { let caller: T::AccountId = whitelisted_caller(); T::Currency::set_balance( &caller.clone(), - T::Currency::minimum_balance().saturating_add(30_000_000u32.into()), + T::Currency::minimum_balance().saturating_add(T::MinimumCreditPurchase::get()), ); T::Currency::set_balance(&Broker::::account_id(), T::Currency::minimum_balance()); - let region = Broker::::do_purchase(caller.clone(), 10_000_000u32.into()) - .expect("Offer not high enough for configuration."); - - let recipient: T::AccountId = account("recipient", 0, SEED); - - Broker::::do_pool(region, None, recipient, Final) - .map_err(|_| BenchmarkError::Weightless)?; - let beneficiary: RelayAccountIdOf = account("beneficiary", 0, SEED); #[extrinsic_call] - _(RawOrigin::Signed(caller.clone()), 20_000_000u32.into(), beneficiary.clone()); + _(RawOrigin::Signed(caller.clone()), T::MinimumCreditPurchase::get(), beneficiary.clone()); assert_last_event::( - Event::CreditPurchased { who: caller, beneficiary, amount: 20_000_000u32.into() } - .into(), + Event::CreditPurchased { + who: caller, + beneficiary, + amount: T::MinimumCreditPurchase::get(), + } + .into(), ); Ok(()) diff --git a/substrate/frame/broker/src/dispatchable_impls.rs 
b/substrate/frame/broker/src/dispatchable_impls.rs index 489be12bdd154..77bbf0878b4fa 100644 --- a/substrate/frame/broker/src/dispatchable_impls.rs +++ b/substrate/frame/broker/src/dispatchable_impls.rs @@ -426,6 +426,7 @@ impl Pallet { amount: BalanceOf, beneficiary: RelayAccountIdOf, ) -> DispatchResult { + ensure!(amount >= T::MinimumCreditPurchase::get(), Error::::CreditPurchaseTooSmall); T::Currency::transfer(&who, &Self::account_id(), amount, Expendable)?; let rc_amount = T::ConvertBalance::convert(amount); T::Coretime::credit_account(beneficiary.clone(), rc_amount); diff --git a/substrate/frame/broker/src/lib.rs b/substrate/frame/broker/src/lib.rs index 01368fd6404da..f605815a421ca 100644 --- a/substrate/frame/broker/src/lib.rs +++ b/substrate/frame/broker/src/lib.rs @@ -121,8 +121,15 @@ pub mod pallet { #[pallet::constant] type MaxReservedCores: Get; + /// Given that we are performing all auto-renewals in a single block, it has to be limited. #[pallet::constant] type MaxAutoRenewals: Get; + + /// The smallest amount of credits a user can purchase. + /// + /// Needed to prevent spam attacks. + #[pallet::constant] + type MinimumCreditPurchase: Get>; } /// The current configuration of this pallet. @@ -544,6 +551,9 @@ pub mod pallet { SovereignAccountNotFound, /// Attempted to disable auto-renewal for a core that didn't have it enabled. AutoRenewalNotEnabled, + /// Needed to prevent spam attacks.The amount of credits the user attempted to purchase is + /// below `T::MinimumCreditPurchase`. + CreditPurchaseTooSmall, } #[derive(frame_support::DefaultNoBound)] diff --git a/substrate/frame/broker/src/mock.rs b/substrate/frame/broker/src/mock.rs index 42377eefdb22e..40233a22edfc9 100644 --- a/substrate/frame/broker/src/mock.rs +++ b/substrate/frame/broker/src/mock.rs @@ -177,6 +177,7 @@ impl OnUnbalanced::Currency>> for IntoZero { ord_parameter_types! 
{ pub const One: u64 = 1; + pub const MinimumCreditPurchase: u64 = 50; } type EnsureOneOrRoot = EitherOfDiverse, EnsureSignedBy>; @@ -203,6 +204,7 @@ impl crate::Config for Test { type SovereignAccountOf = SovereignAccountOf; type MaxAutoRenewals = ConstU32<3>; type PriceAdapter = CenterTargetPrice>; + type MinimumCreditPurchase = MinimumCreditPurchase; } pub fn advance_to(b: u64) { diff --git a/substrate/frame/broker/src/tests.rs b/substrate/frame/broker/src/tests.rs index a130a2050d9a1..984650aac08ed 100644 --- a/substrate/frame/broker/src/tests.rs +++ b/substrate/frame/broker/src/tests.rs @@ -113,7 +113,7 @@ fn drop_history_works() { TestExt::new() .contribution_timeout(4) .endow(1, 1000) - .endow(2, 30) + .endow(2, 50) .execute_with(|| { assert_ok!(Broker::do_start_sales(100, 1)); advance_to(2); @@ -121,7 +121,7 @@ fn drop_history_works() { // Place region in pool. Active in pool timeslices 4, 5, 6 = rcblocks 8, 10, 12; we // expect to make/receive revenue reports on blocks 10, 12, 14. 
assert_ok!(Broker::do_pool(region, Some(1), 1, Final)); - assert_ok!(Broker::do_purchase_credit(2, 30, 2)); + assert_ok!(Broker::do_purchase_credit(2, 50, 2)); advance_to(6); // In the stable state with no pending payouts, we expect to see 3 items in // InstaPoolHistory here since there is a latency of 1 timeslice (for generating the @@ -694,6 +694,24 @@ fn purchase_works() { }); } +#[test] +fn purchase_credit_works() { + TestExt::new().endow(1, 50).execute_with(|| { + assert_ok!(Broker::do_start_sales(100, 1)); + advance_to(2); + + let credits = CoretimeCredit::get(); + assert_eq!(credits.get(&1), None); + + assert_noop!(Broker::do_purchase_credit(1, 10, 1), Error::::CreditPurchaseTooSmall); + assert_noop!(Broker::do_purchase_credit(1, 100, 1), TokenError::FundsUnavailable); + + assert_ok!(Broker::do_purchase_credit(1, 50, 1)); + let credits = CoretimeCredit::get(); + assert_eq!(credits.get(&1), Some(&50)); + }); +} + #[test] fn partition_works() { TestExt::new().endow(1, 1000).execute_with(|| { diff --git a/substrate/frame/democracy/src/tests.rs b/substrate/frame/democracy/src/tests.rs index 7777448006848..91f239476610c 100644 --- a/substrate/frame/democracy/src/tests.rs +++ b/substrate/frame/democracy/src/tests.rs @@ -106,6 +106,7 @@ impl pallet_scheduler::Config for Test { type WeightInfo = (); type OriginPrivilegeCmp = EqualPrivilegeOnly; type Preimages = (); + type BlockNumberProvider = frame_system::Pallet; } #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] diff --git a/substrate/frame/message-queue/Cargo.toml b/substrate/frame/message-queue/Cargo.toml index 7b0de7c1e4ff8..f48fa1a110440 100644 --- a/substrate/frame/message-queue/Cargo.toml +++ b/substrate/frame/message-queue/Cargo.toml @@ -29,6 +29,7 @@ frame-support = { workspace = true } frame-system = { workspace = true } [dev-dependencies] +frame-support = { workspace = true, features = ["experimental"] } rand = { workspace = true, default-features = true } rand_distr = { workspace = 
true } sp-crypto-hashing = { workspace = true, default-features = true } diff --git a/substrate/frame/message-queue/src/benchmarking.rs b/substrate/frame/message-queue/src/benchmarking.rs index 8f0712acc5f51..9391c41d06dc4 100644 --- a/substrate/frame/message-queue/src/benchmarking.rs +++ b/substrate/frame/message-queue/src/benchmarking.rs @@ -169,6 +169,22 @@ mod benchmarks { assert_eq!(weight.consumed(), T::WeightInfo::bump_service_head()); } + // Worst case for calling `bump_service_head`. + #[benchmark] + fn set_service_head() { + setup_bump_service_head::(0.into(), 1.into()); + let mut weight = WeightMeter::new(); + assert_eq!(ServiceHead::::get().unwrap(), 0u32.into()); + + #[block] + { + assert!(MessageQueue::::set_service_head(&mut weight, &1u32.into()).unwrap()); + } + + assert_eq!(ServiceHead::::get().unwrap(), 1u32.into()); + assert_eq!(weight.consumed(), T::WeightInfo::set_service_head()); + } + #[benchmark] fn reap_page() { // Mock the storage to get a *cullable* but not *reapable* page. 
diff --git a/substrate/frame/message-queue/src/integration_test.rs b/substrate/frame/message-queue/src/integration_test.rs index e4db87d8be7a3..87b236413708a 100644 --- a/substrate/frame/message-queue/src/integration_test.rs +++ b/substrate/frame/message-queue/src/integration_test.rs @@ -29,10 +29,10 @@ use crate::{ mock::{ - build_and_execute, gen_seed, Callback, CountingMessageProcessor, IntoWeight, + build_and_execute, gen_seed, set_weight, Callback, CountingMessageProcessor, IntoWeight, MessagesProcessed, MockedWeightInfo, NumMessagesProcessed, YieldingQueues, }, - mock_helpers::MessageOrigin, + mock_helpers::{MessageOrigin, MessageOrigin::Everywhere}, *, }; @@ -68,7 +68,7 @@ impl Config for Test { type WeightInfo = MockedWeightInfo; type MessageProcessor = CountingMessageProcessor; type Size = u32; - type QueueChangeHandler = (); + type QueueChangeHandler = AhmPrioritizer; type QueuePausedQuery = (); type HeapSize = HeapSize; type MaxStale = MaxStale; @@ -76,6 +76,83 @@ impl Config for Test { type IdleMaxServiceWeight = (); } +/// The object that does the AHM message prioritization for us. +#[derive(Debug, Default, codec::Encode, codec::Decode)] +pub struct AhmPrioritizer { + streak_until: Option, + prioritized_queue: Option>, + favorite_queue_num_messages: Option, +} + +// The whole `AhmPrioritizer` could be part of the AHM controller pallet. +parameter_types! { + pub storage AhmPrioritizerStorage: AhmPrioritizer = AhmPrioritizer::default(); +} + +/// Instead of giving our prioritized queue only one block, we give it a streak of blocks. +const STREAK_LEN: u64 = 3; + +impl OnQueueChanged for AhmPrioritizer { + fn on_queue_changed(origin: MessageOrigin, f: QueueFootprint) { + let mut this = AhmPrioritizerStorage::get(); + + if this.prioritized_queue != Some(origin) { + return; + } + + // Return early if this was an enqueue instead of a dequeue. 
+ if this.favorite_queue_num_messages.map_or(false, |n| n <= f.storage.count) { + return; + } + this.favorite_queue_num_messages = Some(f.storage.count); + + // only update when we are not already in a streak + if this.streak_until.map_or(false, |s| s < System::block_number()) { + this.streak_until = Some(System::block_number().saturating_add(STREAK_LEN)); + } + } +} + +impl AhmPrioritizer { + // This will need to be called by the migration controller. + fn on_initialize(now: u64) -> Weight { + let mut meter = WeightMeter::new(); + let mut this = AhmPrioritizerStorage::get(); + + let Some(q) = this.prioritized_queue else { + return meter.consumed(); + }; + // init + if this.streak_until.is_none() { + this.streak_until = Some(0); + } + if this.favorite_queue_num_messages.is_none() { + this.favorite_queue_num_messages = Some(0); + } + + // Our queue did not get a streak since 10 blocks. It must either be empty or starved: + if Pallet::::footprint(q).pages == 0 { + return meter.consumed(); + } + if this.streak_until.map_or(false, |until| until < now.saturating_sub(10)) { + log::warn!("Queue is being starved, scheduling streak of {} blocks", STREAK_LEN); + this.streak_until = Some(now.saturating_add(STREAK_LEN)); + } + + if this.streak_until.map_or(false, |until| until > now) { + let _ = Pallet::::force_set_head(&mut meter, &q).defensive(); + } + + meter.consumed() + } +} + +impl Drop for AhmPrioritizer { + fn drop(&mut self) { + AhmPrioritizerStorage::set(self); + } +} + /// Simulates heavy usage by enqueueing and processing large amounts of messages. /// /// # Example output @@ -122,6 +199,87 @@ fn stress_test_enqueue_and_service() { }); } +/// Simulate heavy usage while calling `force_set_head` on random queues. +#[test] +#[ignore] // Only run in the CI, otherwise its too slow. 
+fn stress_test_force_set_head() { + let blocks = 20; + let max_queues = 10_000; + let max_messages_per_queue = 10_000; + let max_msg_len = MaxMessageLenOf::::get(); + let mut rng = StdRng::seed_from_u64(gen_seed()); + + build_and_execute::(|| { + let mut msgs_remaining = 0; + for _ in 0..blocks { + // Start by enqueuing a large number of messages. + let enqueued = + enqueue_messages(max_queues, max_messages_per_queue, max_msg_len, &mut rng); + msgs_remaining += enqueued; + + for _ in 0..10 { + let random_queue = rng.gen_range(0..=max_queues); + MessageQueue::force_set_head(&mut WeightMeter::new(), &Everywhere(random_queue)) + .unwrap(); + } + + // Pick a fraction of all messages currently in queue and process them. + let processed = rng.gen_range(1..=msgs_remaining); + log::info!("Processing {} of all messages {}", processed, msgs_remaining); + process_some_messages(processed); // This also advances the block. + msgs_remaining -= processed; + } + log::info!("Processing all remaining {} messages", msgs_remaining); + process_all_messages(msgs_remaining); + post_conditions(); + }); +} + +/// Check that our AHM prioritization does not affect liveness. This does not really check the AHM +/// prioritization works itself, but rather that it does not break things. The actual test is in +/// another test below. +#[test] +#[ignore] // Only run in the CI, otherwise its too slow. +fn stress_test_prioritize_queue() { + let blocks = 20; + let max_queues = 10_000; + let favorite_queue = Everywhere(9000); + let max_messages_per_queue = 1_000; + let max_msg_len = MaxMessageLenOf::::get(); + let mut rng = StdRng::seed_from_u64(gen_seed()); + + build_and_execute::(|| { + let mut prio = AhmPrioritizerStorage::get(); + prio.prioritized_queue = Some(favorite_queue); + drop(prio); + + let mut msgs_remaining = 0; + for _ in 0..blocks { + // Start by enqueuing a large number of messages. 
+ let enqueued = + enqueue_messages(max_queues, max_messages_per_queue, max_msg_len, &mut rng); + msgs_remaining += enqueued; + // ensure that our favorite queue always has some more messages + for _ in 0..200 { + MessageQueue::enqueue_message( + BoundedSlice::defensive_truncate_from("favorite".as_bytes()), + favorite_queue, + ); + msgs_remaining += 1; + } + + // Pick a fraction of all messages currently in queue and process them. + let processed = rng.gen_range(1..=100); + log::info!("Processing {} of all messages {}", processed, msgs_remaining); + process_some_messages(processed); // This also advances the block. + msgs_remaining -= processed; + } + log::info!("Processing all remaining {} messages", msgs_remaining); + process_all_messages(msgs_remaining); + post_conditions(); + }); +} + /// Very similar to `stress_test_enqueue_and_service`, but enqueues messages while processing them. #[test] #[ignore] // Only run in the CI, otherwise its too slow. @@ -275,6 +433,55 @@ fn stress_test_queue_suspension() { }); } +/// Test that our AHM prioritizer will ensure that our favorite queue always gets some dedicated +/// weight. +#[test] +#[ignore] +fn stress_test_ahm_despair_mode_works() { + build_and_execute::(|| { + let blocks = 200; + let queues = 200; + + for o in 0..queues { + for i in 0..100 { + MessageQueue::enqueue_message( + BoundedSlice::defensive_truncate_from(format!("{}:{}", o, i).as_bytes()), + Everywhere(o), + ); + } + } + set_weight("bump_head", Weight::from_parts(1, 1)); + + // Prioritize the last queue. + let mut prio = AhmPrioritizerStorage::get(); + prio.prioritized_queue = Some(Everywhere(199)); + drop(prio); + + ServiceWeight::set(Some(Weight::from_parts(10, 10))); + for _ in 0..blocks { + next_block(); + } + + // Check that our favorite queue has processed the most messages. 
+ let mut min = u64::MAX; + let mut min_origin = 0; + + for o in 0..queues { + let fp = MessageQueue::footprint(Everywhere(o)); + if fp.storage.count < min { + min = fp.storage.count; + min_origin = o; + } + } + assert_eq!(min_origin, 199); + + // Process all remaining messages. + ServiceWeight::set(Some(Weight::MAX)); + next_block(); + post_conditions(); + }); +} + /// How many messages are in each queue. fn msgs_per_queue() -> BTreeMap { let mut per_queue = BTreeMap::new(); @@ -353,10 +560,12 @@ fn process_all_messages(expected: u32) { /// Returns the weight consumed by `MessageQueue::on_initialize()`. fn next_block() -> Weight { + log::info!("Next block: {}", System::block_number() + 1); MessageQueue::on_finalize(System::block_number()); System::on_finalize(System::block_number()); System::set_block_number(System::block_number() + 1); System::on_initialize(System::block_number()); + AhmPrioritizer::on_initialize(System::block_number()); MessageQueue::on_initialize(System::block_number()) } diff --git a/substrate/frame/message-queue/src/lib.rs b/substrate/frame/message-queue/src/lib.rs index 04620fa88d85b..9cd3e42d70dd2 100644 --- a/substrate/frame/message-queue/src/lib.rs +++ b/substrate/frame/message-queue/src/lib.rs @@ -462,6 +462,17 @@ impl OnQueueChanged for () { fn on_queue_changed(_: Id, _: QueueFootprint) {} } +/// Allows to force the processing head to a specific queue. +pub trait ForceSetHead { + /// Set the `ServiceHead` to `origin`. + /// + /// This function: + /// - `Err`: Queue did not exist, not enough weight or other error. + /// - `Ok(true)`: The service head was updated. + /// - `Ok(false)`: The service head was not updated since the queue is empty. + fn force_set_head(weight: &mut WeightMeter, origin: &O) -> Result; +} + #[frame_support::pallet] pub mod pallet { use super::*; @@ -626,16 +637,16 @@ pub mod pallet { /// The index of the first and last (non-empty) pages. 
#[pallet::storage] - pub(super) type BookStateFor = + pub type BookStateFor = StorageMap<_, Twox64Concat, MessageOriginOf, BookState>, ValueQuery>; /// The origin at which we should begin servicing. #[pallet::storage] - pub(super) type ServiceHead = StorageValue<_, MessageOriginOf, OptionQuery>; + pub type ServiceHead = StorageValue<_, MessageOriginOf, OptionQuery>; /// The map of page indices to pages. #[pallet::storage] - pub(super) type Pages = StorageDoubleMap< + pub type Pages = StorageDoubleMap< _, Twox64Concat, MessageOriginOf, @@ -861,6 +872,7 @@ impl Pallet { ServiceHead::::put(&head_neighbours.next); Some(head) } else { + defensive!("The head must point to a queue in the ready ring"); None } } else { @@ -868,6 +880,20 @@ impl Pallet { } } + fn set_service_head(weight: &mut WeightMeter, queue: &MessageOriginOf) -> Result { + if weight.try_consume(T::WeightInfo::set_service_head()).is_err() { + return Err(()) + } + + // Ensure that we never set the head to an un-ready queue. + if BookStateFor::::get(queue).ready_neighbours.is_some() { + ServiceHead::::put(queue); + Ok(true) + } else { + Ok(false) + } + } + /// The maximal weight that a single message ever can consume. /// /// Any message using more than this will be marked as permanently overweight and not @@ -1616,6 +1642,12 @@ impl Pallet { } } +impl ForceSetHead> for Pallet { + fn force_set_head(weight: &mut WeightMeter, origin: &MessageOriginOf) -> Result { + Pallet::::set_service_head(weight, origin) + } +} + /// Run a closure that errors on re-entrance. Meant to be used by anything that services queues. pub(crate) fn with_service_mutex R, R>(f: F) -> Result { // Holds the singleton token instance. diff --git a/substrate/frame/message-queue/src/mock.rs b/substrate/frame/message-queue/src/mock.rs index f1d341d1a5db1..f3906373dc144 100644 --- a/substrate/frame/message-queue/src/mock.rs +++ b/substrate/frame/message-queue/src/mock.rs @@ -46,6 +46,7 @@ parameter_types! 
{ pub const MaxStale: u32 = 2; pub const ServiceWeight: Option = Some(Weight::from_parts(100, 100)); } + impl Config for Test { type RuntimeEvent = RuntimeEvent; type WeightInfo = MockedWeightInfo; @@ -121,6 +122,12 @@ impl crate::weights::WeightInfo for MockedWeightInfo { .copied() .unwrap_or(DefaultWeightForCall::get()) } + fn set_service_head() -> Weight { + WeightForCall::get() + .get("set_service_head") + .copied() + .unwrap_or(DefaultWeightForCall::get()) + } fn service_page_item() -> Weight { WeightForCall::get() .get("service_page_item") @@ -263,6 +270,7 @@ impl ProcessMessage for CountingMessageProcessor { return Err(ProcessMessageError::Corrupt) } } + NumMessagesProcessed::set(NumMessagesProcessed::get() + 1); Ok(true) } else { @@ -319,7 +327,8 @@ where { new_test_ext::().execute_with(|| { test(); - MessageQueue::do_try_state().expect("All invariants must hold after a test"); + pallet_message_queue::Pallet::::do_try_state() + .expect("All invariants must hold after a test"); }); } diff --git a/substrate/frame/message-queue/src/tests.rs b/substrate/frame/message-queue/src/tests.rs index c81e486a40dfa..9054071bf54dd 100644 --- a/substrate/frame/message-queue/src/tests.rs +++ b/substrate/frame/message-queue/src/tests.rs @@ -2039,3 +2039,103 @@ fn process_message_ok_true_keeps_storage_changes() { assert_eq!(sp_io::storage::exists(b"key"), true); }); } + +#[test] +fn force_set_head_can_starve_other_queues() { + use MessageOrigin::*; + build_and_execute::(|| { + // Enqueue messages to three queues. + for _ in 0..2 { + MessageQueue::enqueue_message(msg("A"), Here); + MessageQueue::enqueue_message(msg("B"), There); + MessageQueue::enqueue_message(msg("C"), Everywhere(0)); + } + + // Servicing will only touch `Here` and `There`. 
+ MessageQueue::service_queues(4.into_weight()); + assert_eq!( + MessagesProcessed::take(), + vec![ + (b"A".to_vec(), Here), + (b"A".to_vec(), Here), + (b"B".to_vec(), There), + (b"B".to_vec(), There) + ] + ); + + // Some more traffic on our favorite queue. + MessageQueue::enqueue_message(msg("A"), Here); + + // Hypothetically, it would proceed with `Everywhere(0)`, not our favorite queue: + frame_support::hypothetically! {{ + MessageQueue::service_queues(1.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(b"C".to_vec(), Everywhere(0))]); + }}; + + // But we won't let that happen and instead prioritize it: + assert!(Pallet::::force_set_head(&mut WeightMeter::new(), &Here).unwrap()); + + MessageQueue::service_queues(1.into_weight()); + assert_eq!(MessagesProcessed::take(), vec![(b"A".to_vec(), Here)]); + }); +} + +#[test] +fn force_set_head_noop_on_unready_queue() { + use crate::tests::MessageOrigin::*; + build_and_execute::(|| { + // enqueue and process one message + MessageQueue::enqueue_message(msg("A"), Here); + MessageQueue::service_queues(1.into_weight()); + assert_ring(&[]); + + let _guard = StorageNoopGuard::new(); + let was_set = Pallet::::force_set_head(&mut WeightMeter::new(), &There).unwrap(); + assert!(!was_set); + }); +} + +#[test] +fn force_set_head_noop_on_current_head() { + use crate::tests::MessageOrigin::*; + build_and_execute::(|| { + MessageQueue::enqueue_message(msg("A"), Here); + MessageQueue::enqueue_message(msg("A"), Here); + MessageQueue::service_queues(1.into_weight()); + assert_ring(&[Here]); + + let _guard = StorageNoopGuard::new(); + let was_set = Pallet::::force_set_head(&mut WeightMeter::new(), &Here).unwrap(); + assert!(was_set); + }); +} + +#[test] +fn force_set_head_noop_unprocessed_queue() { + use crate::tests::MessageOrigin::*; + build_and_execute::(|| { + MessageQueue::enqueue_message(msg("A"), Here); + assert_ring(&[Here]); + + let _guard = StorageNoopGuard::new(); + let was_set = Pallet::::force_set_head(&mut 
WeightMeter::new(), &Here).unwrap(); + assert!(was_set); + }); +} + +#[test] +fn force_set_head_works() { + use crate::tests::MessageOrigin::*; + build_and_execute::(|| { + MessageQueue::enqueue_message(msg("A"), Here); + MessageQueue::enqueue_message(msg("B"), There); + assert_eq!(ServiceHead::::get(), Some(Here)); + assert_ring(&[Here, There]); + + let was_set = Pallet::::force_set_head(&mut WeightMeter::new(), &There).unwrap(); + assert!(was_set); + + assert_eq!(ServiceHead::::get(), Some(There)); + assert_ring(&[There, Here]); + }); +} diff --git a/substrate/frame/message-queue/src/weights.rs b/substrate/frame/message-queue/src/weights.rs index 7d36cb7551061..6f9d0581c4a03 100644 --- a/substrate/frame/message-queue/src/weights.rs +++ b/substrate/frame/message-queue/src/weights.rs @@ -18,33 +18,37 @@ //! Autogenerated weights for `pallet_message_queue` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-11-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2025-02-12, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-wiukf8gn-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` +//! HOSTNAME: `793863dddfdf`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024` // Executed Command: -// ./target/production/substrate-node +// frame-omni-bencher +// v1 // benchmark // pallet -// --chain=dev +// --extrinsic=* +// --runtime=target/production/wbuild/kitchensink-runtime/kitchensink_runtime.wasm +// --pallet=pallet_message_queue +// --header=/__w/polkadot-sdk/polkadot-sdk/substrate/HEADER-APACHE2 +// --output=/__w/polkadot-sdk/polkadot-sdk/substrate/frame/message-queue/src/weights.rs +// --wasm-execution=compiled // --steps=50 // --repeat=20 -// --pallet=pallet_message_queue +// --heap-pages=4096 +// --template=substrate/.maintain/frame-weight-template.hbs // --no-storage-info -// --no-median-slopes // --no-min-squares -// --extrinsic=* -// --wasm-execution=compiled -// --heap-pages=4096 -// --output=./substrate/frame/message-queue/src/weights.rs -// --header=./substrate/HEADER-APACHE2 -// --template=./substrate/.maintain/frame-weight-template.hbs +// --no-median-slopes +// --genesis-builder-policy=none +// --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] #![allow(missing_docs)] +#![allow(dead_code)] use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; use core::marker::PhantomData; @@ -58,6 +62,7 @@ pub trait WeightInfo { fn service_page_base_no_completion() -> Weight; fn service_page_item() -> Weight; fn bump_service_head() -> Weight; + fn set_service_head() -> Weight; fn reap_page() -> Weight; fn execute_overweight_page_removed() -> Weight; fn execute_overweight_page_updated() -> Weight; @@ -72,10 +77,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `301` + // 
Measured: `209` // Estimated: `6038` - // Minimum execution time: 17_093_000 picoseconds. - Weight::from_parts(17_612_000, 6038) + // Minimum execution time: 12_475_000 picoseconds. + Weight::from_parts(13_054_000, 6038) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -85,10 +90,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `301` + // Measured: `209` // Estimated: `6038` - // Minimum execution time: 15_482_000 picoseconds. - Weight::from_parts(16_159_000, 6038) + // Minimum execution time: 11_544_000 picoseconds. + Weight::from_parts(11_741_000, 6038) .saturating_add(T::DbWeight::get().reads(3_u64)) .saturating_add(T::DbWeight::get().writes(3_u64)) } @@ -96,10 +101,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: - // Measured: `76` + // Measured: `0` // Estimated: `3514` - // Minimum execution time: 4_911_000 picoseconds. - Weight::from_parts(5_177_000, 3514) + // Minimum execution time: 2_487_000 picoseconds. + Weight::from_parts(2_618_000, 3514) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -107,10 +112,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `147` + // Measured: `50` // Estimated: `69049` - // Minimum execution time: 7_108_000 picoseconds. - Weight::from_parts(7_477_000, 69049) + // Minimum execution time: 4_657_000 picoseconds. 
+ Weight::from_parts(4_890_000, 69049) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -118,10 +123,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `147` + // Measured: `50` // Estimated: `69049` - // Minimum execution time: 7_435_000 picoseconds. - Weight::from_parts(7_669_000, 69049) + // Minimum execution time: 4_750_000 picoseconds. + Weight::from_parts(4_988_000, 69049) .saturating_add(T::DbWeight::get().reads(1_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } @@ -133,8 +138,8 @@ impl WeightInfo for SubstrateWeight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 173_331_000 picoseconds. - Weight::from_parts(174_170_000, 0) + // Minimum execution time: 164_125_000 picoseconds. + Weight::from_parts(166_703_000, 0) .saturating_add(T::DbWeight::get().writes(2_u64)) } /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) @@ -143,23 +148,36 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `246` + // Measured: `154` // Estimated: `3514` - // Minimum execution time: 11_817_000 picoseconds. - Weight::from_parts(12_351_000, 3514) + // Minimum execution time: 7_120_000 picoseconds. 
+ Weight::from_parts(7_389_000, 3514) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(1_u64)) } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `154` + // Estimated: `3514` + // Minimum execution time: 6_176_000 picoseconds. + Weight::from_parts(6_484_000, 3514) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::Pages` (r:1 w:1) /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65744` + // Measured: `65652` // Estimated: `69049` - // Minimum execution time: 60_883_000 picoseconds. - Weight::from_parts(62_584_000, 69049) + // Minimum execution time: 56_098_000 picoseconds. + Weight::from_parts(57_609_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -169,10 +187,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65744` + // Measured: `65652` // Estimated: `69049` - // Minimum execution time: 77_569_000 picoseconds. 
- Weight::from_parts(79_165_000, 69049) + // Minimum execution time: 72_766_000 picoseconds. + Weight::from_parts(74_318_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -182,10 +200,10 @@ impl WeightInfo for SubstrateWeight { /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65744` + // Measured: `65652` // Estimated: `69049` - // Minimum execution time: 120_786_000 picoseconds. - Weight::from_parts(122_457_000, 69049) + // Minimum execution time: 112_953_000 picoseconds. + Weight::from_parts(115_634_000, 69049) .saturating_add(T::DbWeight::get().reads(2_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } @@ -199,10 +217,10 @@ impl WeightInfo for () { /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn ready_ring_knit() -> Weight { // Proof Size summary in bytes: - // Measured: `301` + // Measured: `209` // Estimated: `6038` - // Minimum execution time: 17_093_000 picoseconds. - Weight::from_parts(17_612_000, 6038) + // Minimum execution time: 12_475_000 picoseconds. + Weight::from_parts(13_054_000, 6038) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -212,10 +230,10 @@ impl WeightInfo for () { /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) fn ready_ring_unknit() -> Weight { // Proof Size summary in bytes: - // Measured: `301` + // Measured: `209` // Estimated: `6038` - // Minimum execution time: 15_482_000 picoseconds. - Weight::from_parts(16_159_000, 6038) + // Minimum execution time: 11_544_000 picoseconds. 
+ Weight::from_parts(11_741_000, 6038) .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(3_u64)) } @@ -223,10 +241,10 @@ impl WeightInfo for () { /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn service_queue_base() -> Weight { // Proof Size summary in bytes: - // Measured: `76` + // Measured: `0` // Estimated: `3514` - // Minimum execution time: 4_911_000 picoseconds. - Weight::from_parts(5_177_000, 3514) + // Minimum execution time: 2_487_000 picoseconds. + Weight::from_parts(2_618_000, 3514) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -234,10 +252,10 @@ impl WeightInfo for () { /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_base_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `147` + // Measured: `50` // Estimated: `69049` - // Minimum execution time: 7_108_000 picoseconds. - Weight::from_parts(7_477_000, 69049) + // Minimum execution time: 4_657_000 picoseconds. + Weight::from_parts(4_890_000, 69049) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -245,10 +263,10 @@ impl WeightInfo for () { /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn service_page_base_no_completion() -> Weight { // Proof Size summary in bytes: - // Measured: `147` + // Measured: `50` // Estimated: `69049` - // Minimum execution time: 7_435_000 picoseconds. - Weight::from_parts(7_669_000, 69049) + // Minimum execution time: 4_750_000 picoseconds. 
+ Weight::from_parts(4_988_000, 69049) .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } @@ -260,8 +278,8 @@ impl WeightInfo for () { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 173_331_000 picoseconds. - Weight::from_parts(174_170_000, 0) + // Minimum execution time: 164_125_000 picoseconds. + Weight::from_parts(166_703_000, 0) .saturating_add(RocksDbWeight::get().writes(2_u64)) } /// Storage: `MessageQueue::ServiceHead` (r:1 w:1) @@ -270,23 +288,36 @@ impl WeightInfo for () { /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) fn bump_service_head() -> Weight { // Proof Size summary in bytes: - // Measured: `246` + // Measured: `154` // Estimated: `3514` - // Minimum execution time: 11_817_000 picoseconds. - Weight::from_parts(12_351_000, 3514) + // Minimum execution time: 7_120_000 picoseconds. + Weight::from_parts(7_389_000, 3514) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } + /// Storage: `MessageQueue::BookStateFor` (r:1 w:0) + /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) + /// Storage: `MessageQueue::ServiceHead` (r:0 w:1) + /// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + fn set_service_head() -> Weight { + // Proof Size summary in bytes: + // Measured: `154` + // Estimated: `3514` + // Minimum execution time: 6_176_000 picoseconds. 
+ Weight::from_parts(6_484_000, 3514) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } /// Storage: `MessageQueue::BookStateFor` (r:1 w:1) /// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`) /// Storage: `MessageQueue::Pages` (r:1 w:1) /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn reap_page() -> Weight { // Proof Size summary in bytes: - // Measured: `65744` + // Measured: `65652` // Estimated: `69049` - // Minimum execution time: 60_883_000 picoseconds. - Weight::from_parts(62_584_000, 69049) + // Minimum execution time: 56_098_000 picoseconds. + Weight::from_parts(57_609_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -296,10 +327,10 @@ impl WeightInfo for () { /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn execute_overweight_page_removed() -> Weight { // Proof Size summary in bytes: - // Measured: `65744` + // Measured: `65652` // Estimated: `69049` - // Minimum execution time: 77_569_000 picoseconds. - Weight::from_parts(79_165_000, 69049) + // Minimum execution time: 72_766_000 picoseconds. + Weight::from_parts(74_318_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } @@ -309,10 +340,10 @@ impl WeightInfo for () { /// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`) fn execute_overweight_page_updated() -> Weight { // Proof Size summary in bytes: - // Measured: `65744` + // Measured: `65652` // Estimated: `69049` - // Minimum execution time: 120_786_000 picoseconds. - Weight::from_parts(122_457_000, 69049) + // Minimum execution time: 112_953_000 picoseconds. 
+ Weight::from_parts(115_634_000, 69049) .saturating_add(RocksDbWeight::get().reads(2_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } diff --git a/substrate/frame/referenda/src/mock.rs b/substrate/frame/referenda/src/mock.rs index c46236586f1f7..10e5f35bbabf1 100644 --- a/substrate/frame/referenda/src/mock.rs +++ b/substrate/frame/referenda/src/mock.rs @@ -81,6 +81,7 @@ impl pallet_scheduler::Config for Test { type WeightInfo = (); type OriginPrivilegeCmp = EqualPrivilegeOnly; type Preimages = Preimage; + type BlockNumberProvider = frame_system::Pallet; } #[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] impl pallet_balances::Config for Test { diff --git a/substrate/frame/revive/Cargo.toml b/substrate/frame/revive/Cargo.toml index 4faa9205378fe..09cbf0b49f591 100644 --- a/substrate/frame/revive/Cargo.toml +++ b/substrate/frame/revive/Cargo.toml @@ -17,6 +17,7 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +alloy-core = { workspace = true, features = ["sol-types"] } codec = { features = ["derive", "max-encoded-len"], workspace = true } derive_more = { workspace = true } environmental = { workspace = true } @@ -77,6 +78,7 @@ xcm-builder = { workspace = true, default-features = true } [features] default = ["std"] std = [ + "alloy-core/std", "codec/std", "environmental/std", "ethabi/std", diff --git a/substrate/frame/revive/fixtures/contracts/caller_contract.rs b/substrate/frame/revive/fixtures/contracts/caller_contract.rs index b6a9bf2895fa6..236aec2e863bd 100644 --- a/substrate/frame/revive/fixtures/contracts/caller_contract.rs +++ b/substrate/frame/revive/fixtures/contracts/caller_contract.rs @@ -31,7 +31,7 @@ pub extern "C" fn deploy() {} #[no_mangle] #[polkavm_derive::polkavm_export] pub extern "C" fn call() { - input!(code_hash: &[u8; 32],); + input!(code_hash: &[u8; 32], load_code_ref_time: u64,); // The value to transfer on instantiation and calls. Chosen to be greater than existential // deposit. 
@@ -49,8 +49,9 @@ pub extern "C" fn call() { // Fail to deploy the contract since it returns a non-zero exit status. let res = api::instantiate( - u64::MAX, // How much ref_time weight to devote for the execution. u64::MAX = use all. - u64::MAX, // How much proof_size weight to devote for the execution. u64::MAX = use all. + u64::MAX, /* How much ref_time weight to devote for the execution. u64::MAX = use + * all. */ + u64::MAX, // How much proof_size weight to devote for the execution. u64::MAX = use all. &[u8::MAX; 32], // No deposit limit. &value, &reverted_input_deploy, @@ -62,8 +63,9 @@ pub extern "C" fn call() { // Fail to deploy the contract due to insufficient ref_time weight. let res = api::instantiate( - 1u64, // too little ref_time weight - u64::MAX, // How much proof_size weight to devote for the execution. u64::MAX = use all. + 1u64, // too little ref_time weight + u64::MAX, /* How much proof_size weight to devote for the execution. u64::MAX = + * use all. */ &[u8::MAX; 32], // No deposit limit. &value, &input_deploy, @@ -75,7 +77,8 @@ pub extern "C" fn call() { // Fail to deploy the contract due to insufficient proof_size weight. let res = api::instantiate( - u64::MAX, // How much ref_time weight to devote for the execution. u64::MAX = use all. + u64::MAX, /* How much ref_time weight to devote for the execution. u64::MAX = use + * all. */ 1u64, // Too little proof_size weight &[u8::MAX; 32], // No deposit limit. &value, @@ -90,8 +93,9 @@ pub extern "C" fn call() { let mut callee = [0u8; 20]; api::instantiate( - u64::MAX, // How much ref_time weight to devote for the execution. u64::MAX = use all. - u64::MAX, // How much proof_size weight to devote for the execution. u64::MAX = use all. + u64::MAX, /* How much ref_time weight to devote for the execution. u64::MAX = use + * all. */ + u64::MAX, // How much proof_size weight to devote for the execution. u64::MAX = use all. &[u8::MAX; 32], // No deposit limit. 
&value, &input_deploy, @@ -118,8 +122,9 @@ pub extern "C" fn call() { let res = api::call( uapi::CallFlags::empty(), &callee, - 1u64, // Too little ref_time weight. - u64::MAX, // How much proof_size weight to devote for the execution. u64::MAX = use all. + load_code_ref_time, // Too little ref_time weight. + u64::MAX, /* How much proof_size weight to devote for the execution. u64::MAX + * = use all. */ &[u8::MAX; 32], // No deposit limit. &value, &INPUT, diff --git a/substrate/frame/revive/fixtures/contracts/tracing.rs b/substrate/frame/revive/fixtures/contracts/tracing.rs index 9cbef3bbc8435..451769b87cefd 100644 --- a/substrate/frame/revive/fixtures/contracts/tracing.rs +++ b/substrate/frame/revive/fixtures/contracts/tracing.rs @@ -20,7 +20,7 @@ #![no_std] #![no_main] -use common::input; +use common::{input, u256_bytes}; use uapi::{HostFn, HostFnImpl as api}; #[no_mangle] @@ -32,6 +32,18 @@ pub extern "C" fn deploy() {} pub extern "C" fn call() { input!(calls_left: u32, callee_addr: &[u8; 20],); if calls_left == 0 { + // transfer some value to BOB + let _ = api::call( + uapi::CallFlags::empty(), + &[2u8; 20], + u64::MAX, // How much ref_time to devote for the execution. u64::MAX = use all. + u64::MAX, /* How much proof_size to devote for the execution. u64::MAX = use + * all. */ + &[u8::MAX; 32], // No deposit limit. 
+ &u256_bytes(100), // Value transferred + &[], + None, + ); return } diff --git a/substrate/frame/revive/rpc/.sqlx/query-2fcbf357b3993c0065141859e5ad8c11bd7800e3e6d22e8383ab9ac8bbec25b1.json b/substrate/frame/revive/rpc/.sqlx/query-2fcbf357b3993c0065141859e5ad8c11bd7800e3e6d22e8383ab9ac8bbec25b1.json new file mode 100644 index 0000000000000..07e69b7d8f10b --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-2fcbf357b3993c0065141859e5ad8c11bd7800e3e6d22e8383ab9ac8bbec25b1.json @@ -0,0 +1,26 @@ +{ + "db_name": "SQLite", + "query": "\n\t\t SELECT transaction_index, transaction_hash\n\t\t FROM transaction_hashes\n\t\t WHERE block_hash = $1\n\t\t ", + "describe": { + "columns": [ + { + "name": "transaction_index", + "ordinal": 0, + "type_info": "Integer" + }, + { + "name": "transaction_hash", + "ordinal": 1, + "type_info": "Blob" + } + ], + "parameters": { + "Right": 1 + }, + "nullable": [ + false, + false + ] + }, + "hash": "2fcbf357b3993c0065141859e5ad8c11bd7800e3e6d22e8383ab9ac8bbec25b1" +} diff --git a/substrate/frame/revive/rpc/.sqlx/query-6345c84da6afad02d0fdf4e1657c53e64320c118d39db73f573510235baf4ba0.json b/substrate/frame/revive/rpc/.sqlx/query-6345c84da6afad02d0fdf4e1657c53e64320c118d39db73f573510235baf4ba0.json new file mode 100644 index 0000000000000..498b125292037 --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-6345c84da6afad02d0fdf4e1657c53e64320c118d39db73f573510235baf4ba0.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n DELETE FROM logs\n WHERE block_hash = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 1 + }, + "nullable": [] + }, + "hash": "6345c84da6afad02d0fdf4e1657c53e64320c118d39db73f573510235baf4ba0" +} diff --git a/substrate/frame/revive/rpc/.sqlx/query-76dd0f2460cfc0ffa93dda7a42893cbf05b3451cb8e4c4cb6cf86ec70930a11e.json b/substrate/frame/revive/rpc/.sqlx/query-76dd0f2460cfc0ffa93dda7a42893cbf05b3451cb8e4c4cb6cf86ec70930a11e.json new file mode 100644 index 0000000000000..e8c40966e8395 --- 
/dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-76dd0f2460cfc0ffa93dda7a42893cbf05b3451cb8e4c4cb6cf86ec70930a11e.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n\t\t\t\t\tINSERT OR REPLACE INTO logs(\n\t\t\t\t\t\tblock_hash,\n\t\t\t\t\t\ttransaction_index,\n\t\t\t\t\t\tlog_index,\n\t\t\t\t\t\taddress,\n\t\t\t\t\t\tblock_number,\n\t\t\t\t\t\ttransaction_hash,\n\t\t\t\t\t\ttopic_0, topic_1, topic_2, topic_3,\n\t\t\t\t\t\tdata)\n\t\t\t\t\tVALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)\n\t\t\t\t\t", + "describe": { + "columns": [], + "parameters": { + "Right": 11 + }, + "nullable": [] + }, + "hash": "76dd0f2460cfc0ffa93dda7a42893cbf05b3451cb8e4c4cb6cf86ec70930a11e" +} diff --git a/substrate/frame/revive/rpc/.sqlx/query-d7377b5a09f075668d259d02e3fc7a12048a70a33b96381118d6c24210afce34.json b/substrate/frame/revive/rpc/.sqlx/query-d7377b5a09f075668d259d02e3fc7a12048a70a33b96381118d6c24210afce34.json new file mode 100644 index 0000000000000..e4979066814af --- /dev/null +++ b/substrate/frame/revive/rpc/.sqlx/query-d7377b5a09f075668d259d02e3fc7a12048a70a33b96381118d6c24210afce34.json @@ -0,0 +1,12 @@ +{ + "db_name": "SQLite", + "query": "\n DELETE FROM transaction_hashes\n WHERE block_hash = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Right": 1 + }, + "nullable": [] + }, + "hash": "d7377b5a09f075668d259d02e3fc7a12048a70a33b96381118d6c24210afce34" +} diff --git a/substrate/frame/revive/rpc/Cargo.toml b/substrate/frame/revive/rpc/Cargo.toml index c333c9816e579..b207a6041b9b6 100644 --- a/substrate/frame/revive/rpc/Cargo.toml +++ b/substrate/frame/revive/rpc/Cargo.toml @@ -41,7 +41,6 @@ path = "examples/rust/remark-extrinsic.rs" anyhow = { workspace = true } clap = { workspace = true, features = ["derive", "env"] } codec = { workspace = true, features = ["derive"] } -ethabi = { version = "18.0.0" } futures = { workspace = true, features = ["thread-pool"] } hex = { workspace = true } jsonrpsee = { workspace = true, features = 
["full"] } @@ -57,6 +56,7 @@ sc-service = { workspace = true, default-features = true } sp-arithmetic = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-crypto-hashing = { workspace = true } +sp-runtime = { workspace = true, default-features = true } sp-weights = { workspace = true, default-features = true } sqlx = { version = "0.8.2", features = ["macros", "runtime-tokio", "sqlite"] } subxt = { workspace = true, default-features = true, features = [ @@ -70,6 +70,8 @@ tokio = { workspace = true, features = ["full"] } [dev-dependencies] env_logger = { workspace = true } +ethabi = { version = "18.0.0" } +pretty_assertions = { workspace = true } static_init = { workspace = true } substrate-cli-test-utils = { workspace = true } subxt-signer = { workspace = true, features = ["unstable-eth"] } diff --git a/substrate/frame/revive/rpc/examples/js/.solhint.json b/substrate/frame/revive/rpc/examples/js/.solhint.json new file mode 100644 index 0000000000000..83a795a1f4eef --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/.solhint.json @@ -0,0 +1,9 @@ +{ + "extends": "solhint:recommended", + "rules": { + "compiler-version": ["error", "^0.8.0"], + "gas-custom-errors": "off", + "one-contract-per-file": "off", + "no-empty-blocks": "off" + } +} diff --git a/substrate/frame/revive/rpc/examples/js/bun.lockb b/substrate/frame/revive/rpc/examples/js/bun.lockb index 39a1d0906b70e..3b8ad3d048b07 100755 Binary files a/substrate/frame/revive/rpc/examples/js/bun.lockb and b/substrate/frame/revive/rpc/examples/js/bun.lockb differ diff --git a/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json b/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json deleted file mode 100644 index 2614a969da39b..0000000000000 --- a/substrate/frame/revive/rpc/examples/js/contracts/.solhint.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "extends": "solhint:recommended" -} diff --git 
a/substrate/frame/revive/rpc/examples/js/contracts/Flipper.sol b/substrate/frame/revive/rpc/examples/js/contracts/Flipper.sol index 51aaafcae4288..93b09f08c2723 100644 --- a/substrate/frame/revive/rpc/examples/js/contracts/Flipper.sol +++ b/substrate/frame/revive/rpc/examples/js/contracts/Flipper.sol @@ -20,7 +20,7 @@ contract FlipperCaller { address public flipperAddress; // Constructor to initialize Flipper's address - constructor(address _flipperAddress) { + constructor(address _flipperAddress) public { flipperAddress = _flipperAddress; } diff --git a/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol b/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol index 0c8a4d26f4dc2..89be1b2589dd5 100644 --- a/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol +++ b/substrate/frame/revive/rpc/examples/js/contracts/PiggyBank.sol @@ -6,7 +6,7 @@ contract PiggyBank { uint256 private balance; address public owner; - constructor() { + constructor() public { owner = msg.sender; balance = 0; } @@ -21,7 +21,7 @@ contract PiggyBank { } function withdraw(uint256 withdrawAmount) public returns (uint256 remainingBal) { - require(msg.sender == owner); + require(msg.sender == owner, "You are not the owner"); balance -= withdrawAmount; (bool success, ) = payable(msg.sender).call{value: withdrawAmount}(""); require(success, "Transfer failed"); diff --git a/substrate/frame/revive/rpc/examples/js/contracts/Tracing.sol b/substrate/frame/revive/rpc/examples/js/contracts/Tracing.sol new file mode 100644 index 0000000000000..c7867fc4a0536 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/contracts/Tracing.sol @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +contract TracingCaller { + event TraceEvent(uint256 value, string message); + address payable public callee; + + constructor(address payable _callee) public payable { + require(_callee != address(0), "Callee address cannot be zero"); + callee = _callee; + } + + 
function start(uint256 counter) external { + if (counter == 0) { + return; + } + + uint256 paymentAmount = 0.01 ether; + callee.transfer(paymentAmount); + + emit TraceEvent(counter, "before"); + TracingCallee(callee).consumeGas(counter); + emit TraceEvent(counter, "after"); + + try TracingCallee(callee).failingFunction{value: paymentAmount}() { + } catch { + } + + this.start(counter - 1); + } +} + +contract TracingCallee { + event CalleeCalled(uint256 counter); + + function consumeGas(uint256 counter) external { + // burn some gas + for (uint256 i = 0; i < 10; i++) { + uint256(keccak256(abi.encodePacked(i))); + } + + emit CalleeCalled(counter); + } + + function failingFunction() external payable { + require(false, "This function always fails"); + } + + // Enable contract to receive Ether + receive() external payable {} +} + diff --git a/substrate/frame/revive/rpc/examples/js/package.json b/substrate/frame/revive/rpc/examples/js/package.json index e181461cf8615..5b225711dc552 100644 --- a/substrate/frame/revive/rpc/examples/js/package.json +++ b/substrate/frame/revive/rpc/examples/js/package.json @@ -7,12 +7,14 @@ "dev": "vite", "build": "tsc && vite build", "preview": "vite preview", - "prettier": "prettier --write ." 
+ "prettier": "prettier --write .", + "solhint": "solhint \"contracts/**/*.sol\"" }, "dependencies": { "@parity/revive": "^0.0.9", "ethers": "^6.13.5", "solc": "^0.8.28", + "solhint": "^5.0.5", "viem": "^2.22.4" }, "devDependencies": { diff --git a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts index b162b8be0adfe..17f5dffd9973e 100644 --- a/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts +++ b/substrate/frame/revive/rpc/examples/js/src/build-contracts.ts @@ -1,4 +1,4 @@ -import { compile } from '@parity/revive' +import { compile, SolcOutput } from '@parity/revive' import { format } from 'prettier' import { parseArgs } from 'node:util' import solc from 'solc' @@ -8,7 +8,7 @@ import { basename, join } from 'path' type CompileInput = Parameters[0] const { - values: { filter }, + values: { filter, solcOnly }, } = parseArgs({ args: process.argv.slice(2), options: { @@ -16,6 +16,10 @@ const { type: 'string', short: 'f', }, + solcOnly: { + type: 'boolean', + short: 's', + }, }, }) @@ -54,32 +58,23 @@ for (const file of input) { [name]: { content: readFileSync(join(contractsDir, file), 'utf8') }, } - console.log('Compiling with revive...') - const reviveOut = await compile(input) - - for (const contracts of Object.values(reviveOut.contracts)) { - for (const [name, contract] of Object.entries(contracts)) { - console.log(`📜 Add PVM contract ${name}`) - const abi = contract.abi - const abiName = `${name}Abi` - writeFileSync(join(abiDir, `${name}.json`), JSON.stringify(abi, null, 2)) - - writeFileSync( - join(abiDir, `${name}.ts`), - await format(`export const ${abiName} = ${JSON.stringify(abi, null, 2)} as const`, { - parser: 'typescript', - }) - ) + if (!solcOnly) { + console.log('Compiling with revive...') + const reviveOut = await compile(input, { bin: 'resolc' }) - writeFileSync( - join(pvmDir, `${name}.polkavm`), - Buffer.from(contract.evm.bytecode.object, 'hex') - ) + for 
(const contracts of Object.values(reviveOut.contracts)) { + for (const [name, contract] of Object.entries(contracts)) { + console.log(`📜 Add PVM contract ${name}`) + writeFileSync( + join(pvmDir, `${name}.polkavm`), + Buffer.from(contract.evm.bytecode.object, 'hex') + ) + } } } console.log(`Compile with solc ${file}`) - const evmOut = JSON.parse(evmCompile(input)) as typeof reviveOut + const evmOut = JSON.parse(evmCompile(input)) as SolcOutput for (const contracts of Object.values(evmOut.contracts)) { for (const [name, contract] of Object.entries(contracts)) { @@ -88,6 +83,17 @@ for (const file of input) { join(evmDir, `${name}.bin`), Buffer.from(contract.evm.bytecode.object, 'hex') ) + + const abi = contract.abi + const abiName = `${name}Abi` + writeFileSync(join(abiDir, `${name}.json`), JSON.stringify(abi, null, 2)) + + writeFileSync( + join(abiDir, `${name}.ts`), + await format(`export const ${abiName} = ${JSON.stringify(abi, null, 2)} as const`, { + parser: 'typescript', + }) + ) } } } diff --git a/substrate/frame/revive/rpc/examples/js/src/fixtures/debug_traceCall.json b/substrate/frame/revive/rpc/examples/js/src/fixtures/debug_traceCall.json new file mode 100644 index 0000000000000..b017c97693ddc --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/fixtures/debug_traceCall.json @@ -0,0 +1,143 @@ +{ + "from": "0x0000000000000000000000000000000000000000", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0x95805dad0000000000000000000000000000000000000000000000000000000000000002", + "calls": [ + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0x", + "value": "0x2386f26fc10000", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0xa329e8de0000000000000000000000000000000000000000000000000000000000000002", + "logs": [ + { + "address": "", + "topics": [ + "0xa07465e8ec189714f79f3786a8f616baf78ebd9cb1769bd61f18de45f2567360" + ], + "data": 
"0x0000000000000000000000000000000000000000000000000000000000000002", + "position": "0x0" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0xd1b96663", + "output": "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001a546869732066756e6374696f6e20616c77617973206661696c73000000000000", + "error": "execution reverted", + "revertReason": "This function always fails", + "value": "0x2386f26fc10000", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0x95805dad0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0x", + "value": "0x2386f26fc10000", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0xa329e8de0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "", + "topics": [ + "0xa07465e8ec189714f79f3786a8f616baf78ebd9cb1769bd61f18de45f2567360" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000001", + "position": "0x0" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0xd1b96663", + "output": "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001a546869732066756e6374696f6e20616c77617973206661696c73000000000000", + "error": "execution reverted", + "revertReason": "This function always fails", + "value": "0x2386f26fc10000", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0x95805dad0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "", 
+ "topics": [ + "0x25d760f35a7a9cb2bffd2ea8756913655b3786c642f300a702e2934062763549" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000066265666f72650000000000000000000000000000000000000000000000000000", + "position": "0x1" + }, + { + "address": "", + "topics": [ + "0x25d760f35a7a9cb2bffd2ea8756913655b3786c642f300a702e2934062763549" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000056166746572000000000000000000000000000000000000000000000000000000", + "position": "0x2" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "", + "topics": ["0x25d760f35a7a9cb2bffd2ea8756913655b3786c642f300a702e2934062763549"], + "data": "0x0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000066265666f72650000000000000000000000000000000000000000000000000000", + "position": "0x1" + }, + { + "address": "", + "topics": ["0x25d760f35a7a9cb2bffd2ea8756913655b3786c642f300a702e2934062763549"], + "data": "0x0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000056166746572000000000000000000000000000000000000000000000000000000", + "position": "0x2" + } + ], + "value": "0x0", + "type": "CALL" +} diff --git a/substrate/frame/revive/rpc/examples/js/src/fixtures/trace_block.json b/substrate/frame/revive/rpc/examples/js/src/fixtures/trace_block.json new file mode 100644 index 0000000000000..952c1c5ce084e --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/fixtures/trace_block.json 
@@ -0,0 +1,152 @@ +[ + { + "txHash": "", + "result": { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0x95805dad0000000000000000000000000000000000000000000000000000000000000002", + "calls": [ + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0x", + "value": "0x2386f26fc10000", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0xa329e8de0000000000000000000000000000000000000000000000000000000000000002", + "logs": [ + { + "address": "", + "topics": [ + "0xa07465e8ec189714f79f3786a8f616baf78ebd9cb1769bd61f18de45f2567360" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000002", + "position": "0x0" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0xd1b96663", + "output": "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001a546869732066756e6374696f6e20616c77617973206661696c73000000000000", + "error": "execution reverted", + "revertReason": "This function always fails", + "value": "0x2386f26fc10000", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0x95805dad0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0x", + "value": "0x2386f26fc10000", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0xa329e8de0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "", + "topics": [ + "0xa07465e8ec189714f79f3786a8f616baf78ebd9cb1769bd61f18de45f2567360" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000001", + "position": "0x0" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "", + "gas": 
"0x42", + "gasUsed": "0x42", + "to": "", + "input": "0xd1b96663", + "output": "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001a546869732066756e6374696f6e20616c77617973206661696c73000000000000", + "error": "execution reverted", + "revertReason": "This function always fails", + "value": "0x2386f26fc10000", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0x95805dad0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "", + "topics": [ + "0x25d760f35a7a9cb2bffd2ea8756913655b3786c642f300a702e2934062763549" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000066265666f72650000000000000000000000000000000000000000000000000000", + "position": "0x1" + }, + { + "address": "", + "topics": [ + "0x25d760f35a7a9cb2bffd2ea8756913655b3786c642f300a702e2934062763549" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000056166746572000000000000000000000000000000000000000000000000000000", + "position": "0x2" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "", + "topics": [ + "0x25d760f35a7a9cb2bffd2ea8756913655b3786c642f300a702e2934062763549" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000066265666f72650000000000000000000000000000000000000000000000000000", + "position": "0x1" + }, + { + "address": "", + "topics": [ + 
"0x25d760f35a7a9cb2bffd2ea8756913655b3786c642f300a702e2934062763549" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000056166746572000000000000000000000000000000000000000000000000000000", + "position": "0x2" + } + ], + "value": "0x0", + "type": "CALL" + } + } +] diff --git a/substrate/frame/revive/rpc/examples/js/src/fixtures/trace_transaction.json b/substrate/frame/revive/rpc/examples/js/src/fixtures/trace_transaction.json new file mode 100644 index 0000000000000..9ef9c7c4dfca7 --- /dev/null +++ b/substrate/frame/revive/rpc/examples/js/src/fixtures/trace_transaction.json @@ -0,0 +1,143 @@ +{ + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0x95805dad0000000000000000000000000000000000000000000000000000000000000002", + "calls": [ + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0x", + "value": "0x2386f26fc10000", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0xa329e8de0000000000000000000000000000000000000000000000000000000000000002", + "logs": [ + { + "address": "", + "topics": [ + "0xa07465e8ec189714f79f3786a8f616baf78ebd9cb1769bd61f18de45f2567360" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000002", + "position": "0x0" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0xd1b96663", + "output": "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001a546869732066756e6374696f6e20616c77617973206661696c73000000000000", + "error": "execution reverted", + "revertReason": "This function always fails", + "value": "0x2386f26fc10000", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": 
"", + "input": "0x95805dad0000000000000000000000000000000000000000000000000000000000000001", + "calls": [ + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0x", + "value": "0x2386f26fc10000", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0xa329e8de0000000000000000000000000000000000000000000000000000000000000001", + "logs": [ + { + "address": "", + "topics": [ + "0xa07465e8ec189714f79f3786a8f616baf78ebd9cb1769bd61f18de45f2567360" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000001", + "position": "0x0" + } + ], + "value": "0x0", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0xd1b96663", + "output": "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001a546869732066756e6374696f6e20616c77617973206661696c73000000000000", + "error": "execution reverted", + "revertReason": "This function always fails", + "value": "0x2386f26fc10000", + "type": "CALL" + }, + { + "from": "", + "gas": "0x42", + "gasUsed": "0x42", + "to": "", + "input": "0x95805dad0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "", + "topics": [ + "0x25d760f35a7a9cb2bffd2ea8756913655b3786c642f300a702e2934062763549" + ], + "data": "0x0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000066265666f72650000000000000000000000000000000000000000000000000000", + "position": "0x1" + }, + { + "address": "", + "topics": [ + "0x25d760f35a7a9cb2bffd2ea8756913655b3786c642f300a702e2934062763549" + ], + "data": 
"0x0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000056166746572000000000000000000000000000000000000000000000000000000", + "position": "0x2" + } + ], + "value": "0x0", + "type": "CALL" + } + ], + "logs": [ + { + "address": "", + "topics": ["0x25d760f35a7a9cb2bffd2ea8756913655b3786c642f300a702e2934062763549"], + "data": "0x0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000066265666f72650000000000000000000000000000000000000000000000000000", + "position": "0x1" + }, + { + "address": "", + "topics": ["0x25d760f35a7a9cb2bffd2ea8756913655b3786c642f300a702e2934062763549"], + "data": "0x0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000056166746572000000000000000000000000000000000000000000000000000000", + "position": "0x2" + } + ], + "value": "0x0", + "type": "CALL" +} diff --git a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts index 51bbad3c17966..33e38fccd8a81 100644 --- a/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts +++ b/substrate/frame/revive/rpc/examples/js/src/geth-diff.test.ts @@ -5,17 +5,20 @@ import { killProcessOnPort, waitForHealth, polkadotSdkPath, + visit, } from './util.ts' import { afterAll, afterEach, describe, expect, test } from 'bun:test' -import { encodeFunctionData, Hex, parseEther, decodeEventLog, keccak256, toHex } from 'viem' +import { encodeFunctionData, Hex, parseEther, decodeEventLog } from 'viem' import { ErrorsAbi } from '../abi/Errors' import { EventExampleAbi } from '../abi/EventExample' +import { TracingCallerAbi } from 
'../abi/TracingCaller' +import { TracingCalleeAbi } from '../abi/TracingCallee' import { Subprocess, spawn } from 'bun' import { fail } from 'node:assert' const procs: Subprocess[] = [] if (process.env.START_GETH) { - process.env.USE_ETH_RPC = 'true' + process.env.USE_GETH = 'true' procs.push( // Run geth on port 8546 await (async () => { @@ -129,6 +132,41 @@ for (const env of envs) { } })() + const getTracingExampleAddrs = (() => { + let callerAddr: Hex = '0x' + let calleeAddr: Hex = '0x' + return async () => { + if (callerAddr !== '0x') { + return [callerAddr, calleeAddr] + } + calleeAddr = await (async () => { + const hash = await env.serverWallet.deployContract({ + abi: TracingCalleeAbi, + bytecode: getByteCode('TracingCallee', env.evm), + }) + const receipt = await env.serverWallet.waitForTransactionReceipt({ + hash, + }) + return receipt.contractAddress! + })() + + callerAddr = await (async () => { + const hash = await env.serverWallet.deployContract({ + abi: TracingCallerAbi, + args: [calleeAddr], + bytecode: getByteCode('TracingCaller', env.evm), + value: parseEther('10'), + }) + const receipt = await env.serverWallet.waitForTransactionReceipt({ + hash, + }) + return receipt.contractAddress! 
+ })() + + return [callerAddr, calleeAddr] + } + })() + test('triggerAssertError', async () => { try { await env.accountWallet.readContract({ @@ -143,7 +181,10 @@ for (const env of envs) { expect(lastJsonRpcError?.data).toBe( '0x4e487b710000000000000000000000000000000000000000000000000000000000000001' ) - expect(lastJsonRpcError?.message).toBe('execution reverted: assert(false)') + expect(lastJsonRpcError?.message).toBeOneOf([ + 'execution reverted: assert(false)', + 'execution reverted: panic: assertion failed (0x01)', + ]) } }) @@ -158,7 +199,10 @@ for (const env of envs) { } catch (err) { const lastJsonRpcError = jsonRpcErrors.pop() expect(lastJsonRpcError?.code).toBe(3) - expect(lastJsonRpcError?.message).toBe('execution reverted: This is a revert error') + expect(lastJsonRpcError?.message).toBeOneOf([ + 'execution reverted: This is a revert error', + 'execution reverted: revert: This is a revert error', + ]) expect(lastJsonRpcError?.data).toBe( '0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001654686973206973206120726576657274206572726f7200000000000000000000' ) @@ -179,9 +223,10 @@ for (const env of envs) { expect(lastJsonRpcError?.data).toBe( '0x4e487b710000000000000000000000000000000000000000000000000000000000000012' ) - expect(lastJsonRpcError?.message).toBe( - 'execution reverted: division or modulo by zero' - ) + expect(lastJsonRpcError?.message).toBeOneOf([ + 'execution reverted: division or modulo by zero', + 'execution reverted: panic: division or modulo by zero (0x12)', + ]) } }) @@ -199,9 +244,10 @@ for (const env of envs) { expect(lastJsonRpcError?.data).toBe( '0x4e487b710000000000000000000000000000000000000000000000000000000000000032' ) - expect(lastJsonRpcError?.message).toBe( - 'execution reverted: out-of-bounds access of an array or bytesN' - ) + expect(lastJsonRpcError?.message).toBeOneOf([ + 'execution reverted: out-of-bounds access of an array or bytesN', 
+ 'execution reverted: panic: array out-of-bounds access (0x32)', + ]) } }) @@ -308,9 +354,10 @@ for (const env of envs) { } catch (err) { const lastJsonRpcError = jsonRpcErrors.pop() expect(lastJsonRpcError?.code).toBe(3) - expect(lastJsonRpcError?.message).toBe( - 'execution reverted: msg.value does not match value' - ) + expect(lastJsonRpcError?.message).toBeOneOf([ + 'execution reverted: msg.value does not match value', + 'execution reverted: revert: msg.value does not match value', + ]) expect(lastJsonRpcError?.data).toBe( '0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e6d73672e76616c756520646f6573206e6f74206d617463682076616c75650000' ) @@ -364,5 +411,121 @@ for (const env of envs) { ], }) }) + + test('logs', async () => { + let address = await getEventExampleAddr() + let { request } = await env.serverWallet.simulateContract({ + address, + abi: EventExampleAbi, + functionName: 'triggerEvent', + }) + + let hash = await env.serverWallet.writeContract(request) + let receipt = await env.serverWallet.waitForTransactionReceipt({ hash }) + const logs = await env.serverWallet.getLogs({ + address, + blockHash: receipt.blockHash, + }) + expect(logs).toHaveLength(1) + expect(logs[0]).toMatchObject({ + address, + data: '0x00000000000000000000000000000000000000000000000000000000000030390000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000b48656c6c6f20776f726c64000000000000000000000000000000000000000000', + transactionHash: hash, + }) + + expect( + decodeEventLog({ + abi: EventExampleAbi, + data: logs[0].data, + topics: logs[0].topics, + }) + ).toEqual({ + eventName: 'ExampleEvent', + args: { + sender: env.serverWallet.account.address, + value: 12345n, + message: 'Hello world', + }, + }) + }) + + test('tracing', async () => { + let [callerAddr, calleeAddr] = await getTracingExampleAddrs() + + const receipt = 
await (async () => { + const { request } = await env.serverWallet.simulateContract({ + address: callerAddr, + abi: TracingCallerAbi, + functionName: 'start', + args: [2n], + }) + const hash = await env.serverWallet.writeContract(request) + return await env.serverWallet.waitForTransactionReceipt({ hash }) + })() + + const visitor: Parameters[1] = (key, value) => { + switch (key) { + case 'address': + case 'from': + case 'to': { + if (value === callerAddr) { + return '' + } else if (value === calleeAddr) { + return '' + } else if (value == env.serverWallet.account.address.toLowerCase()) { + return '' + } + + return value + } + case 'revertReason': + return value.startsWith('revert: ') ? value.slice('revert: '.length) : value + + case 'gas': + case 'gasUsed': { + return '0x42' + } + case 'txHash': { + return '' + } + default: { + return value + } + } + } + + // test debug_traceTransaction + { + const fixture = await Bun.file('./src/fixtures/trace_transaction.json').json() + const res = await env.debugClient.traceTransaction(receipt.transactionHash, { + withLog: true, + }) + expect(visit(res, visitor)).toEqual(fixture) + } + + // test debug_traceBlock + { + const res = await env.debugClient.traceBlock(receipt.blockNumber, { withLog: true }) + const fixture = await Bun.file('./src/fixtures/trace_block.json').json() + expect(visit(res, visitor)).toEqual(fixture) + } + + // test debug_traceCall + { + const fixture = await Bun.file('./src/fixtures/debug_traceCall.json').json() + const res = await env.debugClient.traceCall( + { + to: callerAddr, + data: encodeFunctionData({ + abi: TracingCallerAbi, + functionName: 'start', + args: [2n], + }), + }, + { withLog: true } + ) + expect(visit(res, visitor)).toEqual(fixture) + } + }) }) } diff --git a/substrate/frame/revive/rpc/examples/js/src/spammer.ts b/substrate/frame/revive/rpc/examples/js/src/spammer.ts index 29bdf20d935c7..682bfcabb2698 100644 --- a/substrate/frame/revive/rpc/examples/js/src/spammer.ts +++ 
b/substrate/frame/revive/rpc/examples/js/src/spammer.ts @@ -4,62 +4,49 @@ import { getByteCode, killProcessOnPort, polkadotSdkPath, - timeout, wait, waitForHealth, } from './util' import { FlipperAbi } from '../abi/Flipper' -//Run the substate node -console.log('🚀 Start substrate-node...') -killProcessOnPort(9944) -spawn( - [ - './target/debug/substrate-node', - '--dev', - '-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug', - ], - { - stdout: Bun.file('/tmp/substrate-node.out.log'), - stderr: Bun.file('/tmp/substrate-node.err.log'), - cwd: polkadotSdkPath, - } -) - -// Run eth-indexer -console.log('🔍 Start indexer...') -spawn( - [ - './target/debug/eth-indexer', - '--node-rpc-url=ws://localhost:9944', - '-l=eth-rpc=debug', - '--database-url ${polkadotSdkPath}/substrate/frame/revive/rpc/tx_hashes.db', - ], - { - stdout: Bun.file('/tmp/eth-indexer.out.log'), - stderr: Bun.file('/tmp/eth-indexer.err.log'), - cwd: polkadotSdkPath, - } -) +if (process.env.START_SUBSTRATE_NODE) { + //Run the substate node + console.log('🚀 Start substrate-node...') + killProcessOnPort(9944) + spawn( + [ + './target/debug/substrate-node', + '--dev', + '-l=error,evm=debug,sc_rpc_server=info,runtime::revive=debug', + ], + { + stdout: Bun.file('/tmp/substrate-node.out.log'), + stderr: Bun.file('/tmp/substrate-node.err.log'), + cwd: polkadotSdkPath, + } + ) +} // Run eth-rpc on 8545 -console.log('💻 Start eth-rpc...') -killProcessOnPort(8545) -spawn( - [ - './target/debug/eth-rpc', - '--dev', - '--node-rpc-url=ws://localhost:9944', - '-l=rpc-metrics=debug,eth-rpc=debug', - ], - { - stdout: Bun.file('/tmp/eth-rpc.out.log'), - stderr: Bun.file('/tmp/eth-rpc.err.log'), - cwd: polkadotSdkPath, - } -) -await waitForHealth('http://localhost:8545').catch() +if (process.env.START_ETH_RPC) { + console.log('🚀 Start eth-rpc...') + killProcessOnPort(8545) + spawn( + [ + './target/debug/eth-rpc', + '--dev', + '--node-rpc-url=ws://localhost:9944', + '-l=rpc-metrics=debug,eth-rpc=debug', + ], + { + 
stdout: Bun.file('/tmp/eth-rpc.out.log'), + stderr: Bun.file('/tmp/eth-rpc.err.log'), + cwd: polkadotSdkPath, + } + ) +} +await waitForHealth('http://localhost:8545').catch() const env = await createEnv('eth-rpc') const wallet = env.accountWallet @@ -74,30 +61,32 @@ if (!deployReceipt.contractAddress) throw new Error('Contract address should be const flipperAddr = deployReceipt.contractAddress let nonce = await wallet.getTransactionCount(wallet.account) -let callCount = 0 -console.log('🔄 Starting nonce:', nonce) console.log('🔄 Starting loop...') +console.log('Starting nonce:', nonce) try { while (true) { - callCount++ - console.log(`🔄 Call flip (${callCount})...`) + console.log(`Call flip (nonce: ${nonce})...`) const { request } = await wallet.simulateContract({ account: wallet.account, address: flipperAddr, abi: FlipperAbi, functionName: 'flip', + nonce, }) - console.log(`🔄 Submit flip (call ${callCount}...`) - - await Promise.race([ - (async () => { - const hash = await wallet.writeContract(request) - await wallet.waitForTransactionReceipt({ hash }) - })(), - timeout(15_000), - ]) + const hash = await wallet.writeContract(request) + console.time(hash) + wallet.waitForTransactionReceipt({ hash }).then((receipt) => { + console.timeEnd(hash) + console.log('-----------------------------------') + console.log(`status: ${receipt.status ? 
'✅' : '❌'}`) + console.log(`block: ${receipt.blockNumber} - hash: ${receipt.blockHash}`) + console.log(`tx: ${hash}`) + console.log('-----------------------------------') + }) + await wait(1_000) + nonce++ } } catch (err) { console.error('Failed with error:', err) diff --git a/substrate/frame/revive/rpc/examples/js/src/transfer.ts b/substrate/frame/revive/rpc/examples/js/src/transfer.ts index aef9a487b0c01..711e4eb893796 100644 --- a/substrate/frame/revive/rpc/examples/js/src/transfer.ts +++ b/substrate/frame/revive/rpc/examples/js/src/transfer.ts @@ -6,10 +6,11 @@ try { console.log(`Signer balance: ${await walletClient.getBalance(walletClient.account)}`) console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) - await walletClient.sendTransaction({ + let resp = await walletClient.sendTransaction({ to: recipient, value: parseEther('1.0'), }) + console.log(`Transaction hash: ${resp}`) console.log(`Sent: ${parseEther('1.0')}`) console.log(`Signer balance: ${await walletClient.getBalance(walletClient.account)}`) console.log(`Recipient balance: ${await walletClient.getBalance({ address: recipient })}`) diff --git a/substrate/frame/revive/rpc/examples/js/src/util.ts b/substrate/frame/revive/rpc/examples/js/src/util.ts index 3a488da67d801..cf5c08d256106 100644 --- a/substrate/frame/revive/rpc/examples/js/src/util.ts +++ b/substrate/frame/revive/rpc/examples/js/src/util.ts @@ -1,7 +1,17 @@ import { spawnSync } from 'bun' import { resolve } from 'path' import { readFileSync } from 'fs' -import { createWalletClient, defineChain, Hex, http, publicActions } from 'viem' +import { + CallParameters, + createClient, + createWalletClient, + defineChain, + formatTransactionRequest, + type Hex, + hexToNumber, + http, + publicActions, +} from 'viem' import { privateKeyToAccount, nonceManager } from 'viem/accounts' export function getByteCode(name: string, evm: boolean = false): Hex { @@ -39,8 +49,21 @@ export async function createEnv(name: 'geth' | 
'eth-rpc') { const gethPort = process.env.GETH_PORT || '8546' const ethRpcPort = process.env.ETH_RPC_PORT || '8545' const url = `http://localhost:${name == 'geth' ? gethPort : ethRpcPort}` + + let id = await (async () => { + const resp = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ jsonrpc: '2.0', method: 'eth_chainId', id: 1 }), + }) + let { result } = await resp.json() + return hexToNumber(result) + })() + const chain = defineChain({ - id: name == 'geth' ? 1337 : 420420420, + id, name, nativeCurrency: { name: 'Westie', @@ -94,9 +117,43 @@ export async function createEnv(name: 'geth' | 'eth-rpc') { chain, }).extend(publicActions) - return { serverWallet, emptyWallet, accountWallet, evm: name == 'geth' } + const debugClient = createClient({ + chain, + transport, + }).extend((client) => ({ + async traceTransaction(txHash: Hex, tracerConfig: { withLog: boolean }) { + return client.request({ + method: 'debug_traceTransaction' as any, + params: [txHash, { tracer: 'callTracer', tracerConfig } as any], + }) + }, + async traceBlock(blockNumber: bigint, tracerConfig: { withLog: boolean }) { + return client.request({ + method: 'debug_traceBlockByNumber' as any, + params: [ + `0x${blockNumber.toString(16)}`, + { tracer: 'callTracer', tracerConfig } as any, + ], + }) + }, + + async traceCall(args: CallParameters, tracerConfig: { withLog: boolean }) { + return client.request({ + method: 'debug_traceCall' as any, + params: [ + formatTransactionRequest(args), + 'latest', + { tracer: 'callTracer', tracerConfig } as any, + ], + }) + }, + })) + + return { debugClient, emptyWallet, serverWallet, accountWallet, evm: name == 'geth' } } +export type Env = Awaited> + export function wait(ms: number) { return new Promise((resolve) => setTimeout(resolve, ms)) } @@ -140,3 +197,16 @@ export function waitForHealth(url: string) { }, 1000) }) } + +export function visit(obj: any, callback: (key: string, value: any) => 
any): any { + if (Array.isArray(obj)) { + return obj.map((item) => visit(item, callback)) + } else if (typeof obj === 'object' && obj !== null) { + return Object.keys(obj).reduce((acc, key) => { + acc[key] = visit(callback(key, obj[key]), callback) + return acc + }, {} as any) + } else { + return obj + } +} diff --git a/substrate/frame/revive/rpc/migrations/0002_create_log_table.sql b/substrate/frame/revive/rpc/migrations/0002_create_log_table.sql new file mode 100644 index 0000000000000..4b012e17a38c5 --- /dev/null +++ b/substrate/frame/revive/rpc/migrations/0002_create_log_table.sql @@ -0,0 +1,28 @@ +CREATE TABLE IF NOT EXISTS logs ( + block_hash BLOB NOT NULL, + transaction_index INTEGER NOT NULL, + log_index INTEGER NOT NULL, + address BLOB NOT NULL, + block_number INTEGER NOT NULL, + transaction_hash BLOB NOT NULL, + topic_0 BLOB, + topic_1 BLOB, + topic_2 BLOB, + topic_3 BLOB, + data BLOB, + PRIMARY KEY (block_hash, transaction_index, log_index) +); + +CREATE INDEX IF NOT EXISTS idx_block_number_address_topics ON logs ( + block_number, + address, + topic_0, + topic_1, + topic_2, + topic_3 +); + +CREATE INDEX IF NOT EXISTS idx_block_hash ON logs ( + block_hash +); + diff --git a/substrate/frame/revive/rpc/revive_chain.metadata b/substrate/frame/revive/rpc/revive_chain.metadata index 89476924cf007..80f1000125a17 100644 Binary files a/substrate/frame/revive/rpc/revive_chain.metadata and b/substrate/frame/revive/rpc/revive_chain.metadata differ diff --git a/substrate/frame/revive/rpc/src/apis.rs b/substrate/frame/revive/rpc/src/apis.rs new file mode 100644 index 0000000000000..ae85246c55b39 --- /dev/null +++ b/substrate/frame/revive/rpc/src/apis.rs @@ -0,0 +1,24 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +mod debug_apis; +pub use debug_apis::*; + +mod execution_apis; +pub use execution_apis::*; + +mod health_api; +pub use health_api::*; diff --git a/substrate/frame/revive/rpc/src/apis/debug_apis.rs b/substrate/frame/revive/rpc/src/apis/debug_apis.rs new file mode 100644 index 0000000000000..2db05d2f75ee6 --- /dev/null +++ b/substrate/frame/revive/rpc/src/apis/debug_apis.rs @@ -0,0 +1,102 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +use crate::*; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; + +/// Debug Ethererum JSON-RPC apis. +#[rpc(server, client)] +pub trait DebugRpc { + /// Returns the tracing of the execution of a specific block using its number. 
+ /// + /// ## References + /// + /// - er + #[method(name = "debug_traceBlockByNumber")] + async fn trace_block_by_number( + &self, + block: BlockNumberOrTag, + tracer_config: TracerConfig, + ) -> RpcResult>; + + /// Returns a transaction's traces by replaying it. + /// + /// ## References + /// + /// - + #[method(name = "debug_traceTransaction")] + async fn trace_transaction( + &self, + transaction_hash: H256, + tracer_config: TracerConfig, + ) -> RpcResult; + + /// Dry run a call and returns the transaction's traces. + /// + /// ## References + /// + /// - + #[method(name = "debug_traceCall")] + async fn trace_call( + &self, + transaction: GenericTransaction, + block: BlockNumberOrTag, + tracer_config: TracerConfig, + ) -> RpcResult; +} + +pub struct DebugRpcServerImpl { + client: client::Client, +} + +impl DebugRpcServerImpl { + pub fn new(client: client::Client) -> Self { + Self { client } + } +} + +#[async_trait] +impl DebugRpcServer for DebugRpcServerImpl { + async fn trace_block_by_number( + &self, + block: BlockNumberOrTag, + tracer_config: TracerConfig, + ) -> RpcResult> { + log::debug!(target: crate::LOG_TARGET, "trace_block_by_number: {block:?} config: {tracer_config:?}"); + let traces = self.client.trace_block_by_number(block, tracer_config).await?; + Ok(traces) + } + + async fn trace_transaction( + &self, + transaction_hash: H256, + tracer_config: TracerConfig, + ) -> RpcResult { + let trace = self.client.trace_transaction(transaction_hash, tracer_config).await?; + Ok(trace) + } + + async fn trace_call( + &self, + transaction: GenericTransaction, + block: BlockNumberOrTag, + tracer_config: TracerConfig, + ) -> RpcResult { + log::debug!(target: crate::LOG_TARGET, "trace_call: {transaction:?} block: {block:?} config: {tracer_config:?}"); + let trace = self.client.trace_call(transaction, block, tracer_config).await?; + Ok(trace) + } +} diff --git a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs b/substrate/frame/revive/rpc/src/apis/execution_apis.rs 
similarity index 96% rename from substrate/frame/revive/rpc/src/rpc_methods_gen.rs rename to substrate/frame/revive/rpc/src/apis/execution_apis.rs index da60360d9e61b..f55209fce5856 100644 --- a/substrate/frame/revive/rpc/src/rpc_methods_gen.rs +++ b/substrate/frame/revive/rpc/src/apis/execution_apis.rs @@ -18,7 +18,7 @@ //! Generated JSON-RPC methods. #![allow(missing_docs)] -use super::*; +use crate::*; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; #[rpc(server, client)] @@ -95,6 +95,10 @@ pub trait EthRpc { #[method(name = "eth_getCode")] async fn get_code(&self, address: Address, block: BlockNumberOrTagOrHash) -> RpcResult; + /// Returns an array of all logs matching filter with given id. + #[method(name = "eth_getLogs")] + async fn get_logs(&self, filter: Option) -> RpcResult; + /// Returns the value from a storage position at a given address. #[method(name = "eth_getStorageAt")] async fn get_storage_at( diff --git a/substrate/frame/revive/rpc/src/rpc_health.rs b/substrate/frame/revive/rpc/src/apis/health_api.rs similarity index 99% rename from substrate/frame/revive/rpc/src/rpc_health.rs rename to substrate/frame/revive/rpc/src/apis/health_api.rs index 35c5a588f284d..076d2fb4800a8 100644 --- a/substrate/frame/revive/rpc/src/rpc_health.rs +++ b/substrate/frame/revive/rpc/src/apis/health_api.rs @@ -16,7 +16,7 @@ // limitations under the License. //! Heatlh JSON-RPC methods. -use super::*; +use crate::*; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use sc_rpc_api::system::helpers::Health; diff --git a/substrate/frame/revive/rpc/src/block_info_provider.rs b/substrate/frame/revive/rpc/src/block_info_provider.rs index 675a83ed6558b..a8b54907ca37c 100644 --- a/substrate/frame/revive/rpc/src/block_info_provider.rs +++ b/substrate/frame/revive/rpc/src/block_info_provider.rs @@ -38,6 +38,11 @@ pub trait BlockInfoProvider: Send + Sync { /// Return the latest ingested block. 
async fn latest_block(&self) -> Option>; + /// Return the latest block number + async fn latest_block_number(&self) -> Option { + return self.latest_block().await.map(|block| block.number()); + } + /// Get block by block_number. async fn block_by_number( &self, @@ -225,6 +230,10 @@ pub mod test { None } + async fn latest_block_number(&self) -> Option { + Some(2u32) + } + async fn block_by_number( &self, _block_number: SubstrateBlockNumber, diff --git a/substrate/frame/revive/rpc/src/cli.rs b/substrate/frame/revive/rpc/src/cli.rs index e40f3b1d053ce..5844d36a87fff 100644 --- a/substrate/frame/revive/rpc/src/cli.rs +++ b/substrate/frame/revive/rpc/src/cli.rs @@ -18,8 +18,8 @@ use crate::{ client::{connect, native_to_eth_ratio, Client, SubscriptionType, SubstrateBlockNumber}, BlockInfoProvider, BlockInfoProviderImpl, CacheReceiptProvider, DBReceiptProvider, - EthRpcServer, EthRpcServerImpl, ReceiptExtractor, ReceiptProvider, SystemHealthRpcServer, - SystemHealthRpcServerImpl, LOG_TARGET, + DebugRpcServer, DebugRpcServerImpl, EthRpcServer, EthRpcServerImpl, ReceiptExtractor, + ReceiptProvider, SystemHealthRpcServer, SystemHealthRpcServerImpl, LOG_TARGET, }; use clap::Parser; use futures::{pin_mut, FutureExt}; @@ -37,6 +37,8 @@ const DEFAULT_PROMETHEUS_PORT: u16 = 9616; // Default port if --rpc-port is not specified const DEFAULT_RPC_PORT: u16 = 8545; +const IN_MEMORY_DB: &str = "sqlite::memory:"; + // Parsed command instructions from the command line #[derive(Parser, Debug)] #[clap(author, about, version)] @@ -52,8 +54,8 @@ pub struct CliCommand { /// The database used to store Ethereum transaction hashes. /// This is only useful if the node needs to act as an archive node and respond to Ethereum RPC /// queries for transactions that are not in the in memory cache. 
- #[clap(long, env = "DATABASE_URL")] - pub database_url: Option, + #[clap(long, env = "DATABASE_URL", default_value = IN_MEMORY_DB)] + pub database_url: String, /// If not provided, only new blocks will be indexed #[clap(long)] @@ -97,7 +99,7 @@ fn build_client( tokio_handle: &tokio::runtime::Handle, cache_size: usize, node_rpc_url: &str, - database_url: Option<&str>, + database_url: &str, abort_signal: Signals, ) -> anyhow::Result { let fut = async { @@ -105,22 +107,22 @@ fn build_client( let block_provider: Arc = Arc::new(BlockInfoProviderImpl::new(cache_size, api.clone(), rpc.clone())); + let prune_old_blocks = database_url == IN_MEMORY_DB; + if prune_old_blocks { + log::info!( target: LOG_TARGET, "Using in-memory database, keeping only {cache_size} blocks in memory"); + } + let receipt_extractor = ReceiptExtractor::new(native_to_eth_ratio(&api).await?); - let receipt_provider: Arc = if let Some(database_url) = database_url { - log::info!(target: LOG_TARGET, "🔗 Connecting to provided database"); - Arc::new(( - CacheReceiptProvider::default(), - DBReceiptProvider::new( - database_url, - block_provider.clone(), - receipt_extractor.clone(), - ) - .await?, - )) - } else { - log::info!(target: LOG_TARGET, "🔌 No database provided, using in-memory cache"); - Arc::new(CacheReceiptProvider::default()) - }; + let receipt_provider: Arc = Arc::new(( + CacheReceiptProvider::default(), + DBReceiptProvider::new( + database_url, + block_provider.clone(), + receipt_extractor.clone(), + prune_old_blocks, + ) + .await?, + )); let client = Client::new(api, rpc_client, rpc, block_provider, receipt_provider, receipt_extractor) @@ -187,7 +189,7 @@ pub fn run(cmd: CliCommand) -> anyhow::Result<()> { tokio_handle, cache_size, &node_rpc_url, - database_url.as_deref(), + &database_url, tokio_runtime.block_on(async { Signals::capture() })?, )?; @@ -232,10 +234,12 @@ fn rpc_module(is_dev: bool, client: Client) -> Result, sc_service: .with_accounts(if is_dev { 
vec![crate::Account::default()] } else { vec![] }) .into_rpc(); - let health_api = SystemHealthRpcServerImpl::new(client).into_rpc(); + let health_api = SystemHealthRpcServerImpl::new(client.clone()).into_rpc(); + let debug_api = DebugRpcServerImpl::new(client).into_rpc(); let mut module = RpcModule::new(()); module.merge(eth_api).map_err(|e| sc_service::Error::Application(e.into()))?; module.merge(health_api).map_err(|e| sc_service::Error::Application(e.into()))?; + module.merge(debug_api).map_err(|e| sc_service::Error::Application(e.into()))?; Ok(module) } diff --git a/substrate/frame/revive/rpc/src/client.rs b/substrate/frame/revive/rpc/src/client.rs index e538259ca8b74..9431057353763 100644 --- a/substrate/frame/revive/rpc/src/client.rs +++ b/substrate/frame/revive/rpc/src/client.rs @@ -22,15 +22,17 @@ use crate::{ }, BlockInfoProvider, ReceiptExtractor, ReceiptProvider, TransactionInfo, LOG_TARGET, }; +use codec::{Decode, Encode}; use jsonrpsee::types::{error::CALL_EXECUTION_FAILED_CODE, ErrorObjectOwned}; use pallet_revive::{ evm::{ - extract_revert_message, Block, BlockNumberOrTag, BlockNumberOrTagOrHash, - GenericTransaction, ReceiptInfo, SyncingProgress, SyncingStatus, TransactionSigned, H160, - H256, U256, + decode_revert_reason, Block, BlockNumberOrTag, BlockNumberOrTagOrHash, CallTrace, Filter, + GenericTransaction, Log, ReceiptInfo, SyncingProgress, SyncingStatus, TracerConfig, + TransactionSigned, TransactionTrace, H160, H256, U256, }, EthTransactError, EthTransactInfo, }; +use sp_runtime::OpaqueExtrinsic; use sp_weights::Weight; use std::{ops::ControlFlow, sync::Arc, time::Duration}; use subxt::{ @@ -108,9 +110,9 @@ pub enum ClientError { /// A [`codec::Error`] wrapper error. #[error(transparent)] CodecError(#[from] codec::Error), - /// Contract reverted + /// Transcact call failed. #[error("contract reverted")] - Reverted(EthTransactError), + TransactError(EthTransactError), /// A decimal conversion failed. 
#[error("conversion failed")] ConversionFailed, @@ -132,6 +134,9 @@ pub enum ClientError { /// The cache is empty. #[error("cache is empty")] CacheEmpty, + /// Failed to filter logs. + #[error("Failed to filter logs")] + LogFilterFailed(#[from] anyhow::Error), } const REVERT_CODE: i32 = 3; @@ -148,12 +153,16 @@ impl From for ErrorObjectOwned { None, ) }, - ClientError::Reverted(EthTransactError::Data(data)) => { - let msg = extract_revert_message(&data).unwrap_or_default(); + ClientError::TransactError(EthTransactError::Data(data)) => { + let msg = match decode_revert_reason(&data) { + Some(reason) => format!("execution reverted: {reason}"), + None => "execution reverted".to_string(), + }; + let data = format!("0x{}", hex::encode(data)); ErrorObjectOwned::owned::(REVERT_CODE, msg, Some(data)) }, - ClientError::Reverted(EthTransactError::Message(msg)) => + ClientError::TransactError(EthTransactError::Message(msg)) => ErrorObjectOwned::owned::(CALL_EXECUTION_FAILED_CODE, msg, None), _ => ErrorObjectOwned::owned::(CALL_EXECUTION_FAILED_CODE, err.to_string(), None), @@ -567,7 +576,7 @@ impl Client { match result { Err(err) => { log::debug!(target: LOG_TARGET, "Dry run failed {err:?}"); - Err(ClientError::Reverted(err.0)) + Err(ClientError::TransactError(err.0)) }, Ok(result) => Ok(result.0), } @@ -642,7 +651,136 @@ impl Client { let gas_price = runtime_api.call(payload).await?; Ok(*gas_price) } + /// Get the transaction traces for the given block. + pub async fn trace_block_by_number( + &self, + block: BlockNumberOrTag, + tracer_config: TracerConfig, + ) -> Result, ClientError> { + let block_hash = match block { + BlockNumberOrTag::U256(n) => { + let block_number: SubstrateBlockNumber = + n.try_into().map_err(|_| ClientError::ConversionFailed)?; + self.get_block_hash(block_number).await? 
+ }, + BlockNumberOrTag::BlockTag(_) => self.latest_block().await.map(|b| b.hash()), + } + .ok_or(ClientError::BlockNotFound)?; + + let block = self + .rpc + .chain_get_block(Some(block_hash)) + .await? + .ok_or(ClientError::BlockNotFound)?; + + let header = block.block.header; + let parent_hash = header.parent_hash; + let exts = block + .block + .extrinsics + .into_iter() + .filter_map(|e| OpaqueExtrinsic::decode(&mut &e[..]).ok()) + .collect::>(); + + let params = ((header, exts), tracer_config).encode(); + + let bytes = self + .rpc + .state_call("ReviveApi_trace_block", Some(¶ms), Some(parent_hash)) + .await + .inspect_err(|err| { + log::error!(target: LOG_TARGET, "state_call failed with: {err:?}"); + })?; + + let traces = Vec::<(u32, CallTrace)>::decode(&mut &bytes[..])?; + + let mut hashes = self + .receipt_provider + .block_transaction_hashes(&block_hash) + .await + .ok_or(ClientError::EthExtrinsicNotFound)?; + + let traces = traces + .into_iter() + .filter_map(|(index, trace)| { + Some(TransactionTrace { tx_hash: hashes.remove(&(index as usize))?, trace }) + }) + .collect(); + + Ok(traces) + } + + /// Get the transaction traces for the given transaction. + pub async fn trace_transaction( + &self, + transaction_hash: H256, + tracer_config: TracerConfig, + ) -> Result { + let ReceiptInfo { block_hash, transaction_index, .. } = self + .receipt_provider + .receipt_by_hash(&transaction_hash) + .await + .ok_or(ClientError::EthExtrinsicNotFound)?; + + log::debug!(target: LOG_TARGET, "Found eth_tx at {block_hash:?} index: + {transaction_index:?}"); + + let block = self + .rpc + .chain_get_block(Some(block_hash)) + .await? 
+ .ok_or(ClientError::BlockNotFound)?; + + let header = block.block.header; + let parent_hash = header.parent_hash; + let exts = block + .block + .extrinsics + .into_iter() + .filter_map(|e| OpaqueExtrinsic::decode(&mut &e[..]).ok()) + .collect::>(); + + let params = ((header, exts), transaction_index.as_u32(), tracer_config).encode(); + let bytes = self + .rpc + .state_call("ReviveApi_trace_tx", Some(¶ms), Some(parent_hash)) + .await + .inspect_err(|err| { + log::error!(target: LOG_TARGET, "state_call failed with: {err:?}"); + })?; + + let trace = Option::::decode(&mut &bytes[..])?; + trace.ok_or(ClientError::EthExtrinsicNotFound) + } + + /// Get the transaction traces for the given block. + pub async fn trace_call( + &self, + transaction: GenericTransaction, + block: BlockNumberOrTag, + tracer_config: TracerConfig, + ) -> Result { + let block_hash = match block { + BlockNumberOrTag::U256(n) => { + let block_number: SubstrateBlockNumber = + n.try_into().map_err(|_| ClientError::ConversionFailed)?; + self.get_block_hash(block_number).await? + }, + BlockNumberOrTag::BlockTag(_) => self.latest_block().await.map(|b| b.hash()), + }; + + let params = (transaction, tracer_config).encode(); + let bytes = self + .rpc + .state_call("ReviveApi_trace_call", Some(¶ms), block_hash) + .await + .inspect_err(|err| { + log::error!(target: LOG_TARGET, "state_call failed with: {err:?}"); + })?; + Result::::decode(&mut &bytes[..])? + .map_err(ClientError::TransactError) + } /// Get the EVM block for the given hash. pub async fn evm_block( &self, @@ -712,4 +850,11 @@ impl Client { pub fn max_block_weight(&self) -> Weight { self.max_block_weight } + + /// Get the logs matching the given filter. 
+ pub async fn logs(&self, filter: Option) -> Result, ClientError> { + let logs = + self.receipt_provider.logs(filter).await.map_err(ClientError::LogFilterFailed)?; + Ok(logs) + } } diff --git a/substrate/frame/revive/rpc/src/lib.rs b/substrate/frame/revive/rpc/src/lib.rs index 536678a97ac63..8d6797722d4f2 100644 --- a/substrate/frame/revive/rpc/src/lib.rs +++ b/substrate/frame/revive/rpc/src/lib.rs @@ -44,11 +44,8 @@ pub use receipt_provider::*; mod receipt_extractor; pub use receipt_extractor::*; -mod rpc_health; -pub use rpc_health::*; - -mod rpc_methods_gen; -pub use rpc_methods_gen::*; +mod apis; +pub use apis::*; pub const LOG_TARGET: &str = "eth-rpc"; @@ -287,6 +284,11 @@ impl EthRpcServer for EthRpcServerImpl { Ok(self.client.receipts_count_per_block(&block.hash).await.map(U256::from)) } + async fn get_logs(&self, filter: Option) -> RpcResult { + let logs = self.client.logs(filter).await?; + Ok(FilterResults::Logs(logs)) + } + async fn get_storage_at( &self, address: H160, diff --git a/substrate/frame/revive/rpc/src/receipt_provider.rs b/substrate/frame/revive/rpc/src/receipt_provider.rs index bbed54a94b7dc..fe8a3e9fb04fa 100644 --- a/substrate/frame/revive/rpc/src/receipt_provider.rs +++ b/substrate/frame/revive/rpc/src/receipt_provider.rs @@ -16,7 +16,8 @@ // limitations under the License. use jsonrpsee::core::async_trait; -use pallet_revive::evm::{ReceiptInfo, TransactionSigned, H256}; +use pallet_revive::evm::{Filter, Log, ReceiptInfo, TransactionSigned, H256}; +use std::collections::HashMap; use tokio::join; mod cache; @@ -34,9 +35,15 @@ pub trait ReceiptProvider: Send + Sync { /// Similar to `insert`, but intended for archiving receipts from historical blocks. async fn archive(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]); + /// Get logs that match the given filter. + async fn logs(&self, filter: Option) -> anyhow::Result>; + /// Deletes receipts associated with the specified block hash. 
async fn remove(&self, block_hash: &H256); + /// Return all transaction hashes for the given block hash. + async fn block_transaction_hashes(&self, block_hash: &H256) -> Option>; + /// Get the receipt for the given block hash and transaction index. async fn receipt_by_block_hash_and_index( &self, @@ -65,7 +72,7 @@ impl ReceiptProvider for (Cach } async fn remove(&self, block_hash: &H256) { - self.0.remove(block_hash).await; + join!(self.0.remove(block_hash), self.1.remove(block_hash)); } async fn receipt_by_block_hash_and_index( @@ -89,6 +96,13 @@ impl ReceiptProvider for (Cach self.1.receipts_count_per_block(block_hash).await } + async fn block_transaction_hashes(&self, block_hash: &H256) -> Option> { + if let Some(hashes) = self.0.block_transaction_hashes(block_hash).await { + return Some(hashes); + } + self.1.block_transaction_hashes(block_hash).await + } + async fn receipt_by_hash(&self, hash: &H256) -> Option { if let Some(receipt) = self.0.receipt_by_hash(hash).await { return Some(receipt); @@ -102,4 +116,8 @@ impl ReceiptProvider for (Cach } self.1.signed_tx_by_hash(hash).await } + + async fn logs(&self, filter: Option) -> anyhow::Result> { + self.1.logs(filter).await + } } diff --git a/substrate/frame/revive/rpc/src/receipt_provider/cache.rs b/substrate/frame/revive/rpc/src/receipt_provider/cache.rs index 765c12f890106..576c08c0dd03b 100644 --- a/substrate/frame/revive/rpc/src/receipt_provider/cache.rs +++ b/substrate/frame/revive/rpc/src/receipt_provider/cache.rs @@ -16,7 +16,7 @@ // limitations under the License. 
use super::ReceiptProvider; use jsonrpsee::core::async_trait; -use pallet_revive::evm::{ReceiptInfo, TransactionSigned, H256}; +use pallet_revive::evm::{Filter, Log, ReceiptInfo, TransactionSigned, H256}; use std::{collections::HashMap, sync::Arc}; use tokio::sync::RwLock; @@ -37,6 +37,10 @@ impl CacheReceiptProvider { impl ReceiptProvider for CacheReceiptProvider { async fn archive(&self, _block_hash: &H256, _receipts: &[(TransactionSigned, ReceiptInfo)]) {} + async fn logs(&self, _filter: Option) -> anyhow::Result> { + anyhow::bail!("Not implemented") + } + async fn insert(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]) { let mut cache = self.cache.write().await; cache.insert(block_hash, receipts); @@ -66,6 +70,11 @@ impl ReceiptProvider for CacheReceiptProvider { cache.transaction_hashes_by_block_and_index.get(block_hash).map(|v| v.len()) } + async fn block_transaction_hashes(&self, block_hash: &H256) -> Option> { + let cache = self.cache().await; + cache.transaction_hashes_by_block_and_index.get(block_hash).cloned() + } + async fn receipt_by_hash(&self, hash: &H256) -> Option { let cache = self.cache().await; cache.receipts_by_hash.get(hash).cloned() diff --git a/substrate/frame/revive/rpc/src/receipt_provider/db.rs b/substrate/frame/revive/rpc/src/receipt_provider/db.rs index 42ffe93a9f8b6..c471d009022ab 100644 --- a/substrate/frame/revive/rpc/src/receipt_provider/db.rs +++ b/substrate/frame/revive/rpc/src/receipt_provider/db.rs @@ -16,12 +16,15 @@ // limitations under the License. 
use super::*; -use crate::{BlockInfoProvider, ReceiptExtractor}; +use crate::{ + Address, AddressOrAddresses, BlockInfoProvider, Bytes, FilterTopic, ReceiptExtractor, + LOG_TARGET, +}; use jsonrpsee::core::async_trait; -use pallet_revive::evm::{ReceiptInfo, TransactionSigned}; -use sp_core::H256; -use sqlx::{query, SqlitePool}; -use std::sync::Arc; +use pallet_revive::evm::{Filter, Log, ReceiptInfo, TransactionSigned}; +use sp_core::{H256, U256}; +use sqlx::{query, QueryBuilder, Row, Sqlite, SqlitePool}; +use std::{collections::HashMap, sync::Arc}; /// A `[ReceiptProvider]` that stores receipts in a SQLite database. #[derive(Clone)] @@ -32,6 +35,8 @@ pub struct DBReceiptProvider { block_provider: Arc, /// A means to extract receipts from extrinsics. receipt_extractor: ReceiptExtractor, + /// Whether to prune old blocks. + prune_old_blocks: bool, } impl DBReceiptProvider { @@ -40,10 +45,11 @@ impl DBReceiptProvider { database_url: &str, block_provider: Arc, receipt_extractor: ReceiptExtractor, + prune_old_blocks: bool, ) -> Result { let pool = SqlitePool::connect(database_url).await?; sqlx::migrate!().run(&pool).await?; - Ok(Self { pool, block_provider, receipt_extractor }) + Ok(Self { pool, block_provider, receipt_extractor, prune_old_blocks }) } async fn fetch_row(&self, transaction_hash: &H256) -> Option<(H256, usize)> { @@ -68,7 +74,41 @@ impl DBReceiptProvider { #[async_trait] impl ReceiptProvider for DBReceiptProvider { - async fn remove(&self, _block_hash: &H256) {} + async fn remove(&self, block_hash: &H256) { + if !self.prune_old_blocks { + return; + } + + let block_hash = block_hash.as_ref(); + + let delete_transaction_hashes = query!( + r#" + DELETE FROM transaction_hashes + WHERE block_hash = $1 + "#, + block_hash + ) + .execute(&self.pool); + + let delete_logs = query!( + r#" + DELETE FROM logs + WHERE block_hash = $1 + "#, + block_hash + ) + .execute(&self.pool); + + let (tx_result, logs_result) = tokio::join!(delete_transaction_hashes, delete_logs); + 
+ if let Err(err) = tx_result { + log::error!(target: LOG_TARGET, "Error removing transaction hashes for block hash {block_hash:?}: {err:?}"); + } + + if let Err(err) = logs_result { + log::error!(target: LOG_TARGET, "Error removing logs for block hash {block_hash:?}: {err:?}"); + } + } async fn archive(&self, block_hash: &H256, receipts: &[(TransactionSigned, ReceiptInfo)]) { self.insert(block_hash, receipts).await; @@ -95,7 +135,173 @@ impl ReceiptProvider for DBReceiptProvider { if let Err(err) = result { log::error!("Error inserting transaction for block hash {block_hash:?}: {err:?}"); } + + for log in &receipt.logs { + let block_hash = log.block_hash.as_ref(); + let transaction_index = log.transaction_index.as_u64() as i64; + let log_index = log.log_index.as_u32() as i32; + let address = log.address.as_ref(); + let block_number = log.block_number.as_u64() as i64; + let transaction_hash = log.transaction_hash.as_ref(); + + let topic_0 = log.topics.first().as_ref().map(|v| &v[..]); + let topic_1 = log.topics.get(1).as_ref().map(|v| &v[..]); + let topic_2 = log.topics.get(2).as_ref().map(|v| &v[..]); + let topic_3 = log.topics.get(3).as_ref().map(|v| &v[..]); + let data = log.data.as_ref().map(|v| &v.0[..]); + + let result = query!( + r#" + INSERT OR REPLACE INTO logs( + block_hash, + transaction_index, + log_index, + address, + block_number, + transaction_hash, + topic_0, topic_1, topic_2, topic_3, + data) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) + "#, + block_hash, + transaction_index, + log_index, + address, + block_number, + transaction_hash, + topic_0, + topic_1, + topic_2, + topic_3, + data + ) + .execute(&self.pool) + .await; + + if let Err(err) = result { + log::error!("Error inserting log {log:?}: {err:?}"); + } + } + } + } + + async fn logs(&self, filter: Option) -> anyhow::Result> { + let mut qb = QueryBuilder::::new("SELECT logs.* FROM logs WHERE 1=1"); + let filter = filter.unwrap_or_default(); + + let latest_block = + 
U256::from(self.block_provider.latest_block_number().await.unwrap_or_default()); + + match (filter.from_block, filter.to_block, filter.block_hash) { + (Some(_), _, Some(_)) | (_, Some(_), Some(_)) => { + anyhow::bail!("block number and block hash cannot be used together"); + }, + + (Some(block), _, _) | (_, Some(block), _) if block > latest_block => { + anyhow::bail!("block number exceeds latest block"); + }, + (Some(from_block), Some(to_block), None) if from_block > to_block => { + anyhow::bail!("invalid block range params"); + }, + (Some(from_block), Some(to_block), None) if from_block == to_block => { + qb.push(" AND block_number = ").push_bind(from_block.as_u64() as i64); + }, + (Some(from_block), Some(to_block), None) => { + qb.push(" AND block_number BETWEEN ") + .push_bind(from_block.as_u64() as i64) + .push(" AND ") + .push_bind(to_block.as_u64() as i64); + }, + (Some(from_block), None, None) => { + qb.push(" AND block_number >= ").push_bind(from_block.as_u64() as i64); + }, + (None, Some(to_block), None) => { + qb.push(" AND block_number <= ").push_bind(to_block.as_u64() as i64); + }, + (None, None, Some(hash)) => { + qb.push(" AND block_hash = ").push_bind(hash.0.to_vec()); + }, + (None, None, None) => { + qb.push(" AND block_number = ").push_bind(latest_block.as_u64() as i64); + }, } + + if let Some(addresses) = filter.address { + match addresses { + AddressOrAddresses::Address(addr) => { + qb.push(" AND address = ").push_bind(addr.0.to_vec()); + }, + AddressOrAddresses::Addresses(addrs) => { + qb.push(" AND address IN ("); + let mut separated = qb.separated(", "); + for addr in addrs { + separated.push_bind(addr.0.to_vec()); + } + separated.push_unseparated(")"); + }, + } + } + + if let Some(topics) = filter.topics { + if topics.len() > 4 { + return Err(anyhow::anyhow!("exceed max topics")); + } + + for (i, topic) in topics.into_iter().enumerate() { + match topic { + FilterTopic::Single(hash) => { + qb.push(format_args!(" AND topic_{i} = 
")).push_bind(hash.0.to_vec()); + }, + FilterTopic::Multiple(hashes) => { + qb.push(format_args!(" AND topic_{i} IN (")); + let mut separated = qb.separated(", "); + for hash in hashes { + separated.push_bind(hash.0.to_vec()); + } + separated.push_unseparated(")"); + }, + } + } + } + + qb.push(" LIMIT 10000"); + + let logs = qb + .build() + .try_map(|row| { + let block_hash: Vec = row.try_get("block_hash")?; + let transaction_index: i64 = row.try_get("transaction_index")?; + let log_index: i64 = row.try_get("log_index")?; + let address: Vec = row.try_get("address")?; + let block_number: i64 = row.try_get("block_number")?; + let transaction_hash: Vec = row.try_get("transaction_hash")?; + let topic_0: Option> = row.try_get("topic_0")?; + let topic_1: Option> = row.try_get("topic_1")?; + let topic_2: Option> = row.try_get("topic_2")?; + let topic_3: Option> = row.try_get("topic_3")?; + let data: Option> = row.try_get("data")?; + + let topics = [topic_0, topic_1, topic_2, topic_3] + .iter() + .filter_map(|t| t.as_ref().map(|t| H256::from_slice(t))) + .collect::>(); + + Ok(Log { + address: Address::from_slice(&address), + block_hash: H256::from_slice(&block_hash), + block_number: U256::from(block_number as u64), + data: data.map(Bytes::from), + log_index: U256::from(log_index as u64), + topics, + transaction_hash: H256::from_slice(&transaction_hash), + transaction_index: U256::from(transaction_index as u64), + removed: None, + }) + }) + .fetch_all(&self.pool) + .await?; + + Ok(logs) } async fn receipts_count_per_block(&self, block_hash: &H256) -> Option { @@ -116,6 +322,28 @@ impl ReceiptProvider for DBReceiptProvider { Some(count) } + async fn block_transaction_hashes(&self, block_hash: &H256) -> Option> { + let block_hash = block_hash.as_ref(); + let rows = query!( + r#" + SELECT transaction_index, transaction_hash + FROM transaction_hashes + WHERE block_hash = $1 + "#, + block_hash + ) + .map(|row| { + let transaction_index = row.transaction_index as usize; + let 
transaction_hash = H256::from_slice(&row.transaction_hash); + (transaction_index, transaction_hash) + }) + .fetch_all(&self.pool) + .await + .ok()?; + + Some(rows.into_iter().collect()) + } + async fn receipt_by_block_hash_and_index( &self, block_hash: &H256, @@ -174,7 +402,8 @@ mod tests { use super::*; use crate::test::MockBlockInfoProvider; use pallet_revive::evm::{ReceiptInfo, TransactionSigned}; - use sp_core::H256; + use pretty_assertions::assert_eq; + use sp_core::{H160, H256}; use sqlx::SqlitePool; async fn setup_sqlite_provider(pool: SqlitePool) -> DBReceiptProvider { @@ -182,18 +411,53 @@ mod tests { pool, block_provider: Arc::new(MockBlockInfoProvider {}), receipt_extractor: ReceiptExtractor::new(1_000_000), + prune_old_blocks: true, } } #[sqlx::test] - async fn test_insert(pool: SqlitePool) { + async fn test_insert_remove(pool: SqlitePool) { let provider = setup_sqlite_provider(pool).await; let block_hash = H256::default(); - let receipts = vec![(TransactionSigned::default(), ReceiptInfo::default())]; + let receipts = vec![( + TransactionSigned::default(), + ReceiptInfo { + logs: vec![Log { block_hash, ..Default::default() }], + ..Default::default() + }, + )]; provider.insert(&block_hash, &receipts).await; let row = provider.fetch_row(&receipts[0].1.transaction_hash).await; assert_eq!(row, Some((block_hash, 0))); + + provider.remove(&block_hash).await; + + let transaction_count: i64 = sqlx::query_scalar( + r#" + SELECT COUNT(*) + FROM transaction_hashes + WHERE block_hash = ? + "#, + ) + .bind(block_hash.as_ref()) + .fetch_one(&provider.pool) + .await + .unwrap(); + assert_eq!(transaction_count, 0); + + let logs_count: i64 = sqlx::query_scalar( + r#" + SELECT COUNT(*) + FROM logs + WHERE block_hash = ? 
+ "#, + ) + .bind(block_hash.as_ref()) + .fetch_one(&provider.pool) + .await + .unwrap(); + assert_eq!(logs_count, 0); } #[sqlx::test] @@ -215,4 +479,137 @@ mod tests { let count = provider.receipts_count_per_block(&block_hash).await; assert_eq!(count, Some(2)); } + + #[sqlx::test] + async fn test_query_logs(pool: SqlitePool) -> anyhow::Result<()> { + let provider = setup_sqlite_provider(pool).await; + let log1 = Log { + block_hash: H256::from([1u8; 32]), + block_number: U256::from(1), + address: H160::from([1u8; 20]), + topics: vec![H256::from([1u8; 32]), H256::from([2u8; 32])], + data: Some(vec![0u8; 32].into()), + transaction_hash: H256::default(), + transaction_index: U256::from(1), + log_index: U256::from(1), + ..Default::default() + }; + let log2 = Log { + block_hash: H256::from([2u8; 32]), + block_number: U256::from(2), + address: H160::from([2u8; 20]), + topics: vec![H256::from([2u8; 32]), H256::from([3u8; 32])], + transaction_hash: H256::from([1u8; 32]), + transaction_index: U256::from(2), + log_index: U256::from(1), + ..Default::default() + }; + + provider + .insert( + &log1.block_hash, + &vec![( + TransactionSigned::default(), + ReceiptInfo { logs: vec![log1.clone()], ..Default::default() }, + )], + ) + .await; + provider + .insert( + &log2.block_hash, + &vec![( + TransactionSigned::default(), + ReceiptInfo { logs: vec![log2.clone()], ..Default::default() }, + )], + ) + .await; + + // Empty filter + let logs = provider.logs(None).await?; + assert_eq!(logs, vec![log2.clone()]); + + // from_block filter + let logs = provider + .logs(Some(Filter { from_block: Some(log2.block_number), ..Default::default() })) + .await?; + assert_eq!(logs, vec![log2.clone()]); + + // to_block filter + let logs = provider + .logs(Some(Filter { to_block: Some(log1.block_number), ..Default::default() })) + .await?; + assert_eq!(logs, vec![log1.clone()]); + + // block_hash filter + let logs = provider + .logs(Some(Filter { block_hash: Some(log1.block_hash), ..Default::default() 
})) + .await?; + assert_eq!(logs, vec![log1.clone()]); + + // single address + let logs = provider + .logs(Some(Filter { + from_block: Some(U256::from(0)), + address: Some(log1.address.into()), + ..Default::default() + })) + .await?; + assert_eq!(logs, vec![log1.clone()]); + + // multiple addresses + let logs = provider + .logs(Some(Filter { + from_block: Some(U256::from(0)), + address: Some(vec![log1.address, log2.address].into()), + ..Default::default() + })) + .await?; + assert_eq!(logs, vec![log1.clone(), log2.clone()]); + + // single topic + let logs = provider + .logs(Some(Filter { + from_block: Some(U256::from(0)), + topics: Some(vec![FilterTopic::Single(log1.topics[0])]), + ..Default::default() + })) + .await?; + assert_eq!(logs, vec![log1.clone()]); + + // multiple topic + let logs = provider + .logs(Some(Filter { + from_block: Some(U256::from(0)), + topics: Some(vec![ + FilterTopic::Single(log1.topics[0]), + FilterTopic::Single(log1.topics[1]), + ]), + ..Default::default() + })) + .await?; + assert_eq!(logs, vec![log1.clone()]); + + // multiple topic for topic_0 + let logs = provider + .logs(Some(Filter { + from_block: Some(U256::from(0)), + topics: Some(vec![FilterTopic::Multiple(vec![log1.topics[0], log2.topics[0]])]), + ..Default::default() + })) + .await?; + assert_eq!(logs, vec![log1.clone(), log2.clone()]); + + // Altogether + let logs = provider + .logs(Some(Filter { + from_block: Some(log1.block_number), + to_block: Some(log2.block_number), + block_hash: None, + address: Some(vec![log1.address, log2.address].into()), + topics: Some(vec![FilterTopic::Multiple(vec![log1.topics[0], log2.topics[0]])]), + })) + .await?; + assert_eq!(logs, vec![log1.clone(), log2.clone()]); + Ok(()) + } } diff --git a/substrate/frame/revive/rpc/src/tests.rs b/substrate/frame/revive/rpc/src/tests.rs index e1ac274d32ea8..375e3a5dd3f8c 100644 --- a/substrate/frame/revive/rpc/src/tests.rs +++ b/substrate/frame/revive/rpc/src/tests.rs @@ -230,7 +230,7 @@ async fn 
revert_call() -> anyhow::Result<()> { .unwrap_err(); let call_err = unwrap_call_err!(err.source().unwrap()); - assert_eq!(call_err.message(), "execution reverted: This is a require error"); + assert_eq!(call_err.message(), "execution reverted: revert: This is a require error"); assert_eq!(call_err.code(), 3); Ok(()) } diff --git a/substrate/frame/revive/src/evm.rs b/substrate/frame/revive/src/evm.rs index 33660a36aa6ea..f340474f472e0 100644 --- a/substrate/frame/revive/src/evm.rs +++ b/substrate/frame/revive/src/evm.rs @@ -24,46 +24,4 @@ pub use tracing::*; mod gas_encoder; pub use gas_encoder::*; pub mod runtime; - -use crate::alloc::{format, string::*}; - -/// Extract the revert message from a revert("msg") solidity statement. -pub fn extract_revert_message(exec_data: &[u8]) -> Option { - let error_selector = exec_data.get(0..4)?; - - match error_selector { - // assert(false) - [0x4E, 0x48, 0x7B, 0x71] => { - let panic_code: u32 = U256::from_big_endian(exec_data.get(4..36)?).try_into().ok()?; - - // See https://docs.soliditylang.org/en/latest/control-structures.html#panic-via-assert-and-error-via-require - let msg = match panic_code { - 0x00 => "generic panic", - 0x01 => "assert(false)", - 0x11 => "arithmetic underflow or overflow", - 0x12 => "division or modulo by zero", - 0x21 => "enum overflow", - 0x22 => "invalid encoded storage byte array accessed", - 0x31 => "out-of-bounds array access; popping on an empty array", - 0x32 => "out-of-bounds access of an array or bytesN", - 0x41 => "out of memory", - 0x51 => "uninitialized function", - code => return Some(format!("execution reverted: unknown panic code: {code:#x}")), - }; - - Some(format!("execution reverted: {msg}")) - }, - // revert(string) - [0x08, 0xC3, 0x79, 0xA0] => { - let decoded = ethabi::decode(&[ethabi::ParamKind::String], &exec_data[4..]).ok()?; - if let Some(ethabi::Token::String(msg)) = decoded.first() { - return Some(format!("execution reverted: {}", String::from_utf8_lossy(msg))) - } - 
Some("execution reverted".to_string()) - }, - _ => { - log::debug!(target: crate::LOG_TARGET, "Unknown revert function selector: {error_selector:?}"); - Some("execution reverted".to_string()) - }, - } -} +pub use alloy_core::sol_types::decode_revert_reason; diff --git a/substrate/frame/revive/src/evm/api/debug_rpc_types.rs b/substrate/frame/revive/src/evm/api/debug_rpc_types.rs index 0857a59fbf3b6..e9518f6b6400b 100644 --- a/substrate/frame/revive/src/evm/api/debug_rpc_types.rs +++ b/substrate/frame/revive/src/evm/api/debug_rpc_types.rs @@ -156,29 +156,23 @@ pub enum CallType { pub struct CallTrace { /// Address of the sender. pub from: H160, - /// Address of the receiver. - pub to: H160, - /// Call input data. - pub input: Vec, - /// Amount of value transferred. - #[serde(skip_serializing_if = "U256::is_zero")] - pub value: U256, - /// Type of call. - #[serde(rename = "type")] - pub call_type: CallType, /// Amount of gas provided for the call. pub gas: Gas, /// Amount of gas used. #[serde(rename = "gasUsed")] pub gas_used: Gas, + /// Address of the receiver. + pub to: H160, + /// Call input data. + pub input: Bytes, /// Return data. - #[serde(flatten, skip_serializing_if = "Bytes::is_empty")] + #[serde(skip_serializing_if = "Bytes::is_empty")] pub output: Bytes, /// The error message if the call failed. #[serde(skip_serializing_if = "Option::is_none")] pub error: Option, /// The revert reason, if the call reverted. - #[serde(rename = "revertReason")] + #[serde(rename = "revertReason", skip_serializing_if = "Option::is_none")] pub revert_reason: Option, /// List of sub-calls. #[serde(skip_serializing_if = "Vec::is_empty")] @@ -186,6 +180,11 @@ pub struct CallTrace { /// List of logs emitted during the call. #[serde(skip_serializing_if = "Vec::is_empty")] pub logs: Vec, + /// Amount of value transferred. + pub value: U256, + /// Type of call. + #[serde(rename = "type")] + pub call_type: CallType, } /// A log emitted during a call. 
@@ -195,12 +194,11 @@ pub struct CallTrace { pub struct CallLog { /// The address of the contract that emitted the log. pub address: H160, - /// The log's data. - #[serde(skip_serializing_if = "Bytes::is_empty")] - pub data: Bytes, /// The topics used to index the log. #[serde(default, skip_serializing_if = "Vec::is_empty")] pub topics: Vec, + /// The log's data. + pub data: Bytes, /// Position of the log relative to subcalls within the same trace /// See for details #[serde(with = "super::hex_serde")] diff --git a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs index e7003ee7c1891..8fd4c1072a931 100644 --- a/substrate/frame/revive/src/evm/api/rpc_types_gen.rs +++ b/substrate/frame/revive/src/evm/api/rpc_types_gen.rs @@ -137,6 +137,44 @@ impl Default for BlockNumberOrTagOrHash { } } +/// filter +#[derive( + Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, +)] +pub struct Filter { + /// Address(es) + pub address: Option, + /// from block + #[serde(rename = "fromBlock", skip_serializing_if = "Option::is_none")] + pub from_block: Option, + /// to block + #[serde(rename = "toBlock", skip_serializing_if = "Option::is_none")] + pub to_block: Option, + /// Restricts the logs returned to the single block + #[serde(rename = "blockHash", skip_serializing_if = "Option::is_none")] + pub block_hash: Option, + /// Topics + #[serde(skip_serializing_if = "Option::is_none")] + pub topics: Option, +} + +/// Filter results +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum FilterResults { + /// new block or transaction hashes + Hashes(Vec), + /// new logs + Logs(Vec), +} +impl Default for FilterResults { + fn default() -> Self { + FilterResults::Hashes(Default::default()) + } +} + /// Transaction object generic to all types #[derive( Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, 
Deserialize, Eq, PartialEq, @@ -326,6 +364,26 @@ impl Default for TransactionUnsigned { /// Access list pub type AccessList = Vec; +/// Address(es) +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum AddressOrAddresses { + /// Address + Address(Address), + /// Addresses + Addresses(Addresses), +} +impl Default for AddressOrAddresses { + fn default() -> Self { + AddressOrAddresses::Address(Default::default()) + } +} + +/// hex encoded address +pub type Addresses = Vec
; + /// Block tag /// `earliest`: The lowest numbered block the client has available; `finalized`: The most recent /// crypto-economically secure block, cannot be re-orged outside of manual intervention driven by @@ -353,6 +411,9 @@ pub enum BlockTag { Pending, } +/// Filter Topics +pub type FilterTopics = Vec; + #[derive( Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, )] @@ -604,6 +665,23 @@ pub struct AccessListEntry { pub storage_keys: Vec, } +/// Filter Topic List Entry +#[derive( + Debug, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, From, TryInto, Eq, PartialEq, +)] +#[serde(untagged)] +pub enum FilterTopic { + /// Single Topic Match + Single(H256), + /// Multiple Topic Match + Multiple(Vec), +} +impl Default for FilterTopic { + fn default() -> Self { + FilterTopic::Single(Default::default()) + } +} + /// Signed 1559 Transaction #[derive( Debug, Default, Clone, Encode, Decode, TypeInfo, Serialize, Deserialize, Eq, PartialEq, diff --git a/substrate/frame/revive/src/evm/tracing.rs b/substrate/frame/revive/src/evm/tracing.rs index 7466ec1de4877..7eae64db79fb1 100644 --- a/substrate/frame/revive/src/evm/tracing.rs +++ b/substrate/frame/revive/src/evm/tracing.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
use crate::{ - evm::{extract_revert_message, CallLog, CallTrace, CallType}, + evm::{decode_revert_reason, CallLog, CallTrace, CallType}, primitives::ExecReturnValue, tracing::Tracer, DispatchError, Weight, @@ -72,7 +72,7 @@ impl Gas> Tracer for CallTracer Gas> Tracer for CallTracer t.exit_child_span(&output, Weight::zero()), + Err(e) => t.exit_child_span_with_error(e.error.into(), Weight::zero()), + } }); - let result = Self::transfer_from_origin(&origin, &origin, &dest, value); - match result { - Ok(ref output) => { - if_tracing(|t| { - t.exit_child_span(&output, Weight::zero()); - }); - }, - Err(e) => { - if_tracing(|t| t.exit_child_span_with_error(e.error.into(), Weight::zero())); - }, - } result } } @@ -874,61 +873,75 @@ where read_only: bool, origin_is_caller: bool, ) -> Result, E)>, ExecError> { - let (account_id, contract_info, executable, delegate, entry_point) = match frame_args { - FrameArgs::Call { dest, cached_info, delegated_call } => { - let contract = if let Some(contract) = cached_info { - contract - } else { - if let Some(contract) = - >::get(T::AddressMapper::to_address(&dest)) - { + let (account_id, contract_info, executable, delegate, entry_point, nested_gas) = + match frame_args { + FrameArgs::Call { dest, cached_info, delegated_call } => { + let contract = if let Some(contract) = cached_info { contract } else { - return Ok(None); - } - }; + if let Some(contract) = + >::get(T::AddressMapper::to_address(&dest)) + { + contract + } else { + return Ok(None); + } + }; - let (executable, delegate_caller) = - if let Some(DelegatedCall { executable, caller, callee }) = delegated_call { + let mut nested_gas = gas_meter.nested(gas_limit); + let (executable, delegate_caller) = if let Some(DelegatedCall { + executable, + caller, + callee, + }) = delegated_call + { (executable, Some(DelegateInfo { caller, callee })) } else { - (E::from_storage(contract.code_hash, gas_meter)?, None) + (E::from_storage(contract.code_hash, &mut nested_gas)?, None) }; - (dest, 
contract, executable, delegate_caller, ExportedFunction::Call) - }, - FrameArgs::Instantiate { sender, executable, salt, input_data } => { - let deployer = T::AddressMapper::to_address(&sender); - let account_nonce = >::account_nonce(&sender); - let address = if let Some(salt) = salt { - address::create2(&deployer, executable.code(), input_data, salt) - } else { - use sp_runtime::Saturating; - address::create1( - &deployer, - // the Nonce from the origin has been incremented pre-dispatch, so we - // need to subtract 1 to get the nonce at the time of the call. - if origin_is_caller { - account_nonce.saturating_sub(1u32.into()).saturated_into() - } else { - account_nonce.saturated_into() - }, + ( + dest, + contract, + executable, + delegate_caller, + ExportedFunction::Call, + nested_gas, ) - }; - let contract = ContractInfo::new( - &address, - >::account_nonce(&sender), - *executable.code_hash(), - )?; - ( - T::AddressMapper::to_fallback_account_id(&address), - contract, - executable, - None, - ExportedFunction::Constructor, - ) - }, - }; + }, + FrameArgs::Instantiate { sender, executable, salt, input_data } => { + let deployer = T::AddressMapper::to_address(&sender); + let account_nonce = >::account_nonce(&sender); + let address = if let Some(salt) = salt { + address::create2(&deployer, executable.code(), input_data, salt) + } else { + use sp_runtime::Saturating; + address::create1( + &deployer, + // the Nonce from the origin has been incremented pre-dispatch, so we + // need to subtract 1 to get the nonce at the time of the call. 
+ if origin_is_caller { + account_nonce.saturating_sub(1u32.into()).saturated_into() + } else { + account_nonce.saturated_into() + }, + ) + }; + let contract = ContractInfo::new( + &address, + >::account_nonce(&sender), + *executable.code_hash(), + )?; + ( + T::AddressMapper::to_fallback_account_id(&address), + contract, + executable, + None, + ExportedFunction::Constructor, + gas_meter.nested(gas_limit), + ) + }, + }; let frame = Frame { delegate, @@ -936,7 +949,7 @@ where contract_info: CachedContract::Cached(contract_info), account_id, entry_point, - nested_gas: gas_meter.nested(gas_limit), + nested_gas, nested_storage: storage_meter.nested(deposit_limit), allows_reentry: true, read_only, @@ -1414,13 +1427,28 @@ where )? { self.run(executable, input_data) } else { - Self::transfer_from_origin( + let result = Self::transfer_from_origin( &self.origin, &Origin::from_account_id(self.account_id().clone()), &dest, value, - )?; - Ok(()) + ); + if_tracing(|t| { + t.enter_child_span( + T::AddressMapper::to_address(self.account_id()), + T::AddressMapper::to_address(&dest), + false, + false, + value, + &input_data, + Weight::zero(), + ); + match result { + Ok(ref output) => t.exit_child_span(&output, Weight::zero()), + Err(e) => t.exit_child_span_with_error(e.error.into(), Weight::zero()), + } + }); + result.map(|_| ()) } }; diff --git a/substrate/frame/revive/src/lib.rs b/substrate/frame/revive/src/lib.rs index 69d60c5d6a952..9a86f6bf97db4 100644 --- a/substrate/frame/revive/src/lib.rs +++ b/substrate/frame/revive/src/lib.rs @@ -41,7 +41,7 @@ pub mod tracing; pub mod weights; use crate::{ - evm::{runtime::GAS_PRICE, GasEncoder, GenericTransaction}, + evm::{runtime::GAS_PRICE, CallTrace, GasEncoder, GenericTransaction, TracerConfig}, exec::{AccountIdOf, ExecError, Executable, Key, Stack as ExecStack}, gas::GasMeter, storage::{meter::Meter as StorageMeter, ContractInfo, DeletionQueueManager}, @@ -723,7 +723,6 @@ pub mod pallet { #[pallet::compact] storage_deposit_limit: 
BalanceOf, data: Vec, ) -> DispatchResultWithPostInfo { - log::info!(target: LOG_TARGET, "Call: {:?} {:?} {:?}", dest, value, data); let mut output = Self::bare_call( origin, dest, @@ -1018,7 +1017,7 @@ where storage_deposit = storage_meter .try_into_deposit(&origin, storage_deposit_limit.is_unchecked()) .inspect_err(|err| { - log::error!(target: LOG_TARGET, "Failed to transfer deposit: {err:?}"); + log::debug!(target: LOG_TARGET, "Failed to transfer deposit: {err:?}"); })?; Ok(result) }; @@ -1328,6 +1327,12 @@ where Self::convert_evm_to_native(fee, ConversionPrecision::RoundUp) } + /// Convert a weight to a gas value. + pub fn evm_gas_from_weight(weight: Weight) -> U256 { + let fee = T::WeightPrice::convert(weight); + Self::evm_fee_to_gas(fee) + } + /// Get the block gas limit. pub fn evm_block_gas_limit() -> U256 { let max_block_weight = T::BlockWeights::get() @@ -1335,8 +1340,7 @@ where .max_total .unwrap_or_else(|| T::BlockWeights::get().max_block); - let fee = T::WeightPrice::convert(max_block_weight); - Self::evm_fee_to_gas(fee) + Self::evm_gas_from_weight(max_block_weight) } /// Get the gas price. @@ -1509,5 +1513,35 @@ sp_api::decl_runtime_apis! { address: H160, key: [u8; 32], ) -> GetStorageResult; + + + /// Traces the execution of an entire block and returns call traces. + /// + /// This is intended to be called through `state_call` to replay the block from the + /// parent block. + /// + /// See eth-rpc `debug_traceBlockByNumber` for usage. + fn trace_block( + block: Block, + config: TracerConfig + ) -> Vec<(u32, CallTrace)>; + + /// Traces the execution of a specific transaction within a block. + /// + /// This is intended to be called through `state_call` to replay the block from the + /// parent hash up to the transaction. + /// + /// See eth-rpc `debug_traceTransaction` for usage. + fn trace_tx( + block: Block, + tx_index: u32, + config: TracerConfig + ) -> Option; + + /// Dry run and return the trace of the given call. 
+ /// + /// See eth-rpc `debug_traceCall` for usage. + fn trace_call(tx: GenericTransaction, config: TracerConfig) -> Result; + } } diff --git a/substrate/frame/revive/src/limits.rs b/substrate/frame/revive/src/limits.rs index a4060cf6cc91c..96f8131a723c7 100644 --- a/substrate/frame/revive/src/limits.rs +++ b/substrate/frame/revive/src/limits.rs @@ -119,6 +119,12 @@ pub mod code { let blob: CodeVec = blob.try_into().map_err(|_| >::BlobTooLarge)?; + #[cfg(feature = "std")] + if std::env::var_os("REVIVE_SKIP_VALIDATION").is_some() { + log::warn!(target: LOG_TARGET, "Skipping validation because env var REVIVE_SKIP_VALIDATION is set"); + return Ok(blob) + } + let program = polkavm::ProgramBlob::parse(blob.as_slice().into()).map_err(|err| { log::debug!(target: LOG_TARGET, "failed to parse polkavm blob: {err:?}"); Error::::CodeRejected diff --git a/substrate/frame/revive/src/tests.rs b/substrate/frame/revive/src/tests.rs index 319b35d1916bd..39ac6fcaf3422 100644 --- a/substrate/frame/revive/src/tests.rs +++ b/substrate/frame/revive/src/tests.rs @@ -1005,6 +1005,7 @@ fn transient_storage_limit_in_call() { fn deploy_and_call_other_contract() { let (caller_wasm, _caller_code_hash) = compile_module("caller_contract").unwrap(); let (callee_wasm, callee_code_hash) = compile_module("return_with_data").unwrap(); + let code_load_weight = crate::wasm::code_load_weight(callee_wasm.len() as u32); ExtBuilder::default().existential_deposit(1).build().execute_with(|| { let min_balance = Contracts::min_balance(); @@ -1032,7 +1033,9 @@ fn deploy_and_call_other_contract() { // Call BOB contract, which attempts to instantiate and call the callee contract and // makes various assertions on the results from those calls. 
- assert_ok!(builder::call(caller_addr).data(callee_code_hash.as_ref().to_vec()).build()); + assert_ok!(builder::call(caller_addr) + .data((callee_code_hash, code_load_weight.ref_time()).encode()) + .build()); assert_eq!( System::events(), @@ -4428,8 +4431,10 @@ fn tracing_works_for_transfers() { trace(&mut tracer, || { builder::bare_call(BOB_ADDR).value(10_000_000).build_and_unwrap_result(); }); + + let traces = tracer.collect_traces(); assert_eq!( - tracer.collect_traces(), + traces, vec![CallTrace { from: ALICE_ADDR, to: BOB_ADDR, @@ -4442,20 +4447,19 @@ fn tracing_works_for_transfers() { } #[test] -#[ignore = "does not collect the gas_used properly"] fn tracing_works() { use crate::evm::*; use CallType::*; let (code, _code_hash) = compile_module("tracing").unwrap(); let (wasm_callee, _) = compile_module("tracing_callee").unwrap(); ExtBuilder::default().existential_deposit(200).build().execute_with(|| { - let _ = ::Currency::set_balance(&ALICE, 1_000_000); + let _ = ::Currency::set_balance(&ALICE, 100_000_000); let Contract { addr: addr_callee, .. } = builder::bare_instantiate(Code::Upload(wasm_callee)).build_and_unwrap_contract(); let Contract { addr, .. 
} = - builder::bare_instantiate(Code::Upload(code)).build_and_unwrap_contract(); + builder::bare_instantiate(Code::Upload(code)).value(10_000_000).build_and_unwrap_contract(); let tracer_options = vec![ ( false , vec![]), @@ -4499,20 +4503,18 @@ fn tracing_works() { vec![CallTrace { from: ALICE_ADDR, to: addr, - input: (3u32, addr_callee).encode(), + input: (3u32, addr_callee).encode().into(), call_type: Call, logs: logs.clone(), calls: vec![ CallTrace { from: addr, to: addr_callee, - input: 2u32.encode(), + input: 2u32.encode().into(), output: hex_literal::hex!( "08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001a546869732066756e6374696f6e20616c77617973206661696c73000000000000" ).to_vec().into(), - revert_reason: Some( - "execution reverted: This function always fails".to_string() - ), + revert_reason: Some("revert: This function always fails".to_string()), error: Some("execution reverted".to_string()), call_type: Call, ..Default::default() @@ -4520,14 +4522,14 @@ fn tracing_works() { CallTrace { from: addr, to: addr, - input: (2u32, addr_callee).encode(), + input: (2u32, addr_callee).encode().into(), call_type: Call, logs: logs.clone(), calls: vec![ CallTrace { from: addr, to: addr_callee, - input: 1u32.encode(), + input: 1u32.encode().into(), output: Default::default(), error: Some("ContractTrapped".to_string()), call_type: Call, @@ -4536,14 +4538,14 @@ fn tracing_works() { CallTrace { from: addr, to: addr, - input: (1u32, addr_callee).encode(), + input: (1u32, addr_callee).encode().into(), call_type: Call, logs: logs.clone(), calls: vec![ CallTrace { from: addr, to: addr_callee, - input: 0u32.encode(), + input: 0u32.encode().into(), output: 0u32.to_le_bytes().to_vec().into(), call_type: Call, ..Default::default() @@ -4551,8 +4553,17 @@ fn tracing_works() { CallTrace { from: addr, to: addr, - input: (0u32, addr_callee).encode(), + input: (0u32, addr_callee).encode().into(), 
call_type: Call, + calls: vec![ + CallTrace { + from: addr, + to: BOB_ADDR, + value: U256::from(100), + call_type: CallType::Call, + ..Default::default() + } + ], ..Default::default() }, ], diff --git a/substrate/frame/revive/src/wasm/mod.rs b/substrate/frame/revive/src/wasm/mod.rs index dc49fae26fdaa..3424473520193 100644 --- a/substrate/frame/revive/src/wasm/mod.rs +++ b/substrate/frame/revive/src/wasm/mod.rs @@ -115,6 +115,11 @@ impl Token for CodeLoadToken { } } +#[cfg(test)] +pub fn code_load_weight(code_len: u32) -> Weight { + Token::::weight(&CodeLoadToken(code_len)) +} + impl WasmBlob where BalanceOf: Into + TryFrom, @@ -307,6 +312,7 @@ impl WasmBlob { config.set_cache_enabled(false); #[cfg(feature = "std")] if std::env::var_os("REVIVE_USE_COMPILER").is_some() { + log::warn!(target: LOG_TARGET, "Using PolkaVM compiler backend because env var REVIVE_USE_COMPILER is set"); config.set_backend(Some(polkavm::BackendKind::Compiler)); } let engine = polkavm::Engine::new(&config).expect( diff --git a/substrate/frame/scheduler/src/benchmarking.rs b/substrate/frame/scheduler/src/benchmarking.rs index ff40e8ef8abfc..e7bd355f3a9bb 100644 --- a/substrate/frame/scheduler/src/benchmarking.rs +++ b/substrate/frame/scheduler/src/benchmarking.rs @@ -49,10 +49,7 @@ fn assert_last_event(generic_event: ::RuntimeEvent) { /// - `None`: aborted (hash without preimage) /// - `Some(true)`: hash resolves into call if possible, plain call otherwise /// - `Some(false)`: plain call -fn fill_schedule( - when: frame_system::pallet_prelude::BlockNumberFor, - n: u32, -) -> Result<(), &'static str> { +fn fill_schedule(when: BlockNumberFor, n: u32) -> Result<(), &'static str> { let t = DispatchTime::At(when); let origin: ::PalletsOrigin = frame_system::RawOrigin::Root.into(); for i in 0..n { diff --git a/substrate/frame/scheduler/src/lib.rs b/substrate/frame/scheduler/src/lib.rs index 468099010bf97..80ba7fd06da07 100644 --- a/substrate/frame/scheduler/src/lib.rs +++ 
b/substrate/frame/scheduler/src/lib.rs @@ -100,14 +100,11 @@ use frame_support::{ }, weights::{Weight, WeightMeter}, }; -use frame_system::{ - pallet_prelude::BlockNumberFor, - {self as system}, -}; +use frame_system::{self as system}; use scale_info::TypeInfo; use sp_io::hashing::blake2_256; use sp_runtime::{ - traits::{BadOrigin, Dispatchable, One, Saturating, Zero}, + traits::{BadOrigin, BlockNumberProvider, Dispatchable, One, Saturating, Zero}, BoundedVec, DispatchError, RuntimeDebug, }; @@ -125,6 +122,9 @@ pub type CallOrHashOf = pub type BoundedCallOf = Bounded<::RuntimeCall, ::Hashing>; +pub type BlockNumberFor = + <::BlockNumberProvider as BlockNumberProvider>::BlockNumber; + /// The configuration of the retry mechanism for a given task along with its current state. #[derive(Clone, Copy, RuntimeDebug, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo)] pub struct RetryConfig { @@ -230,7 +230,7 @@ impl MarginalWeightInfo for T {} pub mod pallet { use super::*; use frame_support::{dispatch::PostDispatchInfo, pallet_prelude::*}; - use frame_system::pallet_prelude::*; + use frame_system::pallet_prelude::{BlockNumberFor as SystemBlockNumberFor, OriginFor}; /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); @@ -292,6 +292,35 @@ pub mod pallet { /// The preimage provider with which we look up call hashes to get the call. type Preimages: QueryPreimage + StorePreimage; + + /// Query the current block number. + /// + /// Must return monotonically increasing values when called from consecutive blocks. It is + /// generally expected that the values also do not differ "too much" between consecutive + /// blocks. A future addition to this pallet will allow bigger difference between + /// consecutive blocks to make it possible to be utilized by parachains with *Agile + /// Coretime*. *Agile Coretime* parachains are currently not supported and must continue to + /// use their local block number provider. 
+ /// + /// Can be configured to return either: + /// - the local block number of the runtime via `frame_system::Pallet` + /// - a remote block number, eg from the relay chain through `RelaychainDataProvider` + /// - an arbitrary value through a custom implementation of the trait + /// + /// Suggested values: + /// - Solo- and Relay-chains should use `frame_system::Pallet`. There are no concerns with + /// this configuration. + /// - Parachains should also use `frame_system::Pallet` for the time being. The scheduler + /// pallet is not yet ready for the case that big numbers of blocks are skipped. In an + /// *Agile Coretime* chain with relay chain number provider configured, it could otherwise + /// happen that the scheduler will not be able to catch up to its agendas, since too many + /// relay blocks are missing if the parachain only produces blocks rarely. + /// + /// There is currently no migration provided to "hot-swap" block number providers and it is + /// therefore highly advised to stay with the default (local) values. If you still want to + /// swap block number providers on the fly, then please at least ensure that you do not run + /// any pallet migration in the same runtime upgrade. 
+ type BlockNumberProvider: BlockNumberProvider; } #[pallet::storage] @@ -374,11 +403,12 @@ pub mod pallet { } #[pallet::hooks] - impl Hooks> for Pallet { + impl Hooks> for Pallet { /// Execute the scheduled calls - fn on_initialize(now: BlockNumberFor) -> Weight { + fn on_initialize(_now: SystemBlockNumberFor) -> Weight { + let now = T::BlockNumberProvider::current_block_number(); let mut weight_counter = WeightMeter::with_limit(T::MaximumWeight::get()); - Self::service_agendas(&mut weight_counter, now, u32::max_value()); + Self::service_agendas(&mut weight_counter, now, u32::MAX); weight_counter.consumed() } } @@ -889,8 +919,7 @@ impl Pallet { fn resolve_time( when: DispatchTime>, ) -> Result, DispatchError> { - let now = frame_system::Pallet::::block_number(); - + let now = T::BlockNumberProvider::current_block_number(); let when = match when { DispatchTime::At(x) => x, // The current block has already completed it's scheduled tasks, so @@ -1165,7 +1194,7 @@ impl Pallet { let mut count_down = max; let service_agenda_base_weight = T::WeightInfo::service_agenda_base(max_items); while count_down > 0 && when <= now && weight.can_consume(service_agenda_base_weight) { - if !Self::service_agenda(weight, &mut executed, now, when, u32::max_value()) { + if !Self::service_agenda(weight, &mut executed, now, when, u32::MAX) { incomplete_since = incomplete_since.min(when); } when.saturating_inc(); diff --git a/substrate/frame/scheduler/src/migration.rs b/substrate/frame/scheduler/src/migration.rs index a304689a120cc..f3d04f215ee0d 100644 --- a/substrate/frame/scheduler/src/migration.rs +++ b/substrate/frame/scheduler/src/migration.rs @@ -19,7 +19,6 @@ use super::*; use frame_support::traits::OnRuntimeUpgrade; -use frame_system::pallet_prelude::BlockNumberFor; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; diff --git a/substrate/frame/scheduler/src/mock.rs b/substrate/frame/scheduler/src/mock.rs index 43a964bcf1497..a9aea97542acd 100644 --- 
a/substrate/frame/scheduler/src/mock.rs +++ b/substrate/frame/scheduler/src/mock.rs @@ -223,10 +223,11 @@ impl Config for Test { type RuntimeCall = RuntimeCall; type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EitherOfDiverse, EnsureSignedBy>; + type OriginPrivilegeCmp = EqualPrivilegeOnly; type MaxScheduledPerBlock = ConstU32<10>; type WeightInfo = TestWeightInfo; - type OriginPrivilegeCmp = EqualPrivilegeOnly; type Preimages = Preimage; + type BlockNumberProvider = frame_system::Pallet; } pub type LoggerCall = logger::Call; diff --git a/substrate/frame/scheduler/src/tests.rs b/substrate/frame/scheduler/src/tests.rs index 7552239341088..d0a3acc05ac7e 100644 --- a/substrate/frame/scheduler/src/tests.rs +++ b/substrate/frame/scheduler/src/tests.rs @@ -1636,8 +1636,9 @@ fn on_initialize_weight_is_correct() { )); // Will include the named periodic only + ::BlockNumberProvider::set_block_number(1); assert_eq!( - Scheduler::on_initialize(1), + Scheduler::on_initialize(42), // BN unused TestWeightInfo::service_agendas_base() + TestWeightInfo::service_agenda_base(1) + ::service_task(None, true, true) + @@ -1648,8 +1649,9 @@ fn on_initialize_weight_is_correct() { assert_eq!(logger::log(), vec![(root(), 2600u32)]); // Will include anon and anon periodic + ::BlockNumberProvider::set_block_number(2); assert_eq!( - Scheduler::on_initialize(2), + Scheduler::on_initialize(123), // BN unused TestWeightInfo::service_agendas_base() + TestWeightInfo::service_agenda_base(2) + ::service_task(None, false, true) + @@ -1663,8 +1665,9 @@ fn on_initialize_weight_is_correct() { assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); // Will include named only + ::BlockNumberProvider::set_block_number(3); assert_eq!( - Scheduler::on_initialize(3), + Scheduler::on_initialize(555), // BN unused TestWeightInfo::service_agendas_base() + TestWeightInfo::service_agenda_base(1) + ::service_task(None, true, false) + @@ -1678,7 +1681,8 @@ fn 
on_initialize_weight_is_correct() { ); // Will contain none - let actual_weight = Scheduler::on_initialize(4); + ::BlockNumberProvider::set_block_number(4); + let actual_weight = Scheduler::on_initialize(444); // BN unused assert_eq!( actual_weight, TestWeightInfo::service_agendas_base() + TestWeightInfo::service_agenda_base(0) @@ -2116,6 +2120,18 @@ fn migration_to_v4_works() { }); } +impl Into for u32 { + fn into(self) -> OriginCaller { + match self { + 3u32 => system::RawOrigin::Root.into(), + 2u32 => system::RawOrigin::None.into(), + 101u32 => system::RawOrigin::Signed(101).into(), + 102u32 => system::RawOrigin::Signed(102).into(), + _ => unreachable!("test make no use of it"), + } + } +} + #[test] fn test_migrate_origin() { new_test_ext().execute_with(|| { @@ -2151,18 +2167,6 @@ fn test_migrate_origin() { frame_support::migration::put_storage_value(b"Scheduler", b"Agenda", &k, old); } - impl Into for u32 { - fn into(self) -> OriginCaller { - match self { - 3u32 => system::RawOrigin::Root.into(), - 2u32 => system::RawOrigin::None.into(), - 101u32 => system::RawOrigin::Signed(101).into(), - 102u32 => system::RawOrigin::Signed(102).into(), - _ => unreachable!("test make no use of it"), - } - } - } - Scheduler::migrate_origin::(); assert_eq_uvec!( diff --git a/substrate/frame/support/procedural/src/benchmark.rs b/substrate/frame/support/procedural/src/benchmark.rs index 6ba7ad058297f..967eceb884ac5 100644 --- a/substrate/frame/support/procedural/src/benchmark.rs +++ b/substrate/frame/support/procedural/src/benchmark.rs @@ -30,7 +30,7 @@ use syn::{ token::{Comma, Gt, Lt, PathSep}, Attribute, Error, Expr, ExprBlock, ExprCall, ExprPath, FnArg, Item, ItemFn, ItemMod, Pat, Path, PathArguments, PathSegment, Result, ReturnType, Signature, Stmt, Token, Type, TypePath, - Visibility, WhereClause, + Visibility, WhereClause, WherePredicate, }; mod keywords { @@ -481,8 +481,25 @@ pub fn benchmarks( let module: ItemMod = syn::parse(tokens)?; let mod_span = module.span(); let 
where_clause = match syn::parse::(attrs.clone()) { - Ok(_) => quote!(), - Err(_) => syn::parse::(attrs)?.predicates.to_token_stream(), + Ok(_) => + if instance { + quote!(T: Config, I: 'static) + } else { + quote!(T: Config) + }, + Err(_) => { + let mut where_clause_predicates = syn::parse::(attrs)?.predicates; + + // Ensure the where clause contains the Config trait bound + if instance { + where_clause_predicates.push(syn::parse_str::("T: Config")?); + where_clause_predicates.push(syn::parse_str::("I:'static")?); + } else { + where_clause_predicates.push(syn::parse_str::("T: Config")?); + } + + where_clause_predicates.to_token_stream() + }, }; let mod_vis = module.vis; let mod_name = module.ident; @@ -568,10 +585,6 @@ pub fn benchmarks( false => quote!(T), true => quote!(T, I), }; - let type_impl_generics = match instance { - false => quote!(T: Config), - true => quote!(T: Config, I: 'static), - }; let frame_system = generate_access_from_frame_or_crate("frame-system")?; @@ -640,7 +653,7 @@ pub fn benchmarks( * } - impl<#type_impl_generics> #krate::BenchmarkingSetup<#type_use_generics> for SelectedBenchmark where #where_clause { + impl<#type_use_generics> #krate::BenchmarkingSetup<#type_use_generics> for SelectedBenchmark where #where_clause { fn components(&self) -> #krate::__private::Vec<(#krate::BenchmarkParameter, u32, u32)> { match self { #( @@ -671,8 +684,8 @@ pub fn benchmarks( } } #[cfg(any(feature = "runtime-benchmarks", test))] - impl<#type_impl_generics> #krate::Benchmarking for Pallet<#type_use_generics> - where T: #frame_system::Config, #where_clause + impl<#type_use_generics> #krate::Benchmarking for Pallet<#type_use_generics> + where T: #frame_system::Config,#where_clause { fn benchmarks( extra: bool, @@ -837,7 +850,7 @@ pub fn benchmarks( } #[cfg(test)] - impl<#type_impl_generics> Pallet<#type_use_generics> where T: #frame_system::Config, #where_clause { + impl<#type_use_generics> Pallet<#type_use_generics> where T: #frame_system::Config, 
#where_clause { /// Test a particular benchmark by name. /// /// This isn't called `test_benchmark_by_name` just in case some end-user eventually @@ -930,11 +943,6 @@ fn expand_benchmark( true => quote!(T, I), }; - let type_impl_generics = match is_instance { - false => quote!(T: Config), - true => quote!(T: Config, I: 'static), - }; - // used in the benchmarking impls let (pre_call, post_call, fn_call_body) = match &benchmark_def.call_def { BenchmarkCallDef::ExtrinsicCall { origin, expr_call, attr_span: _ } => { @@ -1030,13 +1038,11 @@ fn expand_benchmark( // modify signature generics, ident, and inputs, e.g: // before: `fn bench(u: Linear<1, 100>) -> Result<(), BenchmarkError>` - // after: `fn _bench , I: 'static>(u: u32, verify: bool) -> Result<(), + // after: `fn _bench (u: u32, verify: bool) where T: Config, I: 'static -> Result<(), // BenchmarkError>` let mut sig = benchmark_def.fn_sig; - sig.generics = parse_quote!(<#type_impl_generics>); - if !where_clause.is_empty() { - sig.generics.where_clause = parse_quote!(where #where_clause); - } + sig.generics = parse_quote!(<#type_use_generics>); + sig.generics.where_clause = parse_quote!(where #where_clause); sig.ident = Ident::new(format!("_{}", name.to_token_stream().to_string()).as_str(), Span::call_site()); let mut fn_param_inputs: Vec = @@ -1081,7 +1087,7 @@ fn expand_benchmark( struct #name; #[allow(unused_variables)] - impl<#type_impl_generics> #krate::BenchmarkingSetup<#type_use_generics> + impl<#type_use_generics> #krate::BenchmarkingSetup<#type_use_generics> for #name where #where_clause { fn components(&self) -> #krate::__private::Vec<(#krate::BenchmarkParameter, u32, u32)> { #krate::__private::vec! 
[ @@ -1123,7 +1129,7 @@ fn expand_benchmark( } #[cfg(test)] - impl<#type_impl_generics> Pallet<#type_use_generics> where T: #frame_system::Config, #where_clause { + impl<#type_use_generics> Pallet<#type_use_generics> where T: #frame_system::Config, #where_clause { #[allow(unused)] fn #test_ident() -> Result<(), #krate::BenchmarkError> { let selected_benchmark = SelectedBenchmark::#name; diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 444096bd9a5b8..2385a8f7ee4bf 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -785,7 +785,7 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied = help: the trait `Serialize` is implemented for `GenesisConfig` = note: required for `GenesisConfig` to implement `Serialize` note: required by a bound in `frame_support::sp_runtime::serde::ser::SerializeStruct::serialize_field` - --> $CARGO/serde-1.0.214/src/ser/mod.rs + --> $CARGO/serde-1.0.217/src/ser/mod.rs | | fn serialize_field(&mut self, key: &'static str, value: &T) -> Result<(), Self::Error> | --------------- required by a bound in this associated function diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index fa6b7284d889f..641cdef1cc087 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -65,7 +65,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied &mut T Arc Box - Cow<'a, T> + Cow<'_, T> Rc Vec bytes::bytes::Bytes @@ -148,7 +148,7 @@ error[E0277]: the trait 
bound `Bar: WrapperTypeEncode` is not satisfied &mut T Arc Box - Cow<'a, T> + Cow<'_, T> Rc Vec bytes::bytes::Bytes @@ -210,7 +210,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied &mut T Arc Box - Cow<'a, T> + Cow<'_, T> Rc Vec bytes::bytes::Bytes diff --git a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index 944b194b7bcf1..a79a3509e69c8 100644 --- a/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/substrate/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -65,7 +65,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied &mut T Arc Box - Cow<'a, T> + Cow<'_, T> Rc Vec bytes::bytes::Bytes @@ -148,7 +148,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied &mut T Arc Box - Cow<'a, T> + Cow<'_, T> Rc Vec bytes::bytes::Bytes @@ -210,7 +210,7 @@ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied &mut T Arc Box - Cow<'a, T> + Cow<'_, T> Rc Vec bytes::bytes::Bytes diff --git a/templates/parachain/runtime/src/apis.rs b/templates/parachain/runtime/src/apis.rs index d7da43b86af16..53fa753946d84 100644 --- a/templates/parachain/runtime/src/apis.rs +++ b/templates/parachain/runtime/src/apis.rs @@ -252,7 +252,7 @@ impl_runtime_apis! { Vec, Vec, ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; + use frame_benchmarking::BenchmarkList; use polkadot_sdk::frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use cumulus_pallet_session_benchmarking::Pallet as SessionBench; @@ -268,7 +268,7 @@ impl_runtime_apis! 
{ fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{BenchmarkError, Benchmarking, BenchmarkBatch}; + use frame_benchmarking::{BenchmarkError, BenchmarkBatch}; use super::*; use frame_system_benchmarking::Pallet as SystemBench; diff --git a/templates/solochain/runtime/src/apis.rs b/templates/solochain/runtime/src/apis.rs index 9dc588c43a2d5..536e8250056d3 100644 --- a/templates/solochain/runtime/src/apis.rs +++ b/templates/solochain/runtime/src/apis.rs @@ -226,7 +226,7 @@ impl_runtime_apis! { Vec, Vec, ) { - use frame_benchmarking::{baseline, Benchmarking, BenchmarkList}; + use frame_benchmarking::{baseline, BenchmarkList}; use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench; @@ -244,7 +244,7 @@ impl_runtime_apis! { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, alloc::string::String> { - use frame_benchmarking::{baseline, Benchmarking, BenchmarkBatch}; + use frame_benchmarking::{baseline, BenchmarkBatch}; use sp_storage::TrackedStorageKey; use frame_system_benchmarking::Pallet as SystemBench; use frame_system_benchmarking::extensions::Pallet as SystemExtensionsBench;