diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 4e90f7392c2..fcd14715e8a 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -42,27 +42,26 @@ on: type: boolean default: false - # TODO: Temporarily disabled to reduce network load, see #6894. - #push: - # # Skip main branch updates where Rust code and dependencies aren't modified. - # branches: - # - main - # paths: - # # code and tests - # - '**/*.rs' - # # hard-coded checkpoints and proptest regressions - # - '**/*.txt' - # # dependencies - # - '**/Cargo.toml' - # - '**/Cargo.lock' - # # configuration files - # - '.cargo/config.toml' - # - '**/clippy.toml' - # # workflow definitions - # - 'docker/**' - # - '.dockerignore' - # - '.github/workflows/cd-deploy-nodes-gcp.yml' - # - '.github/workflows/sub-build-docker-image.yml' + push: + # Skip main branch updates where Rust code and dependencies aren't modified. + branches: + - main + paths: + # code and tests + - '**/*.rs' + # hard-coded checkpoints and proptest regressions + - '**/*.txt' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # workflow definitions + - 'docker/**' + - '.dockerignore' + - '.github/workflows/cd-deploy-nodes-gcp.yml' + - '.github/workflows/sub-build-docker-image.yml' # Only runs the Docker image tests, doesn't deploy any instances pull_request: @@ -176,6 +175,19 @@ jobs: test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' network: ${{ inputs.network || vars.ZCASH_NETWORK }} + # Finds a `tip` cached state disk for zebra from the main branch + # + # Passes the disk name to subsequent jobs using `cached_disk_name` output + # + get-disk-name: + name: Get disk name + uses: ./.github/workflows/sub-find-cached-disks.yml + with: + network: ${{ inputs.network || vars.ZCASH_NETWORK }} + disk_prefix: zebrad-cache + disk_suffix: tip + prefer_main_cached_state: true + # Deploy Managed Instance Groups (MiGs) for Mainnet and Testnet, # with one node in the configured GCP region. 
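As context for the new `get-disk-name` job above: the reusable workflow resolves `cached_disk_name` by querying GCP for the newest ready image whose name follows the `<disk_prefix>-<branch>-<short_sha>-v<state_version>-<network>-<disk_suffix>` convention used by `gcp-get-cached-disks.sh` later in this diff. A minimal sketch of that lookup, with placeholder values:

```bash
#!/usr/bin/env bash
# Sketch of the image lookup behind `get-disk-name`; all values are placeholders.
DISK_PREFIX="zebrad-cache"   # or lwd-cache
DISK_SUFFIX="tip"            # or checkpoint
NETWORK="mainnet"            # lowercase network name
LOCAL_STATE_VERSION="26"     # placeholder; parsed from zebra-state/src/constants.rs

# Newest READY image from any branch and commit matching the naming convention.
gcloud compute images list \
  --filter="status=READY AND name~${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}" \
  --format="value(NAME)" \
  --sort-by=~creationTimestamp \
  --limit=1
```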
# @@ -196,9 +208,11 @@ jobs: matrix: network: [Mainnet, Testnet] name: Deploy ${{ matrix.network }} nodes - needs: [ build, versioning, test-configuration-file, test-zebra-conf-path ] + needs: [ build, versioning, test-configuration-file, test-zebra-conf-path, get-disk-name ] runs-on: ubuntu-latest timeout-minutes: 60 + env: + CACHED_DISK_NAME: ${{ needs.get-disk-name.outputs.cached_disk_name }} permissions: contents: 'read' id-token: 'write' @@ -240,24 +254,31 @@ # but the implementation is failing as it's requiring the disk names, contrary to what is stated in the official documentation - name: Create instance template for ${{ matrix.network }} run: | + NAME="zebrad-cache-${{ env.GITHUB_HEAD_REF_SLUG_URL || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" + DISK_PARAMS="name=${NAME},device-name=${NAME},size=400GB,type=pd-ssd" + if [ -n "${{ env.CACHED_DISK_NAME }}" ]; then + DISK_PARAMS+=",image=${{ env.CACHED_DISK_NAME }}" + else + echo "No cached disk found for ${{ matrix.network }} in main branch" + exit 1 + fi gcloud compute instance-templates create-with-container zebrad-${{ needs.versioning.outputs.major_version || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK} \ - --boot-disk-size 300GB \ + --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ + --boot-disk-size 50GB \ --boot-disk-type=pd-ssd \ --image-project=cos-cloud \ --image-family=cos-stable \ - --user-output-enabled \ - --metadata google-logging-enabled=true,google-logging-use-fluentbit=true,google-monitoring-enabled=true \ + --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ + --create-disk="${DISK_PARAMS}" \ + --container-mount-disk=mount-path='/var/cache/zebrad-cache',name=${NAME},mode=rw \ --container-stdin \ --container-tty \ --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ --container-env "NETWORK=${{ matrix.network }},LOG_FILE=${{ vars.CD_LOG_FILE }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }}" \ - --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},device-name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd,mode=rw \ - --container-mount-disk=mount-path='/var/cache/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},mode=rw \ - --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ - --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \ --scopes cloud-platform \ - --labels=app=zebrad,environment=prod,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }} \ + --metadata google-logging-enabled=true,google-logging-use-fluentbit=true,google-monitoring-enabled=true \ + --labels=app=zebrad,environment=staging,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }} \ --tags zebrad # Check if our destination instance group exists already @@ -297,9 +318,11 @@ # Note: these instances are not automatically replaced or deleted deploy-instance: name: Deploy single ${{ inputs.network }} instance - needs: [ build, test-configuration-file, test-zebra-conf-path ] + needs: [ build, test-configuration-file, test-zebra-conf-path, get-disk-name ] runs-on: ubuntu-latest timeout-minutes: 30 + env: + CACHED_DISK_NAME: ${{ needs.get-disk-name.outputs.cached_disk_name }} permissions: contents: 'read' id-token: 'write' @@ -340,22 +363,30 @@ # Create instance template from container image - name: Manual deploy of a single ${{ inputs.network }} instance running zebrad run: | + NAME="zebrad-cache-${{
env.GITHUB_HEAD_REF_SLUG_URL || env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" + DISK_PARAMS="name=${NAME},device-name=${NAME},size=400GB,type=pd-ssd" + if [ -n "${{ env.CACHED_DISK_NAME }}" ]; then + DISK_PARAMS+=",image=${{ env.CACHED_DISK_NAME }}" + else + echo "No cached disk found for ${{ inputs.network }} in main branch" + exit 1 + fi gcloud compute instances create-with-container "zebrad-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}-${NETWORK}" \ - --boot-disk-size 300GB \ + --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ + --boot-disk-size 50GB \ --boot-disk-type=pd-ssd \ --image-project=cos-cloud \ --image-family=cos-stable \ - --user-output-enabled \ - --metadata google-logging-enabled=true,google-logging-use-fluentbit=true,google-monitoring-enabled=true \ + --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ + --create-disk="${DISK_PARAMS}" \ + --container-mount-disk=mount-path='/var/cache/zebrad-cache',name=${NAME},mode=rw \ --container-stdin \ --container-tty \ --container-image ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} \ --container-env "NETWORK=${{ inputs.network }},LOG_FILE=${{ inputs.log_file }},LOG_COLOR=false,SENTRY_DSN=${{ vars.SENTRY_DSN }}" \ - --create-disk=name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},device-name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},auto-delete=yes,size=300GB,type=pd-ssd,mode=rw \ - --container-mount-disk=mount-path='/var/cache/zebrad-cache',name=zebrad-cache-${{ env.GITHUB_SHA_SHORT }}-${NETWORK},mode=rw \ - --machine-type ${{ vars.GCP_SMALL_MACHINE }} \ - --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ --service-account ${{ vars.GCP_DEPLOYMENTS_SA }} \ + --scopes cloud-platform \ + --metadata google-logging-enabled=true,google-monitoring-enabled=true \ --labels=app=zebrad,environment=qa,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }} \ --tags zebrad \ --zone ${{ vars.GCP_ZONE }} diff --git a/.github/workflows/ci-basic.yml b/.github/workflows/ci-basic.yml new file mode 100644 index 00000000000..c2cde8686bf --- /dev/null +++ b/.github/workflows/ci-basic.yml @@ -0,0 +1,35 @@ +name: Basic checks + +#on: [push, pull_request] +on: [push] + +jobs: + test: + name: Test on ${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-24.04] + + env: + # Use system-installed RocksDB library instead of building from scratch + ROCKSDB_LIB_DIR: /usr/lib + # Use system-installed Snappy library for compression in RocksDB + SNAPPY_LIB_DIR: /usr/lib/x86_64-linux-gnu + # Enable the `nu6` feature in `zcash_protocol` + RUSTFLAGS: '--cfg zcash_unstable="nu6"' + RUSTDOCFLAGS: '--cfg zcash_unstable="nu6"' + + steps: + - uses: actions/checkout@v4 + - name: Install dependencies on Ubuntu + #run: sudo apt-get update && sudo apt-get install -y protobuf-compiler build-essential librocksdb-dev + run: sudo apt-get update && sudo apt-get install -y protobuf-compiler librocksdb-dev + - name: Run tests + run: cargo test --verbose + - name: Verify working directory is clean + run: git diff --exit-code + - name: Run doc check + run: cargo doc --all-features --document-private-items + - name: Run format check + run: cargo fmt -- --check diff --git a/.github/workflows/ci-lint.yml b/.github/workflows/ci-lint.yml index 3137dee49db..a2ea13523b8 100644 --- a/.github/workflows/ci-lint.yml +++ b/.github/workflows/ci-lint.yml @@ -44,7 +44,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v45.0.0 + uses:
tj-actions/changed-files@v45.0.2 with: files: | **/*.rs @@ -56,7 +56,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v45.0.0 + uses: tj-actions/changed-files@v45.0.2 with: files: | .github/workflows/*.yml diff --git a/.github/workflows/scripts/gcp-get-available-disks.sh b/.github/workflows/scripts/gcp-get-available-disks.sh deleted file mode 100755 index 667c6f36c4b..00000000000 --- a/.github/workflows/scripts/gcp-get-available-disks.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash - -# Description: -# Check if there are cached state disks available for subsequent jobs to use. -# -# This lookup uses the state version from constants.rs. -# It accepts disks generated by any branch, including draft and unmerged PRs. -# -# If the disk exists, sets the corresponding output to "true": -# - lwd_tip_disk -# - zebra_tip_disk -# - zebra_checkpoint_disk - -set -euxo pipefail - - -LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "${GITHUB_WORKSPACE}/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) -echo "STATE_VERSION: ${LOCAL_STATE_VERSION}" - -# Function to find a disk image and output its name -find_disk_image() { -local base_name="${1}" -local disk_type="${2}" -local disk_pattern="${base_name}-cache" -local output_var="${base_name}_${disk_type}_disk" -local disk_image - -disk_image=$(gcloud compute images list --filter="status=READY AND name~${disk_pattern}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${disk_type}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - -if [[ -z "${disk_image}" ]]; then - echo "No ${disk_type^^} disk found for ${base_name^^} on network: ${NETWORK}" - echo "${output_var}=false" >> "${GITHUB_OUTPUT}" -else - echo "Disk: ${disk_image}" - echo "${output_var}=true" >> "${GITHUB_OUTPUT}" -fi -} - -# Find and output LWD and Zebra disks -find_disk_image "lwd" "tip" -find_disk_image "zebrad" "tip" -find_disk_image "zebrad" "checkpoint" diff --git a/.github/workflows/scripts/gcp-get-cached-disks.sh b/.github/workflows/scripts/gcp-get-cached-disks.sh index 9b05c257096..0f38addf10f 100755 --- a/.github/workflows/scripts/gcp-get-cached-disks.sh +++ b/.github/workflows/scripts/gcp-get-cached-disks.sh @@ -1,20 +1,33 @@ #!/usr/bin/env bash -# Description: # This script finds a cached Google Cloud Compute image based on specific criteria. -# It prioritizes images from the current commit, falls back to the main branch, -# and finally checks other branches if needed. The selected image is used for -# setting up the environment in a CI/CD pipeline. +# +# If there are multiple disks: +# - prefer images generated from the same commit, then +# - if prefer_main_cached_state is true, prefer images from the `main` branch, then +# - use any images from any other branch or commit. +# +# Within each of these categories: +# - prefer newer images to older images +# +# The selected image is used for setting up the environment in a CI/CD pipeline. +# It also checks if specific disk types are available for subsequent jobs. set -eo pipefail -# Function to find and report a cached disk image +# Extract local state version +echo "Extracting local state version..." 
+LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "${GITHUB_WORKSPACE}/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) +echo "STATE_VERSION: ${LOCAL_STATE_VERSION}" + +# Function to find a cached disk image based on the git pattern (commit, main, or any branch) find_cached_disk_image() { - local search_pattern="${1}" + local git_pattern="${1}" local git_source="${2}" local disk_name + local disk_search_pattern="${DISK_PREFIX}-${git_pattern}-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}" - disk_name=$(gcloud compute images list --filter="status=READY AND name~${search_pattern}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) + disk_name=$(gcloud compute images list --filter="status=READY AND name~${disk_search_pattern}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) # Use >&2 to redirect to stderr and avoid sending wrong assignments to stdout if [[ -n "${disk_name}" ]]; then @@ -27,46 +40,71 @@ find_cached_disk_image() { fi } -# Extract local state version -echo "Extracting local state version..." -LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "${GITHUB_WORKSPACE}/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) -echo "STATE_VERSION: ${LOCAL_STATE_VERSION}" +# Check if both $DISK_PREFIX and $DISK_SUFFIX are set, as they are required to find a cached disk image +if [[ -n "${DISK_PREFIX}" && -n "${DISK_SUFFIX}" ]]; then + # Find the most suitable cached disk image + echo "Finding the most suitable cached disk image..." + CACHED_DISK_NAME="" + + # First, try to find a cached disk image from the current commit + CACHED_DISK_NAME=$(find_cached_disk_image ".+-${GITHUB_SHA_SHORT}" "commit") -# Define DISK_PREFIX based on the requiring state directory -if [[ "${NEEDS_LWD_STATE}" == "true" ]]; then - DISK_PREFIX="${LWD_STATE_DIR}" + # If no cached disk image is found + if [[ -z "${CACHED_DISK_NAME}" ]]; then + # Check if main branch images are preferred + if [[ "${PREFER_MAIN_CACHED_STATE}" == "true" ]]; then + CACHED_DISK_NAME=$(find_cached_disk_image "main-[0-9a-f]+" "main branch") + # Else, try to find one from any branch + else + CACHED_DISK_NAME=$(find_cached_disk_image ".+-[0-9a-f]+" "any branch") + fi + fi + + # Handle case where no suitable disk image is found + if [[ -z "${CACHED_DISK_NAME}" ]]; then + echo "No suitable cached state disk available." + echo "Cached state test jobs must depend on the cached state rebuild job." + exit 1 + fi + + echo "Selected Disk: ${CACHED_DISK_NAME}" else - DISK_PREFIX="${ZEBRA_STATE_DIR:-${DISK_PREFIX}}" + echo "DISK_PREFIX or DISK_SUFFIX is not set. Skipping disk image search." fi -# Find the most suitable cached disk image -echo "Finding the most suitable cached disk image..." 
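Stepping back from the hunk for a moment: the rewritten script is driven entirely by environment variables and is meant to be sourced, so callers read its results from exported variables rather than stdout. A usage sketch with illustrative values (the workflow step later in this diff does the same thing):

```bash
#!/usr/bin/env bash
# Sketch: driving gcp-get-cached-disks.sh by hand; every value is illustrative.
export GITHUB_WORKSPACE="$PWD"       # checkout containing zebra-state/src/constants.rs
export GITHUB_SHA_SHORT="abc1234"    # commit whose images are preferred
export NETWORK="mainnet"
export DISK_PREFIX="zebrad-cache"
export DISK_SUFFIX="tip"
export PREFER_MAIN_CACHED_STATE="true"

source ./.github/workflows/scripts/gcp-get-cached-disks.sh

# Exported by the script on success; it exits 1 if no suitable image exists.
echo "state version: ${LOCAL_STATE_VERSION}"
echo "selected disk: ${CACHED_DISK_NAME}"
```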
-if [[ -z "${CACHED_DISK_NAME}" ]]; then - # Try to find a cached disk image from the current commit - COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${GITHUB_SHA_SHORT}-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}" - CACHED_DISK_NAME=$(find_cached_disk_image "${COMMIT_DISK_PREFIX}" "commit") - # If no cached disk image is found, try to find one from the main branch - if [[ "${PREFER_MAIN_CACHED_STATE}" == "true" ]]; then - MAIN_DISK_PREFIX="${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}" - CACHED_DISK_NAME=$(find_cached_disk_image "${MAIN_DISK_PREFIX}" "main branch") - # Else, try to find one from any branch +# Function to find and output available disk image types (e.g., lwd_tip_disk, zebra_tip_disk, zebra_checkpoint_disk) +find_available_disk_type() { + local base_name="${1}" + local disk_type="${2}" + local disk_pattern="${base_name}-cache" + local output_var="${base_name}_${disk_type}_disk" + local disk_name + + disk_name=$(gcloud compute images list --filter="status=READY AND name~${disk_pattern}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${disk_type}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) + + # Use >&2 to redirect to stderr and avoid sending wrong assignments to stdout + if [[ -n "${disk_name}" ]]; then + echo "Found ${disk_type^^} disk: ${disk_name} for ${base_name^^} on network: ${NETWORK}" >&2 + disk_description=$(gcloud compute images describe "${disk_name}" --format="value(DESCRIPTION)") + echo "Description: ${disk_description}" >&2 + echo "true" # This is the actual return value when a disk is found else - ANY_DISK_PREFIX="${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${DISK_SUFFIX}" - CACHED_DISK_NAME=$(find_cached_disk_image "${ANY_DISK_PREFIX}" "any branch") + echo "No ${disk_type^^} disk found for ${base_name^^} on network: ${NETWORK}" >&2 + echo "false" # This is the actual return value when no disk is found fi +} +if [[ -n "${NETWORK}" ]]; then + # Check for specific disk images (lwd_tip_disk, zebra_tip_disk, zebra_checkpoint_disk) + echo "Checking for specific disk images..." + LWD_TIP_DISK=$(find_available_disk_type "lwd" "tip") + ZEBRA_TIP_DISK=$(find_available_disk_type "zebrad" "tip") + ZEBRA_CHECKPOINT_DISK=$(find_available_disk_type "zebrad" "checkpoint") fi -# Handle case where no suitable disk image is found -if [[ -z "${CACHED_DISK_NAME}" ]]; then - echo "No suitable cached state disk available." - echo "Expected pattern: ${COMMIT_DISK_PREFIX}" - echo "Cached state test jobs must depend on the cached state rebuild job." - exit 1 -fi - -echo "Selected Disk: ${CACHED_DISK_NAME}" - # Exporting variables for subsequent steps echo "Exporting variables for subsequent steps..." export CACHED_DISK_NAME="${CACHED_DISK_NAME}" export LOCAL_STATE_VERSION="${LOCAL_STATE_VERSION}" +export LWD_TIP_DISK="${LWD_TIP_DISK}" +export ZEBRA_TIP_DISK="${ZEBRA_TIP_DISK}" +export ZEBRA_CHECKPOINT_DISK="${ZEBRA_CHECKPOINT_DISK}" diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 83af03ad6e1..946241a4bae 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -4,6 +4,7 @@ # - Uses Docker Buildx for improved build performance and caching. # - Builds the Docker image and pushes it to both Google Artifact Registry and potentially DockerHub, depending on release type. # - Manages caching strategies to optimize build times across different branches. 
+# - Uses Docker Scout to display vulnerabilities and recommendations for the latest built image. name: Build docker image on: @@ -75,6 +76,7 @@ jobs: permissions: contents: 'read' id-token: 'write' + pull-requests: write # for `docker-scout` to be able to write the comment env: DOCKER_BUILD_SUMMARY: ${{ vars.DOCKER_BUILD_SUMMARY }} steps: @@ -150,7 +152,7 @@ # Setup Docker Buildx to use Docker Build Cloud - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v3.6.1 with: version: "lab:latest" driver: cloud @@ -179,3 +181,32 @@ # Don't read from the cache if the caller disabled it. # https://docs.docker.com/engine/reference/commandline/buildx_build/#options no-cache: ${{ inputs.no_cache }} + + # For the latest built image, display: + # - the vulnerabilities (ignoring the base image, and only displaying vulnerabilities with a critical or high security severity) + # - the available recommendations + # - a comparison with the latest image indexed in Docker Hub (only displaying changed packages and vulnerabilities that already have a fix) + # + # Record the image in the Scout environment based on the event type, for example: + # - `prod` for a release event + # - `stage` for a push event to the main branch + # - `dev` for a pull request event - name: Docker Scout + id: docker-scout + uses: docker/scout-action@v1.14.0 + # We only run Docker Scout on the `runtime` target, as the other targets are not meant to be released + # and are commonly used for testing, and thus are ephemeral. + # TODO: Remove the `contains` check once we have a better way to determine if just new vulnerabilities are present. + # See: https://github.com/docker/scout-action/issues/56 + if: ${{ inputs.dockerfile_target == 'runtime' && contains(github.event.pull_request.title, 'Release v') }} + with: + command: cves,recommendations,compare,environment + image: us-docker.pkg.dev/${{ vars.GCP_PROJECT }}/zebra/${{ inputs.image_name }}:${{ steps.meta.outputs.version }} + to: zfnd/zebra:latest + ignore-base: true + ignore-unchanged: true + only-fixed: true + only-severities: critical,high + environment: ${{ (github.event_name == 'release' && !github.event.release.prerelease && 'prod') || (github.event_name == 'push' && github.ref_name == 'main' && 'stage') || (github.event_name == 'pull_request' && 'dev') }} + organization: zfnd + github-token: ${{ secrets.GITHUB_TOKEN }} # to be able to write the comment diff --git a/.github/workflows/sub-ci-integration-tests-gcp.yml b/.github/workflows/sub-ci-integration-tests-gcp.yml index 76cb168feb9..3ff5ab1e79a 100644 --- a/.github/workflows/sub-ci-integration-tests-gcp.yml +++ b/.github/workflows/sub-ci-integration-tests-gcp.yml @@ -31,6 +31,10 @@ on: #! #! The job names in `ci-integration-tests-gcp.yml`, `ci-integration-tests-gcp.patch.yml` and #! `ci-integration-tests-gcp.patch-external.yml` must be kept in sync. +#! +#! The test variables ZEBRA_CACHED_STATE_DIR and LIGHTWALLETD_DATA_DIR used in some steps are set in the +#! `sub-deploy-integration-tests-gcp.yml` workflow file as inputs. If modified in this file, they must +#! also be updated in the `sub-deploy-integration-tests-gcp.yml` file.
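To make the coupling described above concrete: an `-e ZEBRA_CACHED_STATE_DIR=...` flag in `test_variables` only works because the deploy workflow mounts the cached-state disk at that same container path. A simplified sketch of the pairing (the paths are the ones used in this diff; the device and image names are placeholders):

```bash
# Sketch: the env var inside the container and the mount destination must agree.
DISK_NAME="sdb"   # placeholder for the attached cached-state disk device
sudo docker run \
  -e ZEBRA_CACHED_STATE_DIR="/var/cache/zebrad-cache" \
  --mount "type=volume,volume-driver=local,volume-opt=device=/dev/${DISK_NAME},volume-opt=type=ext4,dst=/var/cache/zebrad-cache" \
  zfnd/zebra:latest
```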
jobs: # to also run a job on Mergify head branches, # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`: @@ -79,7 +83,7 @@ jobs: app_name: zebrad test_id: sync-to-checkpoint test_description: Test sync up to mandatory checkpoint - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1 -e ZEBRA_FORCE_USE_COLOR=1" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_DISK_REBUILD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" needs_zebra_state: false saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} @@ -108,7 +112,7 @@ jobs: app_name: zebrad test_id: sync-past-checkpoint test_description: Test full validation sync from a cached state - test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1" + test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_CHECKPOINT_SYNC=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" needs_zebra_state: true saves_to_disk: false disk_suffix: checkpoint @@ -138,13 +142,12 @@ jobs: test_description: Test a full sync up to the tip # The value of FULL_SYNC_MAINNET_TIMEOUT_MINUTES is currently ignored. # TODO: update the test to use {{ input.network }} instead? - test_variables: "-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1" + test_variables: "-e NETWORK=Mainnet -e FULL_SYNC_MAINNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" # This test runs for longer than 6 hours, so it needs multiple jobs is_long_test: true needs_zebra_state: false saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} - disk_suffix: tip height_grep_text: 'current_height.*=.*Height.*\(' secrets: inherit # We want to prevent multiple full zebrad syncs running at the same time, @@ -184,9 +187,6 @@ jobs: # update the disk on every PR, to increase CI speed saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} - disk_suffix: tip - root_state_path: "/var/cache" - zebra_state_dir: "zebrad-cache" height_grep_text: 'current_height.*=.*Height.*\(' secrets: inherit @@ -217,9 +217,6 @@ jobs: needs_zebra_state: true # test-update-sync updates the disk on every PR, so we don't need to do it here saves_to_disk: false - disk_suffix: tip - root_state_path: "/var/cache" - zebra_state_dir: "zebrad-cache" height_grep_text: 'current_height.*=.*Height.*\(' secrets: inherit @@ -248,7 +245,7 @@ jobs: test_id: full-sync-testnet test_description: Test a full sync up to the tip on testnet # The value of FULL_SYNC_TESTNET_TIMEOUT_MINUTES is currently ignored. - test_variables: "-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1" + test_variables: "-e NETWORK=Testnet -e FULL_SYNC_TESTNET_TIMEOUT_MINUTES=0 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" network: "Testnet" # A full testnet sync could take 2-10 hours in April 2023. # The time varies a lot due to the small number of nodes. 
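The recurring "prevent multiple full zebrad syncs running at the same time" notes are typically enforced with a `concurrency` group on the job or workflow; the group name below is illustrative, not necessarily the one this repository uses:

```yaml
# Sketch: serialize long-running sync jobs.
concurrency:
  group: full-sync-${{ github.workflow }}
  cancel-in-progress: false  # queue behind a running sync instead of cancelling it
```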
@@ -256,7 +253,6 @@ jobs: needs_zebra_state: false saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} - disk_suffix: tip height_grep_text: 'current_height.*=.*Height.*\(' secrets: inherit # We want to prevent multiple full zebrad syncs running at the same time, @@ -300,9 +296,6 @@ jobs: # we don't have a test-update-sync-testnet job, so we need to update the disk here saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} - disk_suffix: tip - root_state_path: "/var/cache" - zebra_state_dir: "zebrad-cache" height_grep_text: 'zebra_tip_height.*=.*Height.*\(' secrets: inherit @@ -335,10 +328,6 @@ jobs: saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_prefix: lwd-cache - disk_suffix: tip - root_state_path: "/var/cache" - zebra_state_dir: "zebrad-cache" - lwd_state_dir: "lwd-cache" height_grep_text: "Waiting for block: " secrets: inherit # We want to prevent multiple lightwalletd full syncs running at the same time, @@ -372,10 +361,6 @@ jobs: saves_to_disk: true force_save_to_disk: ${{ inputs.force_save_to_disk || false }} disk_prefix: lwd-cache - disk_suffix: tip - root_state_path: "/var/cache" - zebra_state_dir: "zebrad-cache" - lwd_state_dir: "lwd-cache" height_grep_text: "Waiting for block: " secrets: inherit @@ -401,9 +386,6 @@ jobs: test_variables: "-e NETWORK=${{ inputs.network || vars.ZCASH_NETWORK }} -e TEST_LWD_RPC_CALL=1 -e ZEBRA_TEST_LIGHTWALLETD=1 -e ZEBRA_FORCE_USE_COLOR=1 -e ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache" needs_zebra_state: true saves_to_disk: false - disk_suffix: tip - root_state_path: "/var/cache" - zebra_state_dir: "zebrad-cache" secrets: inherit # Test that Zebra can handle a lightwalletd send transaction RPC call, using a cached Zebra tip state @@ -427,10 +409,6 @@ jobs: needs_zebra_state: true needs_lwd_state: true saves_to_disk: false - disk_suffix: tip - root_state_path: "/var/cache" - zebra_state_dir: "zebrad-cache" - lwd_state_dir: "lwd-cache" secrets: inherit # Test that Zebra can handle gRPC wallet calls, using a cached Zebra tip state @@ -454,10 +432,6 @@ jobs: needs_zebra_state: true needs_lwd_state: true saves_to_disk: false - disk_suffix: tip - root_state_path: "/var/cache" - zebra_state_dir: "zebrad-cache" - lwd_state_dir: "lwd-cache" secrets: inherit ## getblocktemplate-rpcs using cached Zebra state on mainnet @@ -485,9 +459,6 @@ jobs: needs_zebra_state: true needs_lwd_state: false saves_to_disk: false - disk_suffix: tip - root_state_path: "/var/cache" - zebra_state_dir: "zebrad-cache" secrets: inherit # Test that Zebra can handle a submit block RPC call, using a cached Zebra tip state @@ -511,9 +482,6 @@ jobs: needs_zebra_state: true needs_lwd_state: false saves_to_disk: false - disk_suffix: tip - root_state_path: "/var/cache" - zebra_state_dir: "zebrad-cache" secrets: inherit # Test that the scanner can continue scanning where it was left when zebrad restarts. @@ -537,9 +505,6 @@ jobs: needs_zebra_state: true needs_lwd_state: false saves_to_disk: true - disk_suffix: tip - root_state_path: "/var/cache" - zebra_state_dir: "zebrad-cache" secrets: inherit # Test that the scan task registers keys, deletes keys, and subscribes to results for keys while running. 
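The removals of `disk_suffix: tip`, `root_state_path`, `zebra_state_dir`, and `lwd_state_dir` throughout this file are safe because `sub-deploy-integration-tests-gcp.yml` (changed later in this diff) now declares defaults for them; in a reusable workflow, a `workflow_call` input default applies whenever the caller omits the input, as in this sketch:

```yaml
# Sketch: callers may simply omit inputs that declare a default.
on:
  workflow_call:
    inputs:
      disk_suffix:
        required: false
        type: string
        default: 'tip'  # used when the calling workflow passes no disk_suffix
```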
@@ -563,9 +528,6 @@ jobs: needs_zebra_state: true needs_lwd_state: false saves_to_disk: false - disk_suffix: tip - root_state_path: "/var/cache" - zebra_state_dir: "zebrad-cache" secrets: inherit failure-issue: diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 09af3dd310c..4d1d346ff1d 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -35,23 +35,15 @@ on: # Cached state # - # TODO: find a better name - root_state_path: - required: false - type: string - default: '/zebrad-cache' - description: 'Cached state base directory path' - # TODO: find a better name zebra_state_dir: required: false type: string - default: '' + default: '/var/cache/zebrad-cache' description: 'Zebra cached state directory and input image prefix to search in GCP' - # TODO: find a better name lwd_state_dir: required: false type: string - default: '' + default: '/var/cache/lwd-cache' description: 'Lightwalletd cached state directory and input image prefix to search in GCP' disk_prefix: required: false @@ -61,6 +53,7 @@ on: disk_suffix: required: false type: string + default: 'tip' description: 'Image name suffix' needs_zebra_state: required: true @@ -104,6 +97,29 @@ env: CACHED_STATE_UPDATE_LIMIT: 576 jobs: + # Find a cached state disk for ${{ inputs.test_id }}, matching all of: + # - disk cached state prefix -> zebrad-cache or lwd-cache + # - state version (from the source code) - v{N} + # - network (network) - mainnet or testnet + # - disk target height kind (disk_suffix) - checkpoint or tip + # + # If the test needs a lightwalletd state (needs_lwd_state) set the input disk_prefix accordingly + # - To lwd-cache if needed + # - To zebrad-cache if not + # + # Passes the disk name to subsequent jobs using `cached_disk_name` output + # Passes the state version to subsequent jobs using `state_version` output + # + get-disk-name: + name: Get disk name + uses: ./.github/workflows/sub-find-cached-disks.yml + with: + network: ${{ inputs.network || vars.ZCASH_NETWORK }} + disk_prefix: ${{ inputs.needs_lwd_state && 'lwd-cache' || inputs.needs_zebra_state && 'zebrad-cache' }} + disk_suffix: ${{ inputs.disk_suffix }} + prefer_main_cached_state: ${{ inputs.prefer_main_cached_state }} + test_id: ${{ inputs.test_id }} + # Show all the test logs, then follow the logs of the test we just launched, until it finishes. # Then check the result of the test. 
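The `disk_prefix` line in the new `get-disk-name` job relies on the usual GitHub Actions idiom for conditional values, since expressions have no ternary operator: `cond && 'a' || 'b'` evaluates to `'a'` when `cond` is true (and `'a'` itself is truthy), otherwise `'b'`. A small sketch of the same pattern, with an illustrative variable name:

```yaml
# Sketch: chained && / || emulates if/else in Actions expressions.
env:
  # lwd-cache when a lightwalletd state is needed, zebrad-cache otherwise.
  CACHE_KIND: ${{ inputs.needs_lwd_state && 'lwd-cache' || 'zebrad-cache' }}
```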
# @@ -111,9 +127,14 @@ jobs: test-result: name: Run ${{ inputs.test_id }} test runs-on: zfnd-runners + needs: [ get-disk-name ] + if: ${{ !cancelled() && !failure() }} timeout-minutes: ${{ inputs.is_long_test && 7200 || 180 }} outputs: - cached_disk_name: ${{ steps.get-disk-name.outputs.cached_disk_name }} + cached_disk_name: ${{ needs.get-disk-name.outputs.cached_disk_name }} + state_version: ${{ needs.get-disk-name.outputs.state_version }} + env: + CACHED_DISK_NAME: ${{ needs.get-disk-name.outputs.cached_disk_name }} permissions: contents: 'read' id-token: 'write' @@ -158,47 +179,8 @@ jobs: - name: Set up Cloud SDK uses: google-github-actions/setup-gcloud@v2.1.1 - # Find a cached state disk for this job, matching all of: - # - disk cached state (lwd_state_dir/zebra_state_dir or disk_prefix) - zebrad-cache or lwd-cache - # - state version (from the source code) - v{N} - # - network (network) - mainnet or testnet - # - disk target height kind (disk_suffix) - checkpoint or tip - # - # If the test needs a lightwalletd state (needs_lwd_state) set the variable DISK_PREFIX accordingly - # - To ${{ inputs.lwd_state_dir }}" if needed - # - To ${{ inputs.zebra_state_dir || inputs.disk_prefix }} if not - # - # If there are multiple disks: - # - prefer images generated from the same commit, then - # - if prefer_main_cached_state is true, prefer images from the `main` branch, then - # - use any images from any other branch or commit. - # Within each of these categories: - # - prefer newer images to older images - # - # Passes the disk name to subsequent steps using $CACHED_DISK_NAME env variable - # Passes the state version to subsequent steps using $STATE_VERSION env variable - # - # TODO: move this script into a file, and call it from sub-find-cached-disks.yml as well. - - name: Find ${{ inputs.test_id }} cached state disk - id: get-disk-name - if: ${{ inputs.needs_zebra_state || inputs.needs_lwd_state }} - env: - GITHUB_SHA_SHORT: ${{ env.GITHUB_SHA_SHORT }} - NEEDS_LWD_STATE: ${{ inputs.needs_lwd_state }} - LWD_STATE_DIR: ${{ inputs.lwd_state_dir }} - ZEBRA_STATE_DIR: ${{ inputs.zebra_state_dir }} - DISK_PREFIX: ${{ inputs.disk_prefix }} - NETWORK: ${{ env.NETWORK }} # use lowercase version from env, not input - DISK_SUFFIX: ${{ inputs.disk_suffix }} - PREFER_MAIN_CACHED_STATE: ${{ inputs.prefer_main_cached_state }} - run: | - source ./.github/workflows/scripts/gcp-get-cached-disks.sh - echo "STATE_VERSION=${LOCAL_STATE_VERSION}" >> "${GITHUB_ENV}" - echo "CACHED_DISK_NAME=${CACHED_DISK_NAME}" >> "${GITHUB_ENV}" - echo "cached_disk_name=${CACHED_DISK_NAME}" >> "${GITHUB_OUTPUT}" - # Create a Compute Engine virtual machine and attach a cached state disk using the - # $CACHED_DISK_NAME variable as the source image to populate the disk cached state + # $CACHED_DISK_NAME env as the source image to populate the disk cached state # if the test needs it. 
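The new `if: ${{ !cancelled() && !failure() }}` guard on `test-result` matters because the implicit default of `if: success()` would skip the job whenever any of its `needs` was skipped; the same override already appears on the save-state job later in this file. A minimal sketch of the pattern:

```yaml
# Sketch: run even if an upstream job was skipped, but not after a
# failure or a cancellation (the default condition is `success()`).
jobs:
  downstream:
    needs: [ upstream ]
    if: ${{ !cancelled() && !failure() }}
    runs-on: ubuntu-latest
    steps:
      - run: echo "runs even when upstream was skipped"
```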
- name: Create ${{ inputs.test_id }} GCP compute instance id: create-instance @@ -256,8 +238,7 @@ # # The disk mounted in the VM is located at /dev/$DISK_NAME, we mount the root `/` of this disk to the docker # container, and might have two different paths (if lightwalletd state is needed): - # - /var/cache/zebrad-cache -> ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} -> $ZEBRA_CACHED_STATE_DIR - # - /var/cache/lwd-cache -> ${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} -> $LIGHTWALLETD_DATA_DIR + # - ${{ inputs.zebra_state_dir }} and ${{ inputs.lwd_state_dir }} # # Currently we do this by mounting the same disk at both paths. # @@ -268,7 +249,7 @@ # These paths must match the variables used by the tests in Rust, which are also set in # `ci-unit-tests-docker.yml` to be able to run these tests. # - # Although we're mounting the disk root to both directories, Zebra and Lightwalletd + # Although we're mounting the disk root to both directories, Zebra and Lightwalletd tests # will only respect the values from $ZEBRA_CACHED_STATE_DIR and $LIGHTWALLETD_DATA_DIR, # the inputs like ${{ inputs.zebra_state_dir }} and ${{ inputs.lwd_state_dir }} # are only used to match those variables' paths. @@ -286,12 +267,12 @@ # Extract the correct disk name based on the device-name DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | cut -c 2-) - MOUNT_FLAGS="--mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }}" + MOUNT_FLAGS="--mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.zebra_state_dir }}" # Check if we need to mount for Lightwalletd state # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. if [[ "${{ inputs.needs_lwd_state }}" == "true" || "${{ inputs.test_id }}" == "lwd-full-sync" ]]; then - MOUNT_FLAGS="$MOUNT_FLAGS --mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }}" + MOUNT_FLAGS="$MOUNT_FLAGS --mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.lwd_state_dir }}" fi sudo docker run \ @@ -401,6 +382,9 @@ # Normally, if a job is skipped, all the jobs that depend on it are also skipped. # So we need to override the default success() check to make this job run. if: ${{ !cancelled() && !failure() && (inputs.saves_to_disk || inputs.force_save_to_disk) }} + env: + STATE_VERSION: ${{ needs.test-result.outputs.state_version }} + CACHED_DISK_NAME: ${{ needs.test-result.outputs.cached_disk_name }} permissions: contents: 'read' id-token: 'write' @@ -457,17 +441,6 @@ - name: Set up Cloud SDK uses: google-github-actions/setup-gcloud@v2.1.1 - # Get the state version from the local constants.rs file to be used in the image creation, - # as the state version is part of the disk image name.
- # - # Passes the state version to subsequent steps using $STATE_VERSION env variable - - name: Get state version from constants.rs - run: | - LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" $GITHUB_WORKSPACE/zebra-state/src/constants.rs | grep -oE "[0-9]+" | tail -n1) - echo "STATE_VERSION: $LOCAL_STATE_VERSION" - - echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> "$GITHUB_ENV" - # Sets the $UPDATE_SUFFIX env var to "-u" if updating a previous cached state, # and the empty string otherwise. # @@ -641,7 +614,7 @@ jobs: - name: Get original cached state height from google cloud run: | ORIGINAL_HEIGHT="0" - ORIGINAL_DISK_NAME="${{ format('{0}', needs.test-result.outputs.cached_disk_name) }}" + ORIGINAL_DISK_NAME="${{ format('{0}', env.CACHED_DISK_NAME) }}" if [[ -n "$ORIGINAL_DISK_NAME" ]]; then ORIGINAL_HEIGHT=$(gcloud compute images list --filter="status=READY AND name=$ORIGINAL_DISK_NAME" --format="value(labels.height)") diff --git a/.github/workflows/sub-find-cached-disks.yml b/.github/workflows/sub-find-cached-disks.yml index 79fdbff8efb..00254c14be5 100644 --- a/.github/workflows/sub-find-cached-disks.yml +++ b/.github/workflows/sub-find-cached-disks.yml @@ -14,22 +14,43 @@ on: description: 'The Zcash network used to look up the disks' required: true type: string + disk_prefix: + required: false + type: string + disk_suffix: + required: false + type: string + prefer_main_cached_state: + required: false + type: boolean + test_id: + description: 'The test ID requiring the cached state disks' + required: false + type: string outputs: + state_version: + description: 'The version of the cached state disks' + value: ${{ jobs.get-cached-disks.outputs.state_version }} + cached_disk_name: + description: 'The name of the cached state disk' + value: ${{ jobs.get-cached-disks.outputs.cached_disk_name }} lwd_tip_disk: description: 'true if there is a lightwalletd and Zebra cached state disk, synced near the chain tip' - value: ${{ jobs.get-available-disks.outputs.lwd_tip_disk }} + value: ${{ jobs.get-cached-disks.outputs.lwd_tip_disk }} zebra_tip_disk: description: 'true if there is a Zebra cached state disk synced near the chain tip' - value: ${{ jobs.get-available-disks.outputs.zebra_tip_disk }} + value: ${{ jobs.get-cached-disks.outputs.zebra_tip_disk }} zebra_checkpoint_disk: description: 'true if there is a Zebra cached state disk synced to the mandatory Zebra checkpoint' - value: ${{ jobs.get-available-disks.outputs.zebra_checkpoint_disk }} + value: ${{ jobs.get-cached-disks.outputs.zebra_checkpoint_disk }} jobs: - get-available-disks: - name: Check if cached state disks exist + get-cached-disks: + name: Get ${{ inputs.test_id || inputs.network }} cached disk runs-on: ubuntu-latest outputs: + state_version: ${{ steps.get-available-disks.outputs.state_version }} + cached_disk_name: ${{ steps.get-available-disks.outputs.cached_disk_name }} lwd_tip_disk: ${{ steps.get-available-disks.outputs.lwd_tip_disk }} zebra_tip_disk: ${{ steps.get-available-disks.outputs.zebra_tip_disk }} zebra_checkpoint_disk: ${{ steps.get-available-disks.outputs.zebra_checkpoint_disk }} @@ -63,38 +84,18 @@ jobs: echo "NETWORK=${NETWORK_CAPS,,}" >> $GITHUB_ENV # Check if there are cached state disks available for subsequent jobs to use. 
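The output rewiring below follows the standard three-hop plumbing for reusable workflows: a step writes a key to `$GITHUB_OUTPUT`, the job republishes it under `outputs:`, and `on.workflow_call.outputs` exposes it to callers. A condensed sketch using the names from this file (the disk name value is a placeholder):

```yaml
# Sketch: step output -> job output -> workflow_call output.
on:
  workflow_call:
    outputs:
      cached_disk_name:
        value: ${{ jobs.get-cached-disks.outputs.cached_disk_name }}
jobs:
  get-cached-disks:
    runs-on: ubuntu-latest
    outputs:
      cached_disk_name: ${{ steps.get-available-disks.outputs.cached_disk_name }}
    steps:
      - id: get-available-disks
        run: echo "cached_disk_name=zebrad-cache-example" >> "$GITHUB_OUTPUT"
```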
- name: Check if cached state disks exist id: get-available-disks env: - GITHUB_WORKSPACE: ${{ env.GITHUB_WORKSPACE }} + GITHUB_SHA_SHORT: ${{ env.GITHUB_SHA_SHORT }} NETWORK: ${{ env.NETWORK }} # use lowercase version from env, not input - # TODO: Use the `gcp-get-available-disks.sh` script instead of the inline script, - # as this is crashing. And it might related to the returned JSON values. + DISK_PREFIX: ${{ inputs.disk_prefix }} + DISK_SUFFIX: ${{ inputs.disk_suffix }} + PREFER_MAIN_CACHED_STATE: ${{ inputs.prefer_main_cached_state }} run: | - # ./.github/workflows/scripts/gcp-get-available-disks.sh - LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "$GITHUB_WORKSPACE/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) - echo "STATE_VERSION: $LOCAL_STATE_VERSION" - LWD_TIP_DISK=$(gcloud compute images list --filter="status=READY AND name~lwd-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-tip" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - if [[ -z "$LWD_TIP_DISK" ]]; then - echo "No TIP disk found for lightwalletd on network: ${NETWORK}" - echo "lwd_tip_disk=${{ toJSON(false) }}" >> "$GITHUB_OUTPUT" - else - echo "Disk: $LWD_TIP_DISK" - echo "lwd_tip_disk=${{ toJSON(true) }}" >> "$GITHUB_OUTPUT" - fi - ZEBRA_TIP_DISK=$(gcloud compute images list --filter="status=READY AND name~zebrad-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-tip" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - if [[ -z "$ZEBRA_TIP_DISK" ]]; then - echo "No TIP disk found for Zebra on network: ${NETWORK}" - echo "zebra_tip_disk=${{ toJSON(false) }}" >> "$GITHUB_OUTPUT" - else - echo "Disk: $ZEBRA_TIP_DISK" - echo "zebra_tip_disk=${{ toJSON(true) }}" >> "$GITHUB_OUTPUT" - fi - ZEBRA_CHECKPOINT_DISK=$(gcloud compute images list --filter="status=READY AND name~zebrad-cache-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-checkpoint" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - if [[ -z "$ZEBRA_CHECKPOINT_DISK" ]]; then - echo "No CHECKPOINT disk found for Zebra on network: ${NETWORK}" - echo "zebra_checkpoint_disk=${{ toJSON(false) }}" >> "$GITHUB_OUTPUT" - else - echo "Disk: $ZEBRA_CHECKPOINT_DISK" - echo "zebra_checkpoint_disk=${{ toJSON(true) }}" >> "$GITHUB_OUTPUT" - fi + source ./.github/workflows/scripts/gcp-get-cached-disks.sh + echo "state_version=${LOCAL_STATE_VERSION}" >> "${GITHUB_OUTPUT}" + echo "cached_disk_name=${CACHED_DISK_NAME}" >> "${GITHUB_OUTPUT}" + echo "lwd_tip_disk=${LWD_TIP_DISK}" >> "${GITHUB_OUTPUT}" + echo "zebra_tip_disk=${ZEBRA_TIP_DISK}" >> "${GITHUB_OUTPUT}" + echo "zebra_checkpoint_disk=${ZEBRA_CHECKPOINT_DISK}" >> "${GITHUB_OUTPUT}" diff --git a/.gitignore b/.gitignore index b4af6c0b81e..8bccc34ffc0 100644 --- a/.gitignore +++ b/.gitignore @@ -158,3 +158,9 @@ $RECYCLE.BIN/ # Windows shortcuts *.lnk + +# Python pycache +__pycache__/ + +# RPC tests cache +zebra-rpc/qa/cache/ diff --git a/Cargo.lock b/Cargo.lock index 76374083800..44876bcdb09 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -137,9 +137,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.14" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ "anstyle", "anstyle-parse", @@ -152,33 +152,33 @@ [[package]] name = "anstyle" -version = "1.0.7" +version
= "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anstyle-parse" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.3" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -198,9 +198,9 @@ checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "arrayref" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" +checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" [[package]] name = "arrayvec" @@ -210,9 +210,9 @@ checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" [[package]] name = "async-compression" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5" +checksum = "fec134f64e2bc57411226dfc4e52dec859ddfc7e711fc5e07b612584f000e4aa" dependencies = [ "flate2", "futures-core", @@ -245,9 +245,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.80" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", @@ -288,7 +288,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "itoa", "matchit", @@ -314,7 +314,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", @@ -339,6 +339,12 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.11.0" @@ -429,18 +435,16 @@ dependencies = [ ] [[package]] -name = "bip32" -version = "0.5.2" +name = "bip0039" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa13fae8b6255872fd86f7faf4b41168661d7d78609f7bfe6771b85c6739a15b" +checksum = "bef0f0152ec5cf17f49a5866afaa3439816207fd4f0a224c0211ffaf5e278426" dependencies = [ - "bs58", "hmac", - "rand_core 0.6.4", - "ripemd", - "secp256k1 0.27.0", + "pbkdf2", 
+ "rand 0.8.5", "sha2", - "subtle", + "unicode-normalization", "zeroize", ] @@ -539,9 +543,9 @@ dependencies = [ [[package]] name = "bridgetree" -version = "0.5.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f62227647af796dd9f1637da0392676a2e200973b817b082fc9be89bf93ddd74" +checksum = "fbfcb6c5a091e80cb3d3b0c1a7f126af4631cd5065b1f9929b139f1be8f3fb62" dependencies = [ "incrementalmerkletree", ] @@ -558,9 +562,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.9.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" +checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c" dependencies = [ "memchr", "serde", @@ -580,9 +584,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.16.1" +version = "1.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b236fc92302c97ed75b38da1f4917b5cdda4984745740f153a5d3059e48d725e" +checksum = "102087e286b4677862ea56cf8fc58bb2cdfa8725c40ffb80fe3a008eb7f2fc83" [[package]] name = "byteorder" @@ -662,13 +666,12 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.100" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c891175c3fb232128f48de6590095e59198bbeb8620c310be349bfc3afd12c7b" +checksum = "26a5c3fd7bfa1ce3897a3a3501d362b2d87b7f2583ebcb4a949ec25911025cbc" dependencies = [ "jobserver", "libc", - "once_cell", ] [[package]] @@ -692,6 +695,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chacha20" version = "0.9.1" @@ -726,7 +735,7 @@ dependencies = [ "iana-time-zone", "num-traits", "serde", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -829,9 +838,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "color-eyre" @@ -863,9 +872,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" [[package]] name = "console" @@ -887,8 +896,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86ed14aa9c9f927213c6e4f3ef75faaad3406134efe84ba2cb7983431d5f0931" dependencies = [ "futures-core", - "prost", - "prost-types", + "prost 0.13.1", + "prost-types 0.13.1", "tonic", "tracing-core", ] @@ -906,8 +915,8 @@ dependencies = [ "hdrhistogram", "humantime", "hyper-util", - "prost", - "prost-types", + "prost 0.13.1", + "prost-types 0.13.1", "serde", "serde_json", "thread_local", @@ -1041,6 +1050,18 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array", + "rand_core 0.6.4", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -1091,12 +1112,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ - "darling_core 0.20.9", - "darling_macro 0.20.9", + "darling_core 0.20.10", + "darling_macro 0.20.10", ] [[package]] @@ -1115,9 +1136,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", @@ -1140,11 +1161,11 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.9" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ - "darling_core 0.20.9", + "darling_core 0.20.10", "quote", "syn 2.0.72", ] @@ -1186,6 +1207,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", + "const-oid", "crypto-common", "subtle", ] @@ -1213,9 +1235,9 @@ dependencies = [ [[package]] name = "document-features" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5282ad69563b5fc40319526ba27e0e7363d552a896f0297d54f767717f9b95" +checksum = "cb6969eaabd2421f8a2775cfd2471a2b634372b4a25d41e3bd647b79912850a0" dependencies = [ "litrs", ] @@ -1226,6 +1248,20 @@ version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest", + "elliptic-curve", + "rfc6979", + "signature", + "spki", +] + [[package]] name = "ed25519" version = "2.2.3" @@ -1255,9 +1291,9 @@ dependencies = [ [[package]] name = "either" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "elasticsearch" @@ -1279,6 +1315,25 @@ dependencies = [ "void", ] +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest", + "ff", + "generic-array", + "group", + "pkcs8", 
+ "rand_core 0.6.4", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "encode_unicode" version = "0.3.6" @@ -1317,7 +1372,7 @@ dependencies = [ [[package]] name = "equihash" version = "0.2.0" -source = "git+https://github.com/zcash/librustzcash/#a1047adf0b6f324dad415db34762dc26f8367ce4" +source = "git+https://github.com/QED-it/librustzcash?branch=txv6-separate-bundles-rebased-dd2#04ebee7fb22303c1e1dc6428def3dd3cecc4715d" dependencies = [ "blake2b_simd", "byteorder", @@ -1352,16 +1407,7 @@ dependencies = [ [[package]] name = "f4jumble" version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a83e8d7fd0c526af4aad893b7c9fe41e2699ed8a776a6c74aecdeafe05afc75" -dependencies = [ - "blake2b_simd", -] - -[[package]] -name = "f4jumble" -version = "0.1.0" -source = "git+https://github.com/zcash/librustzcash/#a1047adf0b6f324dad415db34762dc26f8367ce4" +source = "git+https://github.com/QED-it/librustzcash?branch=txv6-separate-bundles-rebased-dd2#04ebee7fb22303c1e1dc6428def3dd3cecc4715d" dependencies = [ "blake2b_simd", ] @@ -1409,9 +1455,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "7f211bbe8e69bbd0cfdea405084f128ae8b4aaa6b0b522fc8f2b009084797920" dependencies = [ "crc32fast", "miniz_oxide", @@ -1427,7 +1473,7 @@ dependencies = [ "futures-sink", "nanorand", "pin-project", - "spin", + "spin 0.9.8", ] [[package]] @@ -1571,6 +1617,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] @@ -1687,19 +1734,17 @@ dependencies = [ [[package]] name = "half" -version = "2.4.1" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +checksum = "02b4af3693f1b705df946e9fe5631932443781d0aabb423b62fcd4d73f6d2fd0" dependencies = [ - "cfg-if 1.0.0", "crunchy", ] [[package]] name = "halo2_gadgets" version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126a150072b0c38c7b573fe3eaf0af944a7fed09e154071bf2436d3f016f7230" +source = "git+https://github.com/QED-it/halo2?rev=7f5c0babd61f8ca46c9165a1adfac298d3fd3a11#7f5c0babd61f8ca46c9165a1adfac298d3fd3a11" dependencies = [ "arrayvec", "bitvec", @@ -1722,8 +1767,7 @@ checksum = "47716fe1ae67969c5e0b2ef826f32db8c3be72be325e1aa3c1951d06b5575ec5" [[package]] name = "halo2_proofs" version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b867a8d9bbb85fca76fff60652b5cd19b853a1c4d0665cb89bee68b18d2caf0" +source = "git+https://github.com/QED-it/halo2?rev=7f5c0babd61f8ca46c9165a1adfac298d3fd3a11#7f5c0babd61f8ca46c9165a1adfac298d3fd3a11" dependencies = [ "blake2b_simd", "ff", @@ -1764,6 +1808,19 @@ dependencies = [ "num-traits", ] +[[package]] +name = "hdwallet" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a03ba7d4c9ea41552cd4351965ff96883e629693ae85005c501bb4b9e1c48a7" +dependencies = [ + "lazy_static", + "rand_core 0.6.4", + "ring 0.16.20", + "secp256k1", + "thiserror", +] + [[package]] name = "heck" version = "0.3.3" @@ -1884,9 +1941,9 @@ dependencies = [ [[package]] name = "http-body" -version = "1.0.0" +version = "1.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http 1.1.0", @@ -1901,7 +1958,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "pin-project-lite", ] @@ -1974,7 +2031,7 @@ dependencies = [ "futures-util", "h2 0.4.5", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "httparse", "httpdate", "itoa", @@ -2021,7 +2078,7 @@ dependencies = [ "futures-channel", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "hyper 1.4.1", "pin-project-lite", "socket2", @@ -2092,9 +2149,9 @@ dependencies = [ [[package]] name = "incrementalmerkletree" -version = "0.6.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75346da3bd8e3d8891d02508245ed2df34447ca6637e343829f8d08986e9cde2" +checksum = "eb1872810fb725b06b8c153dde9e86f3ec26747b9b60096da7a869883b549cbe" dependencies = [ "either", ] @@ -2210,9 +2267,9 @@ dependencies = [ [[package]] name = "is_terminal_polyfill" -version = "1.70.0" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" @@ -2249,9 +2306,9 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] @@ -2351,6 +2408,20 @@ dependencies = [ "subtle", ] +[[package]] +name = "k256" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +dependencies = [ + "cfg-if 1.0.0", + "ecdsa", + "elliptic-curve", + "once_cell", + "sha2", + "signature", +] + [[package]] name = "known-folders" version = "1.1.0" @@ -2366,7 +2437,7 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin", + "spin 0.9.8", ] [[package]] @@ -2395,12 +2466,12 @@ dependencies = [ [[package]] name = "libloading" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if 1.0.0", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -2434,6 +2505,16 @@ dependencies = [ "lz4-sys", ] +[[package]] +name = "libyml" +version = "0.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3302702afa434ffa30847a83305f0a69d6abd74293b6554c18ec85c7ef30c980" +dependencies = [ + "anyhow", + "version_check", +] + [[package]] name = "libz-sys" version = "1.1.18" @@ -2482,9 +2563,9 @@ checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" [[package]] name = "lz4-sys" -version = "1.9.5" +version = "1.10.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9764018d143cc854c9f17f0b907de70f14393b1f502da6375dce70f00514eb3" +checksum = "109de74d5d2353660401699a4174a4ff23fcc649caf553df71933c7fb45ad868" dependencies = [ "cc", "libc", @@ -2640,6 +2721,18 @@ dependencies = [ "winapi", ] +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.6.0", + "cfg-if 1.0.0", + "cfg_aliases", + "libc", +] + [[package]] name = "nom" version = "7.1.3" @@ -2668,9 +2761,9 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", @@ -2753,9 +2846,9 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" -version = "11.1.3" +version = "11.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "opaque-debug" @@ -2771,9 +2864,8 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchard" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc7bde644aeb980be296cd908c6650894dc8541deb56f9f5294c52ed7ca568f" +version = "0.8.0" +source = "git+https://github.com/QED-it/orchard?branch=zsa1#6e6112c80eb751a93c0fd1d881e9ca69887e1154" dependencies = [ "aes", "bitvec", @@ -2781,10 +2873,12 @@ dependencies = [ "ff", "fpe", "group", + "half", "halo2_gadgets", "halo2_proofs", "hex", "incrementalmerkletree", + "k256", "lazy_static", "memuse", "nonempty", @@ -2794,7 +2888,6 @@ dependencies = [ "serde", "subtle", "tracing", - "visibility", "zcash_note_encryption", "zcash_spec", "zip32", @@ -2917,9 +3010,20 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if 1.0.0", "libc", - "redox_syscall 0.5.2", + "redox_syscall 0.5.3", "smallvec", - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "password-hash" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d791538a6dcc1e7cb7fe6f6b58aca40e7f79403c45b2bc274008b5e647af1d8" +dependencies = [ + "base64ct", + "rand_core 0.6.4", + "subtle", ] [[package]] @@ -2937,6 +3041,16 @@ dependencies = [ "subtle", ] +[[package]] +name = "pbkdf2" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271779f35b581956db91a3e55737327a03aa051e90b1c47aeb189508533adfd7" +dependencies = [ + "digest", + "password-hash", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -2945,9 +3059,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.10" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" +checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" dependencies = [ "memchr", "thiserror", @@ -2956,9 +3070,9 @@ 
dependencies = [ [[package]] name = "pest_derive" -version = "2.7.10" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26293c9193fbca7b1a3bf9b79dc1e388e927e6cacaa78b4a3ab705a1d3d41459" +checksum = "2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a" dependencies = [ "pest", "pest_generator", @@ -2966,9 +3080,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.10" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ec22af7d3fb470a85dd2ca96b7c577a1eb4ef6f1683a9fe9a8c16e136c04687" +checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183" dependencies = [ "pest", "pest_meta", @@ -2979,9 +3093,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.7.10" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7a240022f37c361ec1878d646fc5b7d7c4d28d5946e1a80ad5a7a4f4ca0bdcd" +checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f" dependencies = [ "once_cell", "pest", @@ -3087,9 +3201,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" +checksum = "da544ee218f0d287a911e9c99a39a8c9bc8fcad3cb8db5959940044ecfc67265" [[package]] name = "powerfmt" @@ -3099,9 +3213,12 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] [[package]] name = "prettyplease" @@ -3206,6 +3323,16 @@ dependencies = [ "syn 2.0.72", ] +[[package]] +name = "prost" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +dependencies = [ + "bytes", + "prost-derive 0.12.6", +] + [[package]] name = "prost" version = "0.13.1" @@ -3213,7 +3340,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc" dependencies = [ "bytes", - "prost-derive", + "prost-derive 0.13.1", +] + +[[package]] +name = "prost-build" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" +dependencies = [ + "bytes", + "heck 0.5.0", + "itertools 0.12.1", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost 0.12.6", + "prost-types 0.12.6", + "regex", + "syn 2.0.72", + "tempfile", ] [[package]] @@ -3230,13 +3378,26 @@ dependencies = [ "once_cell", "petgraph", "prettyplease", - "prost", - "prost-types", + "prost 0.13.1", + "prost-types 0.13.1", "regex", "syn 2.0.72", "tempfile", ] +[[package]] +name = "prost-derive" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +dependencies = [ + "anyhow", + "itertools 0.12.1", + "proc-macro2", + "quote", + "syn 2.0.72", +] + [[package]] name = "prost-derive" version = "0.13.1" @@ -3250,13 +3411,22 @@ dependencies = [ "syn 
2.0.72", ] +[[package]] +name = "prost-types" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" +dependencies = [ + "prost 0.12.6", +] + [[package]] name = "prost-types" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cee5168b05f49d4b0ca581206eb14a7b22fafd963efe729ac48eb03266e25cc2" dependencies = [ - "prost", + "prost 0.13.1", ] [[package]] @@ -3409,9 +3579,9 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "11.0.2" +version = "11.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e29830cbb1290e404f24c73af91c5d8d631ce7e128691e9477556b540cd01ecd" +checksum = "cb9ee317cfe3fbd54b36a511efc1edd42e216903c9cd575e686dd68a2ba90d8d" dependencies = [ "bitflags 2.6.0", ] @@ -3478,9 +3648,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" +checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" dependencies = [ "bitflags 2.6.0", ] @@ -3583,15 +3753,40 @@ dependencies = [ "winreg", ] +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + [[package]] name = "rgb" -version = "0.8.37" +version = "0.8.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05aaa8004b64fd573fc9d002f4e632d51ad4f026c2b5ba95fcb6c2f32c2c47d8" +checksum = "e12bc8d2f72df26a5d3178022df33720fbede0d31d82c7291662eff89836994d" dependencies = [ "bytemuck", ] +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin 0.5.2", + "untrusted 0.7.1", + "web-sys", + "winapi", +] + [[package]] name = "ring" version = "0.17.8" @@ -3602,8 +3797,8 @@ dependencies = [ "cfg-if 1.0.0", "getrandom 0.2.15", "libc", - "spin", - "untrusted", + "spin 0.9.8", + "untrusted 0.9.0", "windows-sys 0.52.0", ] @@ -3702,7 +3897,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", - "ring", + "ring 0.17.8", "rustls-webpki", "sct", ] @@ -3722,8 +3917,8 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.8", + "untrusted 0.9.0", ] [[package]] @@ -3761,9 +3956,8 @@ dependencies = [ [[package]] name = "sapling-crypto" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e379398fffad84e49f9a45a05635fc004f66086e65942dbf4eb95332c26d2a" +version = "0.1.3" +source = "git+https://github.com/QED-it/sapling-crypto?branch=zsa1#e19f4d916360842becf2842bfd9b27228e66fa81" dependencies = [ "aes", "bellman", @@ -3803,27 +3997,32 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring", - "untrusted", + "ring 0.17.8", 
+ "untrusted 0.9.0", ] [[package]] -name = "secp256k1" -version = "0.26.0" +name = "sec1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4124a35fe33ae14259c490fd70fa199a32b9ce9502f2ee6bc4f81ec06fa65894" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "secp256k1-sys", - "serde", + "base16ct", + "der", + "generic-array", + "pkcs8", + "subtle", + "zeroize", ] [[package]] name = "secp256k1" -version = "0.27.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" +checksum = "4124a35fe33ae14259c490fd70fa199a32b9ce9502f2ee6bc4f81ec06fa65894" dependencies = [ "secp256k1-sys", + "serde", ] [[package]] @@ -4064,23 +4263,25 @@ version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" dependencies = [ - "darling 0.20.9", + "darling 0.20.10", "proc-macro2", "quote", "syn 2.0.72", ] [[package]] -name = "serde_yaml" -version = "0.9.34+deprecated" +name = "serde_yml" +version = "0.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +checksum = "59e2dd588bf1597a252c3b920e0143eb99b0f76e4e082f4c92ce34fbc9e71ddd" dependencies = [ "indexmap 2.3.0", "itoa", + "libyml", + "memchr", "ryu", "serde", - "unsafe-libyaml", + "version_check", ] [[package]] @@ -4105,9 +4306,9 @@ dependencies = [ [[package]] name = "shardtree" -version = "0.4.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78222845cd8bbe5eb95687407648ff17693a35de5e8abaa39a4681fb21e033f9" +checksum = "3b3cdd24424ce0b381646737fedddc33c4dcf7dcd2d545056b53f7982097bef5" dependencies = [ "bitflags 2.6.0", "either", @@ -4136,14 +4337,15 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ + "digest", "rand_core 0.6.4", ] [[package]] name = "similar" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa42c91313f1d05da9b26f267f931cf178d4aba455b4c4622dd7355eb80c6640" +checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" [[package]] name = "sketches-ddsketch" @@ -4198,6 +4400,12 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + [[package]] name = "spin" version = "0.9.8" @@ -4642,14 +4850,14 @@ dependencies = [ "bytes", "h2 0.4.5", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "hyper 1.4.1", "hyper-timeout", "hyper-util", "percent-encoding", "pin-project", - "prost", + "prost 0.13.1", "socket2", "tokio", "tokio-stream", @@ -4659,6 +4867,19 @@ dependencies = [ "tracing", ] +[[package]] +name = "tonic-build" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build 0.12.6", + "quote", + "syn 2.0.72", +] + [[package]] name = "tonic-build" version = "0.12.1" @@ -4667,7 +4888,7 @@ checksum = 
"568392c5a2bd0020723e3f387891176aabafe36fd9fcd074ad309dfa0c8eb964" dependencies = [ "prettyplease", "proc-macro2", - "prost-build", + "prost-build 0.13.1", "quote", "syn 2.0.72", ] @@ -4678,8 +4899,8 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b742c83ad673e9ab5b4ce0981f7b9e8932be9d60e8682cbf9120494764dbc173" dependencies = [ - "prost", - "prost-types", + "prost 0.13.1", + "prost-types 0.13.1", "tokio", "tokio-stream", "tonic", @@ -5019,10 +5240,10 @@ dependencies = [ ] [[package]] -name = "unsafe-libyaml" -version = "0.2.11" +name = "untrusted" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "untrusted" @@ -5065,9 +5286,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.9.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ "serde", ] @@ -5108,20 +5329,9 @@ dependencies = [ [[package]] name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "visibility" -version = "0.1.1" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.72", -] +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "void" @@ -5331,11 +5541,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -5351,7 +5561,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ "windows-core", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -5360,7 +5570,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -5378,7 +5588,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -5398,18 +5617,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = 
"0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -5420,9 +5639,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -5432,9 +5651,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -5444,15 +5663,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -5462,9 +5681,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -5474,9 +5693,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -5486,9 +5705,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -5498,9 +5717,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" @@ -5559,86 +5778,20 @@ checksum = "213b7324336b53d2414b2db8537e56544d981803139155afa84f76eeebb7a546" [[package]] name = "zcash_address" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6d26f21381dc220836dd8d2a9a10dbe85928a26232b011bc6a42b611789b743" -dependencies = [ - "bech32", - "bs58", - "f4jumble 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_encoding 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_protocol 0.2.0", -] - -[[package]] -name = "zcash_address" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14bccd6cefb76f87b6d15a9e7b02b6c0515648c6de8e806c4e2d6f0f6ae640c5" -dependencies = [ - "bech32", - "bs58", - "f4jumble 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_encoding 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_protocol 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "zcash_address" -version = "0.5.0" -source = "git+https://github.com/zcash/librustzcash/#a1047adf0b6f324dad415db34762dc26f8367ce4" +version = "0.3.2" +source = "git+https://github.com/QED-it/librustzcash?branch=txv6-separate-bundles-rebased-dd2#04ebee7fb22303c1e1dc6428def3dd3cecc4715d" dependencies = [ "bech32", "bs58", - "f4jumble 0.1.0 (git+https://github.com/zcash/librustzcash/)", - "zcash_encoding 0.2.1 (git+https://github.com/zcash/librustzcash/)", - "zcash_protocol 0.3.0 (git+https://github.com/zcash/librustzcash/)", + "f4jumble", + "zcash_encoding", + "zcash_protocol", ] [[package]] name = "zcash_client_backend" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e3a0f3e5d7f299d8b7ef3237697630989c31ab1b162824c99c1cd8bc83715e" -dependencies = [ - "base64 0.21.7", - "bech32", - "bls12_381", - "bs58", - "crossbeam-channel", - "document-features", - "group", - "hex", - "incrementalmerkletree", - "memuse", - "nom", - "nonempty", - "percent-encoding", - "prost", - "rand_core 0.6.4", - "rayon", - "sapling-crypto", - "secrecy", - "shardtree", - "subtle", - "time", - "tonic-build", - "tracing", - "which", - "zcash_address 0.4.0", - "zcash_encoding 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_keys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_note_encryption", - "zcash_primitives 0.16.0", - "zcash_protocol 0.2.0", - "zip32", - "zip321 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "zcash_client_backend" -version = "0.13.0" -source = "git+https://github.com/zcash/librustzcash/#a1047adf0b6f324dad415db34762dc26f8367ce4" +version = "0.12.1" +source = "git+https://github.com/QED-it/librustzcash?branch=txv6-separate-bundles-rebased-dd2#04ebee7fb22303c1e1dc6428def3dd3cecc4715d" dependencies = [ "base64 0.21.7", "bech32", @@ -5653,7 
+5806,7 @@ dependencies = [ "nom", "nonempty", "percent-encoding", - "prost", + "prost 0.12.6", "rand_core 0.6.4", "rayon", "sapling-crypto", @@ -5661,33 +5814,23 @@ dependencies = [ "shardtree", "subtle", "time", - "tonic-build", + "tonic-build 0.10.2", "tracing", "which", - "zcash_address 0.5.0 (git+https://github.com/zcash/librustzcash/)", - "zcash_encoding 0.2.1 (git+https://github.com/zcash/librustzcash/)", - "zcash_keys 0.3.0 (git+https://github.com/zcash/librustzcash/)", + "zcash_address", + "zcash_encoding", + "zcash_keys 0.2.0 (git+https://github.com/QED-it/librustzcash?branch=txv6-separate-bundles-rebased-dd2)", "zcash_note_encryption", - "zcash_primitives 0.17.0 (git+https://github.com/zcash/librustzcash/)", - "zcash_protocol 0.3.0 (git+https://github.com/zcash/librustzcash/)", + "zcash_primitives", + "zcash_protocol", "zip32", - "zip321 0.1.0 (git+https://github.com/zcash/librustzcash/)", -] - -[[package]] -name = "zcash_encoding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052d8230202f0a018cd9b5d1b56b94cd25e18eccc2d8665073bcea8261ab87fc" -dependencies = [ - "byteorder", - "nonempty", + "zip321", ] [[package]] name = "zcash_encoding" -version = "0.2.1" -source = "git+https://github.com/zcash/librustzcash/#a1047adf0b6f324dad415db34762dc26f8367ce4" +version = "0.2.0" +source = "git+https://github.com/QED-it/librustzcash?branch=txv6-separate-bundles-rebased-dd2#04ebee7fb22303c1e1dc6428def3dd3cecc4715d" dependencies = [ "byteorder", "nonempty", @@ -5696,8 +5839,7 @@ dependencies = [ [[package]] name = "zcash_history" version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fde17bf53792f9c756b313730da14880257d7661b5bfc69d0571c3a7c11a76d" +source = "git+https://github.com/QED-it/librustzcash?branch=txv6-separate-bundles-rebased-dd2#04ebee7fb22303c1e1dc6428def3dd3cecc4715d" dependencies = [ "blake2b_simd", "byteorder", @@ -5706,9 +5848,9 @@ dependencies = [ [[package]] name = "zcash_keys" -version = "0.3.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712faf4070107ab0b2828d0eda6aeaf4c3cb02564109832d95b97ad3467c95a5" +checksum = "663489ffb4e51bc4436ff8796832612a9ff3c6516f1c620b5a840cb5dcd7b866" dependencies = [ "bech32", "blake2b_simd", @@ -5723,17 +5865,17 @@ dependencies = [ "secrecy", "subtle", "tracing", - "zcash_address 0.4.0", - "zcash_encoding 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_primitives 0.16.0", - "zcash_protocol 0.2.0", + "zcash_address", + "zcash_encoding", + "zcash_primitives", + "zcash_protocol", "zip32", ] [[package]] name = "zcash_keys" -version = "0.3.0" -source = "git+https://github.com/zcash/librustzcash/#a1047adf0b6f324dad415db34762dc26f8367ce4" +version = "0.2.0" +source = "git+https://github.com/QED-it/librustzcash?branch=txv6-separate-bundles-rebased-dd2#04ebee7fb22303c1e1dc6428def3dd3cecc4715d" dependencies = [ "bech32", "blake2b_simd", @@ -5748,18 +5890,17 @@ dependencies = [ "secrecy", "subtle", "tracing", - "zcash_address 0.5.0 (git+https://github.com/zcash/librustzcash/)", - "zcash_encoding 0.2.1 (git+https://github.com/zcash/librustzcash/)", - "zcash_primitives 0.17.0 (git+https://github.com/zcash/librustzcash/)", - "zcash_protocol 0.3.0 (git+https://github.com/zcash/librustzcash/)", + "zcash_address", + "zcash_encoding", + "zcash_primitives", + "zcash_protocol", "zip32", ] [[package]] name = "zcash_note_encryption" version = "0.4.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b4580cd6cee12e44421dac43169be8d23791650816bdb34e6ddfa70ac89c1c5" +source = "git+https://github.com/QED-it/zcash_note_encryption?branch=zsa1#58384553aab76b2ee6d6eb328cf2187fa824ec9a" dependencies = [ "chacha20", "chacha20poly1305", @@ -5770,56 +5911,19 @@ dependencies = [ [[package]] name = "zcash_primitives" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f044bc9cf2887ec408196fbafb44749e5581f57cc18d8da7aabaeb60cc40c64" +version = "0.15.0" +source = "git+https://github.com/QED-it/librustzcash?branch=txv6-separate-bundles-rebased-dd2#04ebee7fb22303c1e1dc6428def3dd3cecc4715d" dependencies = [ "aes", + "bip0039", "blake2b_simd", - "bs58", "byteorder", "document-features", - "equihash 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "ff", - "fpe", - "group", - "hex", - "incrementalmerkletree", - "jubjub", - "memuse", - "nonempty", - "orchard", - "rand 0.8.5", - "rand_core 0.6.4", - "redjubjub", - "sapling-crypto", - "sha2", - "subtle", - "tracing", - "zcash_address 0.4.0", - "zcash_encoding 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_note_encryption", - "zcash_protocol 0.2.0", - "zcash_spec", - "zip32", -] - -[[package]] -name = "zcash_primitives" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d87ab6a55591a8cf1866749fdc739ae1bbd06e6cec07ab0bbe5d57ee3390eb2" -dependencies = [ - "aes", - "bip32", - "blake2b_simd", - "bs58", - "byteorder", - "document-features", - "equihash 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "equihash 0.2.0 (git+https://github.com/QED-it/librustzcash?branch=txv6-separate-bundles-rebased-dd2)", "ff", "fpe", "group", + "hdwallet", "hex", "incrementalmerkletree", "jubjub", @@ -5831,58 +5935,23 @@ dependencies = [ "redjubjub", "ripemd", "sapling-crypto", - "secp256k1 0.27.0", - "sha2", - "subtle", - "tracing", - "zcash_address 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_encoding 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_note_encryption", - "zcash_protocol 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_spec", - "zip32", -] - -[[package]] -name = "zcash_primitives" -version = "0.17.0" -source = "git+https://github.com/zcash/librustzcash/#a1047adf0b6f324dad415db34762dc26f8367ce4" -dependencies = [ - "aes", - "blake2b_simd", - "bs58", - "byteorder", - "document-features", - "equihash 0.2.0 (git+https://github.com/zcash/librustzcash/)", - "ff", - "fpe", - "group", - "hex", - "incrementalmerkletree", - "jubjub", - "memuse", - "nonempty", - "orchard", - "rand 0.8.5", - "rand_core 0.6.4", - "redjubjub", - "sapling-crypto", + "secp256k1", "sha2", "subtle", "tracing", - "zcash_address 0.5.0 (git+https://github.com/zcash/librustzcash/)", - "zcash_encoding 0.2.1 (git+https://github.com/zcash/librustzcash/)", + "zcash_address", + "zcash_encoding", "zcash_note_encryption", - "zcash_protocol 0.3.0 (git+https://github.com/zcash/librustzcash/)", + "zcash_protocol", "zcash_spec", "zip32", ] [[package]] name = "zcash_proofs" -version = "0.17.0" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b9fc0032b3d90f000f50dba7a996ad6556b7dba5b5145f93ab67b6eb74d3a48" +checksum = "5163a1110f4265cc5f2fdf87ac4497fd1e014b6ce0760ca8d16d8e3853a5c0f7" dependencies = [ "bellman", "blake2b_simd", @@ -5898,33 +5967,13 @@ dependencies = [ 
"sapling-crypto", "tracing", "xdg", - "zcash_primitives 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_primitives", ] [[package]] name = "zcash_protocol" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f35eac659fdbba614333d119217c5963c0d7cea43aee33176c4f2f95e5460d8d" -dependencies = [ - "document-features", - "memuse", -] - -[[package]] -name = "zcash_protocol" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1ff002bd41ba76b42d42a02ee11de06790b7fdbc904bdea4486b9a93b2a5e4" -dependencies = [ - "document-features", - "memuse", -] - -[[package]] -name = "zcash_protocol" -version = "0.3.0" -source = "git+https://github.com/zcash/librustzcash/#a1047adf0b6f324dad415db34762dc26f8367ce4" +version = "0.1.1" +source = "git+https://github.com/QED-it/librustzcash?branch=txv6-separate-bundles-rebased-dd2#04ebee7fb22303c1e1dc6428def3dd3cecc4715d" dependencies = [ "document-features", "memuse", @@ -5964,6 +6013,7 @@ dependencies = [ "chrono", "color-eyre", "criterion", + "dirs", "ed25519-zebra", "equihash 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "futures", @@ -5988,7 +6038,7 @@ dependencies = [ "redjubjub", "ripemd", "sapling-crypto", - "secp256k1 0.26.0", + "secp256k1", "serde", "serde-big-array", "serde_json", @@ -5996,19 +6046,20 @@ dependencies = [ "sha2", "spandoc", "static_assertions", + "tempfile", "thiserror", "tinyvec", "tokio", "tracing", "uint", "x25519-dalek", - "zcash_address 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_client_backend 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_encoding 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_address", + "zcash_client_backend", + "zcash_encoding", "zcash_history", "zcash_note_encryption", - "zcash_primitives 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_protocol 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_primitives", + "zcash_protocol", "zebra-test", ] @@ -6065,15 +6116,15 @@ dependencies = [ "color-eyre", "futures-util", "insta", - "prost", + "prost 0.13.1", "serde", "tokio", "tokio-stream", "tonic", - "tonic-build", + "tonic-build 0.12.1", "tonic-reflection", "tower", - "zcash_primitives 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_primitives", "zebra-chain", "zebra-node-services", "zebra-state", @@ -6146,8 +6197,9 @@ dependencies = [ "jsonrpc-core", "jsonrpc-derive", "jsonrpc-http-server", + "nix", "proptest", - "prost", + "prost 0.13.1", "rand 0.8.5", "serde", "serde_json", @@ -6155,12 +6207,12 @@ dependencies = [ "tokio", "tokio-stream", "tonic", - "tonic-build", + "tonic-build 0.12.1", "tonic-reflection", "tower", "tracing", - "zcash_address 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_primitives 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_address", + "zcash_primitives", "zebra-chain", "zebra-consensus", "zebra-network", @@ -6202,11 +6254,11 @@ dependencies = [ "tower", "tracing", "tracing-subscriber", - "zcash_address 0.5.0 (git+https://github.com/zcash/librustzcash/)", - "zcash_client_backend 0.13.0 (git+https://github.com/zcash/librustzcash/)", - "zcash_keys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_address", + "zcash_client_backend", + "zcash_keys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "zcash_note_encryption", - 
"zcash_primitives 0.17.0 (git+https://github.com/zcash/librustzcash/)", + "zcash_primitives", "zebra-chain", "zebra-grpc", "zebra-node-services", @@ -6316,7 +6368,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "serde_yaml", + "serde_yml", "structopt", "syn 2.0.72", "thiserror", @@ -6324,9 +6376,9 @@ dependencies = [ "tokio", "tracing-error", "tracing-subscriber", - "zcash_client_backend 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_primitives 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zcash_protocol 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zcash_client_backend", + "zcash_primitives", + "zcash_protocol", "zebra-chain", "zebra-node-services", "zebra-rpc", @@ -6366,7 +6418,7 @@ dependencies = [ "pin-project", "proptest", "proptest-derive", - "prost", + "prost 0.13.1", "rand 0.8.5", "rayon", "regex", @@ -6382,7 +6434,7 @@ dependencies = [ "tokio-stream", "toml 0.8.19", "tonic", - "tonic-build", + "tonic-build 0.12.1", "tower", "tracing", "tracing-appender", @@ -6406,18 +6458,19 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ + "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", @@ -6457,25 +6510,12 @@ dependencies = [ [[package]] name = "zip321" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dc85f862f7be64fb0d46f9eb5b82ad54e58cde314fa979d5bae591bc0143693" -dependencies = [ - "base64 0.21.7", - "nom", - "percent-encoding", - "zcash_address 0.4.0", - "zcash_protocol 0.2.0", -] - -[[package]] -name = "zip321" -version = "0.1.0" -source = "git+https://github.com/zcash/librustzcash/#a1047adf0b6f324dad415db34762dc26f8367ce4" +version = "0.0.0" +source = "git+https://github.com/QED-it/librustzcash?branch=txv6-separate-bundles-rebased-dd2#04ebee7fb22303c1e1dc6428def3dd3cecc4715d" dependencies = [ "base64 0.21.7", "nom", "percent-encoding", - "zcash_address 0.5.0 (git+https://github.com/zcash/librustzcash/)", - "zcash_protocol 0.3.0 (git+https://github.com/zcash/librustzcash/)", + "zcash_address", + "zcash_protocol", ] diff --git a/Cargo.toml b/Cargo.toml index a006d6eb8a6..e8b55f1ba8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,17 +22,17 @@ resolver = "2" # `cargo release` settings [workspace.dependencies] -incrementalmerkletree = "0.6.0" -orchard = "0.9.0" -sapling-crypto = "0.2.0" -zcash_address = "0.5.0" -zcash_client_backend = "0.13.0" -zcash_encoding = "0.2.1" +incrementalmerkletree = "0.5.1" +orchard = "0.8.0" +sapling-crypto = "0.1.3" +zcash_address = "0.3.2" +zcash_client_backend = "0.12.1" +zcash_encoding = "0.2.0" zcash_history = "0.4.0" -zcash_keys = "0.3.0" -zcash_primitives = "0.17.0" -zcash_proofs = "0.17.0" -zcash_protocol = "0.3.0" +zcash_keys = "0.2.0" +zcash_primitives = "0.15.0" +zcash_proofs = "0.15.0" +zcash_protocol = "0.1.1" [workspace.metadata.release] @@ -102,3 +102,15 @@ panic = "abort" # - add "-flto=thin" to all C/C++ code builds # - see 
https://doc.rust-lang.org/rustc/linker-plugin-lto.html#cc-code-as-a-dependency-in-rust lto = "thin" + +[patch.crates-io] +halo2_proofs = { version = "0.3.0", git = "https://github.com/QED-it/halo2", rev = "7f5c0babd61f8ca46c9165a1adfac298d3fd3a11" } +zcash_note_encryption = { version = "0.4.0", git = "https://github.com/QED-it/zcash_note_encryption", branch = "zsa1" } +sapling-crypto = { version = "0.1.3", git = "https://github.com/QED-it/sapling-crypto", branch = "zsa1" } +orchard = { version = "0.8.0", git = "https://github.com/QED-it/orchard", branch = "zsa1" } +zcash_primitives = { version = "0.15.0", git = "https://github.com/QED-it/librustzcash", branch = "txv6-separate-bundles-rebased-dd2" } +zcash_protocol = { version = "0.1.1", git = "https://github.com/QED-it/librustzcash", branch = "txv6-separate-bundles-rebased-dd2" } +zcash_address = { version = "0.3.2", git = "https://github.com/QED-it/librustzcash", branch = "txv6-separate-bundles-rebased-dd2" } +zcash_encoding = { version = "0.2.0", git = "https://github.com/QED-it/librustzcash", branch = "txv6-separate-bundles-rebased-dd2" } +zcash_history = { version = "0.4.0", git = "https://github.com/QED-it/librustzcash", branch = "txv6-separate-bundles-rebased-dd2" } +zcash_client_backend = { version = "0.12.1", git = "https://github.com/QED-it/librustzcash", branch = "txv6-separate-bundles-rebased-dd2" } diff --git a/book/src/user/requirements.md b/book/src/user/requirements.md index df95aa139ba..d908a7487de 100644 --- a/book/src/user/requirements.md +++ b/book/src/user/requirements.md @@ -1,16 +1,22 @@ # System Requirements -We recommend the following requirements for compiling and running `zebrad`: +Zebra has the following hardware requirements. + +## Recommended Requirements - 4 CPU cores - 16 GB RAM -- 300 GB available disk space for building binaries and storing cached chain - state +- 300 GB available disk space - 100 Mbps network connection, with 300 GB of uploads and downloads per month -Zebra's tests can take over an hour, depending on your machine. Note that you -might be able to build and run Zebra on slower systems — we haven't tested its -exact limits yet. +## Minimum Hardware Requirements + +- 2 CPU cores +- 4 GB RAM +- 300 GB available disk space + +[Zebra has successfully run on an Orange Pi Zero 2W with a 512 GB microSD card +without any issues.](https://x.com/Zerodartz/status/1811460885996798159) ## Disk Requirements @@ -48,9 +54,6 @@ networks. - Ongoing updates: 10 MB - 10 GB upload and download per day, depending on user-created transaction size and peer requests. -Zebra performs an initial sync every time its internal database version changes, -so some version upgrades might require a full download of the whole chain. - Zebra needs some peers which have a round-trip latency of 2 seconds or less. If this is a problem for you, please [open a ticket.](https://github.com/ZcashFoundation/zebra/issues/new/choose) diff --git a/docker/Dockerfile b/docker/Dockerfile index 3820d87adf4..c71e3e422f5 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,12 +1,13 @@ +# syntax=docker/dockerfile:1 +# check=skip=UndefinedVar + # If you want to include a file in the Docker image, add it to .dockerignore. 
# -# We are using five stages: -# - chef: installs cargo-chef -# - planner: computes the recipe file -# - deps: caches our dependencies and sets the needed variables -# - tests: builds tests -# - release: builds release binary -# - runtime: is our runtime environment +# We are using 4 stages: +# - deps: installs build dependencies and sets the needed variables +# - tests: builds test binaries +# - release: builds release binaries +# - runtime: runs the release binaries # # We first set default values for build arguments used across the stages. # Each stage must define the build arguments (ARGs) it uses. @@ -19,26 +20,19 @@ ARG FEATURES="default-release-binaries" ARG TEST_FEATURES="lightwalletd-grpc-tests zebra-checkpoints" ARG EXPERIMENTAL_FEATURES="" -# This stage implements cargo-chef for docker layer caching -FROM rust:bookworm as chef -RUN cargo install cargo-chef --locked -WORKDIR /opt/zebrad - -# Analyze the current project to determine the minimum subset of files -# (Cargo.lock and Cargo.toml manifests) required to build it and cache dependencies -# -# The recipe.json is the equivalent of the Python requirements.txt file -FROM chef AS planner -COPY . . -RUN cargo chef prepare --recipe-path recipe.json - +ARG APP_HOME="/opt/zebrad" +ARG RUST_VERSION=1.79.0 # In this stage we download all system requirements to build the project # # It also captures all the build arguments to be used as environment variables. # We set defaults for the arguments, in case the build does not include this information. -FROM chef AS deps +FROM rust:${RUST_VERSION}-bookworm AS deps SHELL ["/bin/bash", "-xo", "pipefail", "-c"] -COPY --from=planner /opt/zebrad/recipe.json recipe.json + +# Set the default path for the zebrad binary +ARG APP_HOME +ENV APP_HOME=${APP_HOME} +WORKDIR ${APP_HOME} # Install zebra build deps and Dockerfile deps RUN apt-get -qq update && \ apt-get -qq install -y --no-install-recommends \ clang \ ca-certificates \ protobuf-compiler \ - rsync \ rocksdb-tools \ - ; \ - rm -rf /var/lib/apt/lists/* /tmp/* -# Install google OS Config agent to be able to get information from the VMs being deployed -# into GCP for integration testing purposes, and as Mainnet nodes -# TODO: this shouldn't be a hardcoded requirement for everyone -RUN if [ "$(uname -m)" != "aarch64" ]; then \ - apt-get -qq update && \ - apt-get -qq install -y --no-install-recommends \ - curl \ - lsb-release \ - && \ - echo "deb http://packages.cloud.google.com/apt google-compute-engine-$(lsb_release -cs)-stable main" > /etc/apt/sources.list.d/google-compute-engine.list && \ - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ - apt-get -qq update && \ - apt-get -qq install -y --no-install-recommends google-osconfig-agent; \ - fi \ - && \ - rm -rf /var/lib/apt/lists/* /tmp/* + && rm -rf /var/lib/apt/lists/* /tmp/* # Build arguments and variables set for tracelog levels and debug information # @@ -86,11 +61,14 @@ ARG COLORBT_SHOW_HIDDEN ENV COLORBT_SHOW_HIDDEN=${COLORBT_SHOW_HIDDEN:-1} ARG SHORT_SHA -# If this is not set, it must be the empty string, so Zebra can try an alternative git commit source: +# If this is not set, it must be an empty string, so Zebra can try an alternative git commit source: # https://github.com/ZcashFoundation/zebra/blob/9ebd56092bcdfc1a09062e15a0574c94af37f389/zebrad/src/application.rs#L179-L182 ENV SHORT_SHA=${SHORT_SHA:-} -ENV CARGO_HOME="/opt/zebrad/.cargo/" +ENV CARGO_HOME="${APP_HOME}/.cargo/" + +# Copy the entrypoint script to be used on both images +COPY
./docker/entrypoint.sh /etc/zebrad/entrypoint.sh # In this stage we build tests (without running them) # @@ -98,12 +76,6 @@ ENV CARGO_HOME="${APP_HOME}/.cargo/" # An entrypoint.sh is only available in this step for easier test handling with variables. FROM deps AS tests -COPY --from=electriccoinco/lightwalletd:latest /usr/local/bin/lightwalletd /usr/local/bin/ - -# cargo uses timestamps for its cache, so they need to be in this order: -# unmodified source files < previous build cache < modified source files -COPY . . - # Skip IPv6 tests by default, as some CI environments don't have IPv6 available ARG ZEBRA_SKIP_IPV6_TESTS ENV ZEBRA_SKIP_IPV6_TESTS=${ZEBRA_SKIP_IPV6_TESTS:-1} @@ -116,66 +88,81 @@ ARG EXPERIMENTAL_FEATURES # TODO: add empty $EXPERIMENTAL_FEATURES when we can avoid adding an extra space to the end of the string ARG ENTRYPOINT_FEATURES="${FEATURES} ${TEST_FEATURES}" -# Re-hydrate the minimum project skeleton identified by `cargo chef prepare` in the planner stage, -# over the top of the original source files, -# and build it to cache all possible sentry and test dependencies. -# -# This is the caching Docker layer for Rust tests! -# It creates fake empty test binaries so dependencies are built, but Zebra is not fully built. -# -# TODO: add --locked when cargo-chef supports it -RUN cargo chef cook --tests --release --features "${ENTRYPOINT_FEATURES}" --workspace --recipe-path recipe.json -# Undo the source file changes made by cargo-chef. -# rsync invalidates the cargo cache for the changed files only, by updating their timestamps. -# This makes sure the fake empty binaries created by cargo-chef are rebuilt. -COPY --from=planner /opt/zebrad zebra-original -RUN rsync --recursive --checksum --itemize-changes --verbose zebra-original/ . -RUN rm -r zebra-original - # Build Zebra test binaries, but don't run them -RUN cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace --no-run -RUN cp /opt/zebrad/target/release/zebrad /usr/local/bin -RUN cp /opt/zebrad/target/release/zebra-checkpoints /usr/local/bin -COPY ./docker/entrypoint.sh / -RUN chmod u+x /entrypoint.sh +# Leverage a cache mount to /usr/local/cargo/registry/ +# for downloaded dependencies, a cache mount to /usr/local/cargo/git/db +# for git repository dependencies, and a cache mount to ${APP_HOME}/target/ for +# compiled dependencies, which will speed up subsequent builds. +# Leverage a bind mount to each crate directory to avoid having to copy the +# source code into the container. Once built, copy the executable to an +# output directory before the cache mounted ${APP_HOME}/target/ is unmounted.
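+#
+# For reference, a minimal sketch of this BuildKit pattern (the `mycrate`
+# name and the /build paths below are hypothetical, not part of this image):
+#
+#   RUN --mount=type=bind,source=mycrate,target=mycrate \
+#       --mount=type=bind,source=Cargo.toml,target=Cargo.toml \
+#       --mount=type=bind,source=Cargo.lock,target=Cargo.lock \
+#       --mount=type=cache,target=/build/target/ \
+#       --mount=type=cache,target=/usr/local/cargo/registry/ \
+#       cargo build --locked --release && \
+#       cp /build/target/release/mycrate /usr/local/bin
+#
+# Bind mounts are read-only by default, and cache mounts are detached once the
+# RUN step finishes, so artifacts must be copied out (the final `cp` above)
+# while the mounts are still attached, exactly as the real command below does.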
+RUN --mount=type=bind,source=zebrad,target=zebrad \ + --mount=type=bind,source=zebra-chain,target=zebra-chain \ + --mount=type=bind,source=zebra-network,target=zebra-network \ + --mount=type=bind,source=zebra-state,target=zebra-state \ + --mount=type=bind,source=zebra-script,target=zebra-script \ + --mount=type=bind,source=zebra-consensus,target=zebra-consensus \ + --mount=type=bind,source=zebra-rpc,target=zebra-rpc \ + --mount=type=bind,source=zebra-node-services,target=zebra-node-services \ + --mount=type=bind,source=zebra-test,target=zebra-test \ + --mount=type=bind,source=zebra-utils,target=zebra-utils \ + --mount=type=bind,source=zebra-scan,target=zebra-scan \ + --mount=type=bind,source=zebra-grpc,target=zebra-grpc \ + --mount=type=bind,source=tower-batch-control,target=tower-batch-control \ + --mount=type=bind,source=tower-fallback,target=tower-fallback \ + --mount=type=bind,source=Cargo.toml,target=Cargo.toml \ + --mount=type=bind,source=Cargo.lock,target=Cargo.lock \ + --mount=type=cache,target=${APP_HOME}/target/ \ + --mount=type=cache,target=/usr/local/cargo/git/db \ + --mount=type=cache,target=/usr/local/cargo/registry/ \ +cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace --no-run && \ +cp ${APP_HOME}/target/release/zebrad /usr/local/bin && \ +cp ${APP_HOME}/target/release/zebra-checkpoints /usr/local/bin + +# Copy the lightwalletd binary and source files to be able to run tests +COPY --from=electriccoinco/lightwalletd:latest /usr/local/bin/lightwalletd /usr/local/bin/ +COPY ./ ./ # Entrypoint environment variables ENV ENTRYPOINT_FEATURES=${ENTRYPOINT_FEATURES} # We repeat the ARGs here, so they are available in the entrypoint.sh script for $RUN_ALL_EXPERIMENTAL_TESTS -ARG EXPERIMENTAL_FEATURES="shielded-scan journald prometheus filter-reload" +ARG EXPERIMENTAL_FEATURES="journald prometheus filter-reload" ENV ENTRYPOINT_FEATURES_EXPERIMENTAL="${ENTRYPOINT_FEATURES} ${EXPERIMENTAL_FEATURES}" # By default, runs the entrypoint tests specified by the environment variables (if any are set) -ENTRYPOINT [ "/entrypoint.sh" ] +ENTRYPOINT [ "/etc/zebrad/entrypoint.sh" ] # In this stage we build a release (generate the zebrad binary) # -# This step also adds `cargo chef` as this stage is completely independent from the +# This step also uses `cache mounts`, as this stage is completely independent from the # `tests` stage. This step is a dependency for the `runtime` stage, which uses the resulting # zebrad binary from this step. FROM deps AS release -COPY . . - ARG FEATURES -# This is the caching layer for Rust zebrad builds. -# It creates a fake empty zebrad binary, see above for details. -# -# TODO: add --locked when cargo-chef supports it -RUN cargo chef cook --release --features "${FEATURES}" --package zebrad --bin zebrad --recipe-path recipe.json - -# Undo the source file changes made by cargo-chef, so the fake empty zebrad binary is rebuilt. -COPY --from=planner /opt/zebrad zebra-original -RUN rsync --recursive --checksum --itemize-changes --verbose zebra-original/ .
-RUN rm -r zebra-original - -# Build zebrad -RUN cargo build --locked --release --features "${FEATURES}" --package zebrad --bin zebrad - -COPY ./docker/entrypoint.sh / -RUN chmod u+x /entrypoint.sh +RUN --mount=type=bind,source=tower-batch-control,target=tower-batch-control \ + --mount=type=bind,source=tower-fallback,target=tower-fallback \ + --mount=type=bind,source=zebra-chain,target=zebra-chain \ + --mount=type=bind,source=zebra-consensus,target=zebra-consensus \ + --mount=type=bind,source=zebra-grpc,target=zebra-grpc \ + --mount=type=bind,source=zebra-network,target=zebra-network \ + --mount=type=bind,source=zebra-node-services,target=zebra-node-services \ + --mount=type=bind,source=zebra-rpc,target=zebra-rpc \ + --mount=type=bind,source=zebra-scan,target=zebra-scan \ + --mount=type=bind,source=zebra-script,target=zebra-script \ + --mount=type=bind,source=zebra-state,target=zebra-state \ + --mount=type=bind,source=zebra-test,target=zebra-test \ + --mount=type=bind,source=zebra-utils,target=zebra-utils \ + --mount=type=bind,source=zebrad,target=zebrad \ + --mount=type=bind,source=Cargo.toml,target=Cargo.toml \ + --mount=type=bind,source=Cargo.lock,target=Cargo.lock \ + --mount=type=cache,target=${APP_HOME}/target/ \ + --mount=type=cache,target=/usr/local/cargo/git/db \ + --mount=type=cache,target=/usr/local/cargo/registry/ \ +cargo build --locked --release --features "${FEATURES}" --package zebrad --bin zebrad && \ +cp ${APP_HOME}/target/release/zebrad /usr/local/bin # This stage is only used when deploying nodes or when only the resulting zebrad binary is needed # @@ -183,14 +170,18 @@ RUN chmod u+x /entrypoint.sh # binary from the `release` stage FROM debian:bookworm-slim AS runtime +# Set the default path for the zebrad binary +ARG APP_HOME +ENV APP_HOME=${APP_HOME} +WORKDIR ${APP_HOME} + RUN apt-get update && \ apt-get install -y --no-install-recommends \ ca-certificates \ curl \ rocksdb-tools \ gosu \ - && \ - rm -rf /var/lib/apt/lists/* /tmp/* + && rm -rf /var/lib/apt/lists/* /tmp/* # Create a non-privileged user that the app will run under. # Running as root inside the container is running as root in the Docker host @@ -208,6 +199,7 @@ RUN addgroup --system --gid ${GID} ${USER} \ --system \ --disabled-login \ --shell /bin/bash \ + --home ${APP_HOME} \ --uid "${UID}" \ --gid "${GID}" \ ${USER} @@ -217,16 +209,20 @@ ARG FEATURES ENV FEATURES=${FEATURES} # Path and name of the config file +# These are set to a default value when not defined in the environment ENV ZEBRA_CONF_DIR=${ZEBRA_CONF_DIR:-/etc/zebrad} ENV ZEBRA_CONF_FILE=${ZEBRA_CONF_FILE:-zebrad.toml} -COPY --from=release /opt/zebrad/target/release/zebrad /usr/local/bin -COPY --from=release /entrypoint.sh / +RUN mkdir -p ${ZEBRA_CONF_DIR} && chown ${UID}:${UID} ${ZEBRA_CONF_DIR} \ + && chown ${UID}:${UID} ${APP_HOME} + +COPY --from=release /usr/local/bin/zebrad /usr/local/bin +COPY --from=release /etc/zebrad/entrypoint.sh /etc/zebrad # Expose configured ports EXPOSE 8233 18233 # Update the config file based on the Docker run variables, # and launch zebrad with it -ENTRYPOINT [ "/entrypoint.sh" ] +ENTRYPOINT [ "/etc/zebrad/entrypoint.sh" ] CMD ["zebrad"] diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 3dd5275d643..d71be57805d 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -10,8 +10,6 @@ # 4. Node Startup: Starts the node, allowing it to begin its operations. 
# -# Show the commands we are executing -set -x # Exit if a command fails set -e # Exit if any command in a pipeline fails @@ -250,23 +248,20 @@ case "$1" in # Run a Zebra full sync test on mainnet. run_cargo_test "${ENTRYPOINT_FEATURES}" "full_sync_mainnet" # List directory generated by test - # TODO: replace with ${ZEBRA_CACHED_STATE_DIR} in Rust and workflows - check_directory_files "/zebrad-cache" + check_directory_files "${ZEBRA_CACHED_STATE_DIR}" elif [[ -n "${FULL_SYNC_TESTNET_TIMEOUT_MINUTES}" ]]; then # Run a Zebra full sync test on testnet. run_cargo_test "${ENTRYPOINT_FEATURES}" "full_sync_testnet" # List directory generated by test - # TODO: replace with ${ZEBRA_CACHED_STATE_DIR} in Rust and workflows - check_directory_files "/zebrad-cache" + check_directory_files "${ZEBRA_CACHED_STATE_DIR}" elif [[ "${TEST_DISK_REBUILD}" -eq "1" ]]; then # Run a Zebra sync up to the mandatory checkpoint. # # TODO: use environmental variables instead of Rust features (part of #2995) run_cargo_test "test_sync_to_mandatory_checkpoint_${NETWORK,,},${ENTRYPOINT_FEATURES}" "sync_to_mandatory_checkpoint_${NETWORK,,}" - # TODO: replace with ${ZEBRA_CACHED_STATE_DIR} in Rust and workflows - check_directory_files "/zebrad-cache" + check_directory_files "${ZEBRA_CACHED_STATE_DIR}" elif [[ "${TEST_UPDATE_SYNC}" -eq "1" ]]; then # Run a Zebra sync starting at the cached tip, and syncing to the latest tip. @@ -279,8 +274,7 @@ case "$1" in # Run a Zebra sync starting at the cached mandatory checkpoint, and syncing past it. # # List directory used by test - # TODO: replace with ${ZEBRA_CACHED_STATE_DIR} in Rust and workflows - check_directory_files "/zebrad-cache" + check_directory_files "${ZEBRA_CACHED_STATE_DIR}" # TODO: use environmental variables instead of Rust features (part of #2995) run_cargo_test "test_sync_past_mandatory_checkpoint_${NETWORK,,},${ENTRYPOINT_FEATURES}" "sync_past_mandatory_checkpoint_${NETWORK,,}" @@ -357,11 +351,15 @@ case "$1" in exec cargo test --locked --release --features "zebra-test" --package zebra-scan -- --nocapture --include-ignored scan_task_commands else - exec gosu "$USER" "$@" + exec "$@" fi fi ;; *) - exec gosu "$USER" "$@" + if command -v gosu >/dev/null 2>&1; then + exec gosu "$USER" "$@" + else + exec "$@" + fi ;; esac diff --git a/openapi.yaml b/openapi.yaml index 2a7636a90b7..58a754c9731 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -28,7 +28,7 @@ paths: default: getinfo id: type: string - default: x2r3lRddGL + default: uf2E54tQkk params: type: array items: {} @@ -61,7 +61,7 @@ paths: default: getblockchaininfo id: type: string - default: w8Lb0nAvLd + default: Sbre3vivr8 params: type: array items: {} @@ -99,7 +99,7 @@ paths: default: getaddressbalance id: type: string - default: QbTztoTvRo + default: f5qarOBgzK params: type: array items: {} @@ -147,7 +147,7 @@ paths: default: sendrawtransaction id: type: string - default: aDK5RQWj16 + default: IlNHvAcSMS params: type: array items: {} @@ -196,7 +196,7 @@ paths: default: getblock id: type: string - default: xxCP1d61X0 + default: s9678BM3Lc params: type: array items: {} @@ -239,7 +239,7 @@ paths: default: getbestblockhash id: type: string - default: DoZgd1j7xW + default: FGQPJY8Tp8 params: type: array items: {} @@ -272,7 +272,7 @@ paths: default: getbestblockheightandhash id: type: string - default: 0iUFHsOjk3 + default: c2MfkL7xP9 params: type: array items: {} @@ -305,7 +305,7 @@ paths: default: getrawmempool id: type: string - default: WXG2c6FcCK + default: BugnNFhJpA params: type: array items: {} @@ -343,7 
+343,7 @@ paths: default: z_gettreestate id: type: string - default: 38P0xXV0do + default: fCUQvR1BVa params: type: array items: {} @@ -393,7 +393,7 @@ paths: default: z_getsubtreesbyindex id: type: string - default: 662iR8VZGT + default: TtPnptV6EU params: type: array items: {} @@ -432,7 +432,7 @@ paths: default: getrawtransaction id: type: string - default: UuvVrzSzqC + default: QqYeOGSzje params: type: array items: {} @@ -480,7 +480,7 @@ paths: default: getaddresstxids id: type: string - default: KMss2wDMwH + default: AsWWVyqp8x params: type: array items: {} @@ -528,7 +528,7 @@ paths: default: getaddressutxos id: type: string - default: 4Y6BAhe6Lf + default: Qscn5dUFgD params: type: array items: {} @@ -554,6 +554,39 @@ paths: error: type: string default: Invalid parameters + /stop: + post: + tags: + - control + description: Stop the running zebrad process. + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + method: + type: string + default: stop + id: + type: string + default: WuIaPXV5fO + params: + type: array + items: {} + default: '[]' + responses: + '200': + description: OK + content: + application/json: + schema: + type: object + properties: + result: + type: object + default: 'null' /getblockcount: post: tags: @@ -571,7 +604,7 @@ paths: default: getblockcount id: type: string - default: nzPm5W3X1G + default: '5F9M7Wp0oI' params: type: array items: {} @@ -609,7 +642,7 @@ paths: default: getblockhash id: type: string - default: KLKosq2Z8E + default: f7hdgVjctr params: type: array items: {} @@ -657,7 +690,7 @@ paths: default: getblocktemplate id: type: string - default: spj7gKe2AA + default: pq0uXn3YGs params: type: array items: {} @@ -695,7 +728,7 @@ paths: default: submitblock id: type: string - default: QOQsC3nA7z + default: bs4v4JmVw3 params: type: array items: {} @@ -728,7 +761,7 @@ paths: default: getmininginfo id: type: string - default: Si3Sdb9ICT + default: pp5xV6v3pm params: type: array items: {} @@ -743,7 +776,7 @@ paths: properties: result: type: object - default: '{"networksolps":0,"networkhashps":0,"chain":"","testnet":false}' + default: '{"blocks":0,"networksolps":0,"networkhashps":0,"chain":"","testnet":false}' /getnetworksolps: post: tags: @@ -761,7 +794,7 @@ paths: default: getnetworksolps id: type: string - default: jWvKPdOxDa + default: '7bU98TeCV6' params: type: array items: {} @@ -794,7 +827,7 @@ paths: default: getnetworkhashps id: type: string - default: wnFwBVFrN0 + default: fskOJeXqjo params: type: array items: {} @@ -827,7 +860,7 @@ paths: default: getpeerinfo id: type: string - default: NpKiq59CE8 + default: jPV8ufjDdt params: type: array items: {} @@ -865,7 +898,7 @@ paths: default: validateaddress id: type: string - default: PDjTChWgFW + default: xOyxICseV9 params: type: array items: {} @@ -903,7 +936,7 @@ paths: default: z_validateaddress id: type: string - default: aCeb6xbIuo + default: xa6PoC4uN6 params: type: array items: {} @@ -941,7 +974,7 @@ paths: default: getblocksubsidy id: type: string - default: EeBvVXCJon + default: vYEVtnVK9o params: type: array items: {} @@ -984,7 +1017,7 @@ paths: default: getdifficulty id: type: string - default: jg2K8N0ZG4 + default: tVzSTZu2sD params: type: array items: {} @@ -1022,7 +1055,7 @@ paths: default: z_listunifiedreceivers id: type: string - default: Y3gscsg8yT + default: le2NmJBmPt params: type: array items: {} @@ -1038,3 +1071,51 @@ paths: result: type: object default: '{"orchard":"orchard address if any","sapling":"sapling address if any","p2pkh":"p2pkh 
address if any","p2sh":"p2sh address if any"}' + /generate: + post: + tags: + - generating + description: |- + Mine blocks immediately. Returns the block hashes of the generated blocks. + + **Request body `params` arguments:** + + - `num_blocks` - Number of blocks to be generated. + requestBody: + required: true + content: + application/json: + schema: + type: object + properties: + method: + type: string + default: generate + id: + type: string + default: vVVOWxHqlN + params: + type: array + items: {} + default: '[1]' + responses: + '200': + description: OK + content: + application/json: + schema: + type: object + properties: + result: + type: object + default: '{}' + '400': + description: Bad request + content: + application/json: + schema: + type: object + properties: + error: + type: string + default: Invalid parameters diff --git a/supply-chain/config.toml b/supply-chain/config.toml index 9ca2020fc37..21bfeebddba 100644 --- a/supply-chain/config.toml +++ b/supply-chain/config.toml @@ -1,4 +1,3 @@ - # cargo-vet config file [cargo-vet] @@ -1414,10 +1413,6 @@ criteria = "safe-to-deploy" version = "3.8.1" criteria = "safe-to-deploy" -[[exemptions.serde_yaml]] -version = "0.9.34+deprecated" -criteria = "safe-to-deploy" - [[exemptions.sha2]] version = "0.10.8" criteria = "safe-to-deploy" diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index a7a4a7efc05..ec1c825176d 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -68,7 +68,7 @@ bitflags = "2.5.0" bitflags-serde-legacy = "0.1.1" blake2b_simd = "1.0.2" blake2s_simd = "1.0.2" -bridgetree = "0.5.0" +bridgetree = "0.4.0" bs58 = { version = "0.5.1", features = ["check"] } byteorder = "1.5.0" @@ -81,6 +81,8 @@ group = "0.13.0" incrementalmerkletree.workspace = true jubjub = "0.10.0" lazy_static = "1.4.0" +tempfile = "3.11.0" +dirs = "5.0.1" num-integer = "0.1.46" primitive-types = "0.12.2" rand_core = "0.6.4" @@ -176,3 +178,7 @@ required-features = ["bench"] [[bench]] name = "redpallas" harness = false + +# FIXME: remove this and all zcash_unstable usage in the code after updating librustzcash +[lints.rust] +unexpected_cfgs = { level = "warn", check-cfg = ['cfg(zcash_unstable, values("nu6"))'] } diff --git a/zebra-chain/src/chain_tip/mock.rs b/zebra-chain/src/chain_tip/mock.rs index 46ca5e89e5e..f1fc8fb6e27 100644 --- a/zebra-chain/src/chain_tip/mock.rs +++ b/zebra-chain/src/chain_tip/mock.rs @@ -106,7 +106,7 @@ impl ChainTip for MockChainTip { } fn best_tip_mined_transaction_ids(&self) -> Arc<[transaction::Hash]> { - unreachable!("Method not used in tests"); + Arc::new([]) } fn estimate_distance_to_network_chain_tip( diff --git a/zebra-chain/src/common.rs b/zebra-chain/src/common.rs new file mode 100644 index 00000000000..2af22887d8d --- /dev/null +++ b/zebra-chain/src/common.rs @@ -0,0 +1,71 @@ +//! Common functions used in Zebra. + +use std::{ + ffi::OsString, + fs, + io::{self, Write}, + path::PathBuf, +}; + +use tempfile::PersistError; + +/// Returns Zebra's default cache directory path. +pub fn default_cache_dir() -> PathBuf { + dirs::cache_dir() + .unwrap_or_else(|| std::env::current_dir().unwrap().join("cache")) + .join("zebra") +} + +/// Accepts a target file path and a byte-slice. +/// +/// Atomically writes the byte-slice to a file to avoid corrupting the file if Zebra +/// panics, crashes, or exits while the file is being written, or if multiple Zebra instances +/// try to read and write the same file. +/// +/// Returns the provided file path if successful. 
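+///
+/// # Example
+///
+/// An illustrative sketch of a call site (the path below is hypothetical):
+///
+/// ```no_run
+/// # use std::path::PathBuf;
+/// # use zebra_chain::common::atomic_write;
+/// let cache_file = PathBuf::from("/tmp/zebra-example/peers.cache");
+/// // The outer `Result` reports directory or write errors, the inner one
+/// // reports errors persisting the temporary file.
+/// let written_path = atomic_write(cache_file, b"127.0.0.1:8233")
+///     .expect("failed to create or write the temporary file")
+///     .expect("failed to persist the temporary file");
+/// assert_eq!(written_path, PathBuf::from("/tmp/zebra-example/peers.cache"));
+/// ```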
+///
+/// # Concurrency
+///
+/// This function blocks on filesystem operations and should be called in a blocking task
+/// when calling from an async environment.
+///
+/// # Panics
+///
+/// If the provided `file_path` is a directory path.
+pub fn atomic_write(
+    file_path: PathBuf,
+    data: &[u8],
+) -> io::Result<Result<PathBuf, PersistError<fs::File>>> {
+    // Get the file's parent directory, or use Zebra's default cache directory
+    let file_dir = file_path
+        .parent()
+        .map(|p| p.to_owned())
+        .unwrap_or_else(default_cache_dir);
+
+    // Create the directory if needed.
+    fs::create_dir_all(&file_dir)?;
+
+    // Give the temporary file a similar name to the permanent file,
+    // but hide it in directory listings.
+    let mut tmp_file_prefix: OsString = ".tmp.".into();
+    tmp_file_prefix.push(
+        file_path
+            .file_name()
+            .expect("file path must have a file name"),
+    );
+
+    // Create the temporary file in the same directory as the permanent file,
+    // so atomic filesystem operations are possible.
+    let mut tmp_file = tempfile::Builder::new()
+        .prefix(&tmp_file_prefix)
+        .tempfile_in(file_dir)?;
+
+    tmp_file.write_all(data)?;
+
+    // Atomically write the temp file to `file_path`.
+    let persist_result = tmp_file
+        .persist(&file_path)
+        // Drops the temp file and returns the file path.
+        .map(|_| file_path);
+    Ok(persist_result)
+}
diff --git a/zebra-chain/src/lib.rs b/zebra-chain/src/lib.rs
index 4faaeab70cc..460d3a850f0 100644
--- a/zebra-chain/src/lib.rs
+++ b/zebra-chain/src/lib.rs
@@ -22,6 +22,7 @@ pub mod amount;
 pub mod block;
 pub mod chain_sync_status;
 pub mod chain_tip;
+pub mod common;
 pub mod diagnostic;
 pub mod error;
 pub mod fmt;
diff --git a/zebra-chain/src/orchard/note/ciphertexts.rs b/zebra-chain/src/orchard/note/ciphertexts.rs
index 8f857cf1444..b27ffbc53a1 100644
--- a/zebra-chain/src/orchard/note/ciphertexts.rs
+++ b/zebra-chain/src/orchard/note/ciphertexts.rs
@@ -1,5 +1,7 @@
 //! Encrypted parts of Orchard notes.
 
+// FIXME: make it generic and add support for OrchardZSA (the encrypted note size for it is not 580!)
+
 use std::{fmt, io};
 
 use serde_big_array::BigArray;
diff --git a/zebra-chain/src/parameters/network_upgrade.rs b/zebra-chain/src/parameters/network_upgrade.rs
index 356ae86577f..ab66fbc7895 100644
--- a/zebra-chain/src/parameters/network_upgrade.rs
+++ b/zebra-chain/src/parameters/network_upgrade.rs
@@ -530,7 +530,12 @@ impl From<zcash_protocol::consensus::NetworkUpgrade> for NetworkUpgrade {
             zcash_protocol::consensus::NetworkUpgrade::Heartwood => Self::Heartwood,
             zcash_protocol::consensus::NetworkUpgrade::Canopy => Self::Canopy,
             zcash_protocol::consensus::NetworkUpgrade::Nu5 => Self::Nu5,
+            // FIXME: remove cfg
+            #[cfg(zcash_unstable = "nu6")]
             zcash_protocol::consensus::NetworkUpgrade::Nu6 => Self::Nu6,
+            // FIXME: remove cfg and process Nu7 properly (uses Self::Nu6 for now)
+            #[cfg(zcash_unstable = "nu6")]
+            zcash_protocol::consensus::NetworkUpgrade::Nu7 => Self::Nu6,
         }
     }
 }
diff --git a/zebra-chain/src/primitives/zcash_primitives.rs b/zebra-chain/src/primitives/zcash_primitives.rs
index 7ab2f32d751..be90f18ed23 100644
--- a/zebra-chain/src/primitives/zcash_primitives.rs
+++ b/zebra-chain/src/primitives/zcash_primitives.rs
@@ -137,6 +137,16 @@ impl zp_tx::components::orchard::MapAuth<orchard::bundle::Authorized, orchard::bundle::Authorized>
+impl zp_tx::components::issuance::MapIssueAuth<orchard::issuance::Signed, orchard::issuance::Signed>
+    for IdentityMap
+{
+    fn map_issue_authorization(&self, s: orchard::issuance::Signed) -> orchard::issuance::Signed {
+        s
+    }
+}
+
 #[derive(Debug)]
 struct PrecomputedAuth<'a> {
     _phantom: std::marker::PhantomData<&'a ()>,
@@ -146,6 +156,14 @@ impl<'a> zp_tx::Authorization for PrecomputedAuth<'a> {
     type TransparentAuth = TransparentAuth<'a>;
     type SaplingAuth = sapling_crypto::bundle::Authorized;
     type OrchardAuth = orchard::bundle::Authorized;
+
+    // FIXME: is this correct?
+    #[cfg(zcash_unstable = "nu6")]
+    type OrchardZsaAuth = orchard::bundle::Authorized;
+
+    // FIXME: is this correct?
+    #[cfg(zcash_unstable = "nu6")]
+    type IssueAuth = orchard::issuance::Signed;
 }
 
 // End of (mostly) copied code
@@ -275,7 +293,14 @@ impl<'a> PrecomputedTxData<'a> {
         };
         let tx_data: zp_tx::TransactionData<PrecomputedAuth> = alt_tx
             .into_data()
-            .map_authorization(f_transparent, IdentityMap, IdentityMap);
+            // FIXME: do we need to pass another arg values or orchard_zsa and issue instead of IdentityMap?
+            .map_authorization(
+                f_transparent,
+                IdentityMap,
+                IdentityMap,
+                IdentityMap,
+                IdentityMap,
+            );
 
         PrecomputedTxData {
             tx_data,
diff --git a/zebra-consensus/src/primitives/halo2.rs b/zebra-consensus/src/primitives/halo2.rs
index ffc58a5feb8..447d9bbd449 100644
--- a/zebra-consensus/src/primitives/halo2.rs
+++ b/zebra-consensus/src/primitives/halo2.rs
@@ -10,7 +10,7 @@ use std::{
 
 use futures::{future::BoxFuture, FutureExt};
 use once_cell::sync::Lazy;
-use orchard::circuit::VerifyingKey;
+use orchard::{circuit::VerifyingKey, orchard_flavor::OrchardVanilla};
 use rand::{thread_rng, CryptoRng, RngCore};
 use thiserror::Error;
 
@@ -75,7 +75,8 @@ pub type ItemVerifyingKey = VerifyingKey;
 
 lazy_static::lazy_static! {
     /// The halo2 proof verifying key.
-    pub static ref VERIFYING_KEY: ItemVerifyingKey = ItemVerifyingKey::build();
+    // FIXME: support OrchardZSA?
+    pub static ref VERIFYING_KEY: ItemVerifyingKey = ItemVerifyingKey::build::<OrchardVanilla>();
 }
 
 // === TEMPORARY BATCH HALO2 SUBSTITUTE ===
@@ -143,6 +144,15 @@ impl From<&zebra_chain::orchard::ShieldedData> for Item {
             .flags
             .contains(zebra_chain::orchard::Flags::ENABLE_OUTPUTS);
 
+        // FIXME: simplify the flags creation - make `Flags::from_parts` method pub?
+        // FIXME: support OrchardZSA?
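+        // Map the (enable_spend, enable_output) pair onto the `orchard`
+        // crate's predefined ZSA-less flag constants: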
+        let flags = match (enable_spend, enable_output) {
+            (false, false) => orchard::builder::BundleType::DISABLED.flags(),
+            (false, true) => orchard::bundle::Flags::SPENDS_DISABLED_WITHOUT_ZSA,
+            (true, false) => orchard::bundle::Flags::OUTPUTS_DISABLED,
+            (true, true) => orchard::bundle::Flags::ENABLED_WITHOUT_ZSA,
+        };
+
         let instances = shielded_data
             .actions()
             .map(|action| {
@@ -155,8 +165,7 @@
                     ))
                     .expect("should be a valid redpallas spendauth verification key"),
                     note::ExtractedNoteCommitment::from_bytes(&action.cm_x.into()).unwrap(),
-                    enable_spend,
-                    enable_output,
+                    flags,
                 )
             })
             .collect();
diff --git a/zebra-consensus/src/primitives/halo2/tests.rs b/zebra-consensus/src/primitives/halo2/tests.rs
index e654adcc546..9b5c367e640 100644
--- a/zebra-consensus/src/primitives/halo2/tests.rs
+++ b/zebra-consensus/src/primitives/halo2/tests.rs
@@ -11,6 +11,8 @@ use orchard::{
     bundle::Flags,
     circuit::ProvingKey,
     keys::{FullViewingKey, Scope, SpendingKey},
+    note::AssetBase,
+    orchard_flavor::OrchardVanilla,
     value::NoteValue,
     Anchor, Bundle,
 };
@@ -23,9 +25,10 @@ use zebra_chain::{
 
 use crate::primitives::halo2::*;
 
+// FIXME: add support for OrchardZSA (see OrchardVanilla and AssetBase::native() usage below)
 #[allow(dead_code, clippy::print_stdout)]
 fn generate_test_vectors() {
-    let proving_key = ProvingKey::build();
+    let proving_key = ProvingKey::build::<OrchardVanilla>();
 
     let rng = OsRng;
 
@@ -50,11 +53,17 @@
 
     for _ in 0..num_recipients {
         builder
-            .add_output(None, recipient, NoteValue::from_raw(note_value), None)
+            .add_output(
+                None,
+                recipient,
+                NoteValue::from_raw(note_value),
+                AssetBase::native(),
+                None,
+            )
             .unwrap();
     }
 
-    let bundle: Bundle<_, i64> = builder.build(rng).unwrap().unwrap().0;
+    let bundle: Bundle<_, i64, OrchardVanilla> = builder.build(rng).unwrap().unwrap().0;
 
     let bundle = bundle
         .create_proof(&proving_key, rng)
@@ -79,7 +88,14 @@
                 rk: <[u8; 32]>::from(a.rk()).into(),
                 cm_x: pallas::Base::from_repr(a.cmx().into()).unwrap(),
                 ephemeral_key: a.encrypted_note().epk_bytes.try_into().unwrap(),
-                enc_ciphertext: a.encrypted_note().enc_ciphertext.into(),
+                // FIXME: support OrchardZSA too, 580 works for OrchardVanilla only!
+                // FIXME: consider more "type safe" way to do the following conversion
+                // (now it goes through &[u8])
+                enc_ciphertext: <[u8; 580]>::try_from(
+                    a.encrypted_note().enc_ciphertext.as_ref(),
+                )
+                .unwrap()
+                .into(),
                 out_ciphertext: a.encrypted_note().out_ciphertext.into(),
             };
             zebra_chain::orchard::shielded_data::AuthorizedAction {
diff --git a/zebra-network/src/config.rs b/zebra-network/src/config.rs
index 00f4f8b4460..7936ea0e787 100644
--- a/zebra-network/src/config.rs
+++ b/zebra-network/src/config.rs
@@ -2,7 +2,6 @@
 use std::{
     collections::HashSet,
-    ffi::OsString,
     io::{self, ErrorKind},
     net::{IpAddr, SocketAddr},
     time::Duration,
 };
@@ -10,11 +9,11 @@ use std::{
 
 use indexmap::IndexSet;
 use serde::{de, Deserialize, Deserializer};
-use tempfile::NamedTempFile;
-use tokio::{fs, io::AsyncWriteExt};
-use tracing::Span;
+use tokio::fs;
+use tracing::Span;
 
 use zebra_chain::{
+    common::atomic_write,
     parameters::{
         testnet::{self, ConfiguredActivationHeights, ConfiguredFundingStreams},
         Magic, Network, NetworkKind,
@@ -503,90 +502,36 @@ impl Config {
         // Make a newline-separated list
         let peer_data = peer_list.join("\n");
 
-        // Write to a temporary file, so the cache is not corrupted if Zebra shuts down or crashes
-        // at the same time.
- // - // # Concurrency - // - // We want to use async code to avoid blocking the tokio executor on filesystem operations, - // but `tempfile` is implemented using non-asyc methods. So we wrap its filesystem - // operations in `tokio::spawn_blocking()`. - // - // TODO: split this out into an atomic_write_to_tmp_file() method if we need to re-use it - - // Create the peer cache directory if needed - let peer_cache_dir = peer_cache_file - .parent() - .expect("cache path always has a network directory") - .to_owned(); - tokio::fs::create_dir_all(&peer_cache_dir).await?; - - // Give the temporary file a similar name to the permanent cache file, - // but hide it in directory listings. - let mut tmp_peer_cache_prefix: OsString = ".tmp.".into(); - tmp_peer_cache_prefix.push( - peer_cache_file - .file_name() - .expect("cache file always has a file name"), - ); - - // Create the temporary file. - // Do blocking filesystem operations on a dedicated thread. + // Write the peer cache file atomically so the cache is not corrupted if Zebra shuts down + // or crashes. let span = Span::current(); - let tmp_peer_cache_file = tokio::task::spawn_blocking(move || { - span.in_scope(move || { - // Put the temporary file in the same directory as the permanent file, - // so atomic filesystem operations are possible. - tempfile::Builder::new() - .prefix(&tmp_peer_cache_prefix) - .tempfile_in(peer_cache_dir) - }) + let write_result = tokio::task::spawn_blocking(move || { + span.in_scope(move || atomic_write(peer_cache_file, peer_data.as_bytes())) }) .await - .expect("unexpected panic creating temporary peer cache file")?; - - // Write the list to the file asynchronously, by extracting the inner file, using it, - // then combining it back into a type that will correctly drop the file on error. - let (tmp_peer_cache_file, tmp_peer_cache_path) = tmp_peer_cache_file.into_parts(); - let mut tmp_peer_cache_file = tokio::fs::File::from_std(tmp_peer_cache_file); - tmp_peer_cache_file.write_all(peer_data.as_bytes()).await?; - - let tmp_peer_cache_file = - NamedTempFile::from_parts(tmp_peer_cache_file, tmp_peer_cache_path); - - // Atomically replace the current cache with the temporary cache. - // Do blocking filesystem operations on a dedicated thread. 
-        let span = Span::current();
-        tokio::task::spawn_blocking(move || {
-            span.in_scope(move || {
-                let result = tmp_peer_cache_file.persist(&peer_cache_file);
-
-                // Drops the temp file if needed
-                match result {
-                    Ok(_temp_file) => {
-                        info!(
-                            cached_ip_count = ?peer_list.len(),
-                            ?peer_cache_file,
-                            "updated cached peer IP addresses"
-                        );
+        .expect("could not write the peer cache file")?;
+
+        match write_result {
+            Ok(peer_cache_file) => {
+                info!(
+                    cached_ip_count = ?peer_list.len(),
+                    ?peer_cache_file,
+                    "updated cached peer IP addresses"
+                );
 
-                        for ip in &peer_list {
-                            metrics::counter!(
-                                "zcash.net.peers.cache",
-                                "cache" => peer_cache_file.display().to_string(),
-                                "remote_ip" => ip.to_string()
-                            )
-                            .increment(1);
-                        }
-
-                        Ok(())
-                    }
-                    Err(error) => Err(error.error),
+                for ip in &peer_list {
+                    metrics::counter!(
+                        "zcash.net.peers.cache",
+                        "cache" => peer_cache_file.display().to_string(),
+                        "remote_ip" => ip.to_string()
+                    )
+                    .increment(1);
                 }
-            })
-        })
-        .await
-        .expect("unexpected panic making temporary peer cache file permanent")
+
+                Ok(())
+            }
+            Err(error) => Err(error.error),
+        }
     }
 }
diff --git a/zebra-network/src/config/cache_dir.rs b/zebra-network/src/config/cache_dir.rs
index 99b75f9f4e1..1e80b27bb9a 100644
--- a/zebra-network/src/config/cache_dir.rs
+++ b/zebra-network/src/config/cache_dir.rs
@@ -2,7 +2,7 @@
 
 use std::path::{Path, PathBuf};
 
-use zebra_chain::parameters::Network;
+use zebra_chain::{common::default_cache_dir, parameters::Network};
 
 /// A cache directory config field.
 ///
@@ -56,12 +56,7 @@ impl CacheDir {
     /// Returns the `zebra-network` base cache directory, if enabled.
     pub fn cache_dir(&self) -> Option<PathBuf> {
         match self {
-            Self::IsEnabled(is_enabled) => is_enabled.then(|| {
-                dirs::cache_dir()
-                    .unwrap_or_else(|| std::env::current_dir().unwrap().join("cache"))
-                    .join("zebra")
-            }),
-
+            Self::IsEnabled(is_enabled) => is_enabled.then(default_cache_dir),
             Self::CustomPath(cache_dir) => Some(cache_dir.to_owned()),
         }
     }
diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml
index 5c188b34b40..8d0992dcf5a 100644
--- a/zebra-node-services/Cargo.toml
+++ b/zebra-node-services/Cargo.toml
@@ -34,7 +34,7 @@ rpc-client = [
     "serde_json",
 ]
 
-shielded-scan = ["tokio"]
+shielded-scan = []
 
 [dependencies]
 zebra-chain = { path = "../zebra-chain" , version = "1.0.0-beta.39" }
@@ -48,7 +48,7 @@ jsonrpc-core = { version = "18.0.0", optional = true }
 reqwest = { version = "0.11.26", default-features = false, features = ["rustls-tls"], optional = true }
 serde = { version = "1.0.204", optional = true }
 serde_json = { version = "1.0.122", optional = true }
-tokio = { version = "1.39.2", features = ["time"], optional = true }
+tokio = { version = "1.39.2", features = ["time", "sync"] }
 
 [dev-dependencies]
diff --git a/zebra-node-services/src/mempool.rs b/zebra-node-services/src/mempool.rs
index 98c1969bbad..fbaaf029c75 100644
--- a/zebra-node-services/src/mempool.rs
+++ b/zebra-node-services/src/mempool.rs
@@ -4,6 +4,7 @@
 
 use std::collections::HashSet;
 
+use tokio::sync::oneshot;
 use zebra_chain::transaction::{self, UnminedTx, UnminedTxId};
 
 #[cfg(feature = "getblocktemplate-rpcs")]
@@ -114,13 +115,11 @@ pub enum Response {
     /// Returns matching cached rejected [`UnminedTxId`]s from the mempool,
     RejectedTransactionIds(HashSet<UnminedTxId>),
 
-    /// Returns a list of queue results.
-    ///
-    /// These are the results of the initial queue checks.
-    /// The transaction may also fail download or verification later.
+    /// Returns a list of initial queue check results and a oneshot receiver
+    /// for awaiting download and/or verification results.
     ///
     /// Each result matches the request at the corresponding vector index.
-    Queued(Vec<Result<(), BoxError>>),
+    Queued(Vec<Result<oneshot::Receiver<Result<(), BoxError>>, BoxError>>),
 
     /// Confirms that the mempool has checked for recently verified transactions.
     CheckedForVerifiedTransactions,
diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml
index e45df94b000..babae9123f1 100644
--- a/zebra-rpc/Cargo.toml
+++ b/zebra-rpc/Cargo.toml
@@ -87,6 +87,8 @@ tracing = "0.1.39"
 
 hex = { version = "0.4.3", features = ["serde"] }
 serde = { version = "1.0.204", features = ["serde_derive"] }
+# For the `stop` RPC method.
+nix = { version = "0.29.0", features = ["signal"] }
 
 zcash_primitives = { workspace = true, features = ["transparent-inputs"] }
diff --git a/zebra-rpc/qa/README.md b/zebra-rpc/qa/README.md
new file mode 100644
index 00000000000..cc46d7b54ef
--- /dev/null
+++ b/zebra-rpc/qa/README.md
@@ -0,0 +1,88 @@
+The [pull-tester](/pull-tester/) folder contains a script to call
+multiple tests from the [rpc-tests](/rpc-tests/) folder.
+
+Every pull request to the zebra repository is built and run through
+the regression test suite. You can also run all or only individual
+tests locally.
+
+Test dependencies
+=================
+
+Before running the tests, the following must be installed.
+
+Unix
+----
+
+The `zmq`, `toml` and `base58` Python libraries are required. On Ubuntu or Debian-based
+distributions they can be installed via:
+```
+sudo apt-get install python3-zmq python3-base58 python3-toml
+```
+
+OS X
+------
+
+```
+pip3 install pyzmq base58 toml
+```
+
+Running tests locally
+=====================
+
+Make sure the `zebrad` binary exists in the `../target/debug/` folder or set the binary path with:
+```
+export CARGO_BIN_EXE_zebrad=/path/to/zebrad
+```
+
+You can run any single test by calling
+
+    ./qa/pull-tester/rpc-tests.py <testname>
+
+Run the regression test suite with
+
+    ./qa/pull-tester/rpc-tests.py
+
+By default, tests will be run in parallel. To specify how many jobs to run,
+append `--jobs=n` (default n=4).
+
+If you want to create a basic coverage report for the RPC test suite, append `--coverage`.
+
+Possible options, which apply to each individual test run:
+
+```
+  -h, --help            show this help message and exit
+  --nocleanup           Leave zcashds and test.* datadir on exit or error
+  --noshutdown          Don't stop zcashds after the test execution
+  --srcdir=SRCDIR       Source directory containing zcashd/zcash-cli
+                        (default: ../../src)
+  --tmpdir=TMPDIR       Root directory for datadirs
+  --tracerpc            Print out all RPC calls as they are made
+  --coveragedir=COVERAGEDIR
+                        Write tested RPC commands into this directory
+```
+
+If you set the environment variable `PYTHON_DEBUG=1` you will get some debug
+output (example: `PYTHON_DEBUG=1 qa/pull-tester/rpc-tests.py wallet`).
+
+A 200-block -regtest blockchain and wallets for four nodes
+are created the first time a regression test is run and
+are stored in the cache/ directory. Each node has the miner
+subsidy from 25 mature blocks (25*10=250 ZEC) in its wallet.
+
+After the first run, the cache/ blockchain and wallets are
+copied into a temporary directory and used as the initial
+test state.
+
+If you get into a bad state, you should be able
+to recover with:
+
+```bash
+rm -rf cache
+killall zcashd
+```
+
+Writing tests
+=============
+You are encouraged to write tests for new or existing features.
+Further information about the test framework and individual RPC
+tests is found in [rpc-tests](rpc-tests).
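+
+For example, to run the `reindex` test on its own and keep its data
+directories around for inspection afterwards:
+
+```
+./qa/pull-tester/rpc-tests.py reindex --nocleanup
+```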
diff --git a/zebra-rpc/qa/base_config.toml b/zebra-rpc/qa/base_config.toml
new file mode 100644
index 00000000000..502a2a75b1d
--- /dev/null
+++ b/zebra-rpc/qa/base_config.toml
@@ -0,0 +1,12 @@
+[mining]
+miner_address = "t27eWDgjFYJGVXmzrXeVjnb5J3uXDM9xH9v"
+
+[network]
+listen_addr = "127.0.0.1:0"
+network = "Regtest"
+
+[rpc]
+listen_addr = "127.0.0.1:0"
+
+[state]
+cache_dir = ""
diff --git a/zebra-rpc/qa/pull-tester/rpc-tests.py b/zebra-rpc/qa/pull-tester/rpc-tests.py
new file mode 100755
index 00000000000..00194f0aa53
--- /dev/null
+++ b/zebra-rpc/qa/pull-tester/rpc-tests.py
@@ -0,0 +1,401 @@
+#!/usr/bin/env python3
+# Copyright (c) 2014-2016 The Bitcoin Core developers
+# Copyright (c) 2020-2022 The Zcash developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or https://www.opensource.org/licenses/mit-license.php .
+"""
+rpc-tests.py - run regression test suite
+
+This module calls down into individual test cases via subprocess. It will
+forward all unrecognized arguments onto the individual test scripts.
+
+RPC tests are disabled on Windows by default. Use --force to run them anyway.
+
+For a description of arguments recognized by test scripts, see
+`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
+
+"""
+
+import argparse
+import configparser
+import os
+import time
+import shutil
+import sys
+import subprocess
+import tempfile
+import re
+
+SERIAL_SCRIPTS = [
+    # These tests involve enough shielded spends (consuming all CPU
+    # cores) that we can't run them in parallel.
+]
+
+FLAKY_SCRIPTS = [
+    # These tests have intermittent failures that we haven't diagnosed yet.
+]
+
+BASE_SCRIPTS = [
+    # Scripts that are run by the travis build process
+    # Longest test should go first, to favor running tests in parallel
+    'reindex.py',
+    'getmininginfo.py']
+
+ZMQ_SCRIPTS = [
+    # ZMQ tests can only be run if bitcoin was built with ZMQ enabled.
+    # call rpc-tests.py with --nozmq to explicitly exclude these tests.
+]
+
+EXTENDED_SCRIPTS = [
+    # These tests are not run by the travis build process.
+    # Longest test should go first, to favor running tests in parallel
+]
+
+ALL_SCRIPTS = SERIAL_SCRIPTS + FLAKY_SCRIPTS + BASE_SCRIPTS + ZMQ_SCRIPTS + EXTENDED_SCRIPTS
+
+def main():
+    # Parse arguments and pass through unrecognised args
+    parser = argparse.ArgumentParser(add_help=False,
+                                     usage='%(prog)s [rpc-tests.py options] [script options] [scripts]',
+                                     description=__doc__,
+                                     epilog='''
+    Help text and arguments for individual test script:''',
+                                     formatter_class=argparse.RawTextHelpFormatter)
+    parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
+    parser.add_argument('--deterministic', '-d', action='store_true', help='make the output a bit closer to deterministic in order to compare runs.')
+    parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude. Do not include the .py extension in the name.')
+    parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
+    parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
+    parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
+    parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
+    parser.add_argument('--machines', '-m', type=int, default=-1, help='how many machines to shard the tests over. must also provide individual shard index. Default=-1 (no sharding).')
+    parser.add_argument('--rpcgroup', '-r', type=int, default=-1, help='individual shard index. must also provide how many machines to shard the tests over. Default=-1 (no sharding).')
+    parser.add_argument('--nozmq', action='store_true', help='do not run the zmq tests')
+    args, unknown_args = parser.parse_known_args()
+
+    # Create a set to store arguments and create the passon string
+    tests = set(arg for arg in unknown_args if arg[:2] != "--")
+    passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
+
+    # Read config generated by configure.
+    config = configparser.ConfigParser()
+    config.read_file(open(os.path.dirname(__file__) + "/tests_config.ini"))
+
+    enable_wallet = config["components"].getboolean("ENABLE_WALLET")
+    enable_utils = config["components"].getboolean("ENABLE_UTILS")
+    enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
+    enable_zmq = config["components"].getboolean("ENABLE_ZMQ") and not args.nozmq
+
+    if config["environment"]["EXEEXT"] == ".exe" and not args.force:
+        # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
+        # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
+        print("Tests currently disabled on Windows by default. Use --force option to enable")
+        sys.exit(0)
+
+    if not (enable_wallet and enable_utils and enable_bitcoind):
+        print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
+        print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
+        sys.exit(0)
+
+    # python3-zmq may not be installed. Handle this gracefully and with some helpful info
+    if enable_zmq:
+        try:
+            import zmq
+            zmq # Silences pyflakes
+        except ImportError:
+            print("ERROR: \"import zmq\" failed. Use --nozmq to run without the ZMQ tests. "
+                  "To run zmq tests, see dependency info in /qa/README.md.")
+            raise
+
+    # Build list of tests
+    if tests:
+        # Individual tests have been specified. Run specified tests that exist
+        # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
+        test_list = [t for t in ALL_SCRIPTS if
+                (t in tests or re.sub(".py$", "", t) in tests)]
+
+        print("Running individually selected tests: ")
+        for t in test_list:
+            print("\t" + t)
+    else:
+        # No individual tests have been specified. Run base tests, and
+        # optionally ZMQ tests and extended tests.
+        test_list = SERIAL_SCRIPTS + FLAKY_SCRIPTS + BASE_SCRIPTS
+        if enable_zmq:
+            test_list += ZMQ_SCRIPTS
+        if args.extended:
+            test_list += EXTENDED_SCRIPTS
+        # TODO: BASE_SCRIPTS and EXTENDED_SCRIPTS are sorted by runtime
+        # (for parallel running efficiency). This combined list is no
+        # longer sorted.
+
+    # Remove the test cases that the user has explicitly asked to exclude.
+    if args.exclude:
+        for exclude_test in args.exclude.split(','):
+            if exclude_test + ".py" in test_list:
+                test_list.remove(exclude_test + ".py")
+
+    if not test_list:
+        print("No valid test scripts specified. Check that your test is in one "
+              "of the test lists in rpc-tests.py, or run rpc-tests.py with no arguments to run all tests")
+        sys.exit(0)
+
+    if args.help:
+        # Print help for rpc-tests.py, then print help of the first script and exit.
+        parser.print_help()
+        subprocess.check_call((config["environment"]["SRCDIR"] + '/qa/rpc-tests/' + test_list[0]).split() + ['-h'])
+        sys.exit(0)
+
+
+    if (args.rpcgroup == -1) != (args.machines == -1):
+        print("ERROR: Please use both -m and -r options when using parallel rpc_groups.")
+        sys.exit(0)
+    if args.machines == 0:
+        print("ERROR: -m/--machines must be greater than 0")
+        sys.exit(0)
+    if args.machines > 0 and (args.rpcgroup >= args.machines):
+        print("ERROR: -r/--rpcgroup must be less than -m/--machines")
+        sys.exit(0)
+    if args.rpcgroup != -1 and args.machines != -1 and args.machines > args.rpcgroup:
+        # Ceiling division using floor division, by inverting the world.
+        # https://stackoverflow.com/a/17511341
+        k = -(len(test_list) // -args.machines)
+        split_list = list(test_list[i*k:(i+1)*k] for i in range(args.machines))
+        tests_to_run = split_list[args.rpcgroup]
+    else:
+        tests_to_run = test_list
+    all_passed = run_tests(
+        RPCTestHandler,
+        tests_to_run,
+        config["environment"]["SRCDIR"],
+        config["environment"]["BUILDDIR"],
+        config["environment"]["EXEEXT"],
+        args.jobs,
+        args.coverage,
+        args.deterministic,
+        passon_args)
+    sys.exit(not all_passed)
+
+def run_tests(test_handler, test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=False, deterministic=False, args=[]):
+    BOLD = ("","")
+    if os.name == 'posix':
+        # primitive formatting on supported
+        # terminal via ANSI escape sequences:
+        BOLD = ('\033[0m', '\033[1m')
+
+    #Set env vars
+    if "CARGO_BIN_EXE_zebrad" not in os.environ:
+        os.environ["CARGO_BIN_EXE_zebrad"] = os.path.join("..", "target", "debug", "zebrad")
+
+    tests_dir = src_dir + '/qa/rpc-tests/'
+
+    flags = ["--srcdir={}/src".format(build_dir)] + args
+    flags.append("--cachedir=%s/qa/cache" % build_dir)
+
+    if enable_coverage:
+        coverage = RPCCoverage()
+        flags.append(coverage.flag)
+        print("Initializing coverage directory at %s\n" % coverage.dir)
+    else:
+        coverage = None
+
+    if len(test_list) > 1 and jobs > 1:
+        # Populate cache
+        subprocess.check_output([tests_dir + 'create_cache.py'] + flags)
+
+    #Run Tests
+    time_sum = 0
+    time0 = time.time()
+
+    job_queue = test_handler(jobs, tests_dir, test_list, flags)
+
+    max_len_name = len(max(test_list, key=len))
+    total_count = 0
+    passed_count = 0
+    results = []
+    try:
+        for _ in range(len(test_list)):
+            (name, stdout, stderr, passed, duration) = job_queue.get_next(deterministic)
+            time_sum += duration
+
+            print('\n' + BOLD[1] + name + BOLD[0] + ":")
+            print('' if passed else stdout + '\n', end='')
+            # TODO: Zebrad always produces the welcome message on stderr.
+            # Ignoring stderr output here until that is fixed.
+            #print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
+            print("Pass: %s%s%s" % (BOLD[1], passed, BOLD[0]), end='')
+            if deterministic:
+                print("\n", end='')
+            else:
+                print(", Duration: %s s" % (duration,))
+            total_count += 1
+            if passed:
+                passed_count += 1
+
+            new_result = "%s | %s" % (name.ljust(max_len_name), str(passed).ljust(6))
+            if not deterministic:
+                new_result += (" | %s s" % (duration,))
+            results.append(new_result)
+    except (InterruptedError, KeyboardInterrupt):
+        print('\nThe following tests were running when interrupted:')
+        for j in job_queue.jobs:
+            print("•", j[0])
+        print('\n', end='')
+
+    all_passed = passed_count == total_count
+
+    if all_passed:
+        success_rate = "True"
+    else:
+        success_rate = "%d/%d" % (passed_count, total_count)
+    header = "%s | PASSED" % ("TEST".ljust(max_len_name),)
+    footer = "%s | %s" % ("ALL".ljust(max_len_name), str(success_rate).ljust(6))
+    if not deterministic:
+        header += " | DURATION"
+        footer += " | %s s (accumulated)\nRuntime: %s s" % (time_sum, int(time.time() - time0))
+    print(
+        BOLD[1] + header + BOLD[0] + "\n\n"
+        + "\n".join(sorted(results)) + "\n"
+        + BOLD[1] + footer + BOLD[0])
+
+    if coverage:
+        coverage.report_rpc_coverage()
+
+        print("Cleaning up coverage data")
+        coverage.cleanup()
+
+    return all_passed
+
+class RPCTestHandler:
+    """
+    Trigger the test scripts passed in via the list.
+    """
+
+    def __init__(self, num_tests_parallel, tests_dir, test_list=None, flags=None):
+        assert(num_tests_parallel >= 1)
+        self.num_jobs = num_tests_parallel
+        self.tests_dir = tests_dir
+        self.test_list = test_list
+        self.flags = flags
+        self.num_running = 0
+        # In case there is a graveyard of zombie bitcoinds, we can apply a
+        # pseudorandom offset to hopefully jump over them.
+        # (625 is PORT_RANGE/MAX_NODES)
+        self.portseed_offset = int(time.time() * 1000) % 625
+        self.jobs = []
+
+    def start_test(self, args, stdout, stderr):
+        return subprocess.Popen(
+            args,
+            universal_newlines=True,
+            stdout=stdout,
+            stderr=stderr)
+
+    def get_next(self, deterministic):
+        while self.num_running < self.num_jobs and self.test_list:
+            # Add tests
+            self.num_running += 1
+            t = self.test_list.pop(0)
+            port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
+            log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
+            log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
+            self.jobs.append((t,
+                              time.time(),
+                              self.start_test((self.tests_dir + t).split() + self.flags + port_seed,
+                                              log_stdout,
+                                              log_stderr),
+                              log_stdout,
+                              log_stderr))
+            # Run serial scripts on their own. We always run these first,
+            # so we won't have added any other jobs yet.
+            if t in SERIAL_SCRIPTS:
+                break
+        if not self.jobs:
+            raise IndexError('pop from empty list')
+        while True:
+            # Return first proc that finishes
+            time.sleep(.5)
+            for j in self.jobs:
+                (name, time0, proc, log_out, log_err) = j
+                if proc.poll() is not None:
+                    log_out.seek(0), log_err.seek(0)
+                    [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
+                    log_out.close(), log_err.close()
+                    # We can't check for an empty stderr in Zebra so we just check for the return code.
+                    passed = proc.returncode == 0
+                    self.num_running -= 1
+                    self.jobs.remove(j)
+                    return name, stdout, stderr, passed, int(time.time() - time0)
+            if not deterministic:
+                print('.', end='', flush=True)
+
+
+class RPCCoverage(object):
+    """
+    Coverage reporting utilities for pull-tester.
+ + Coverage calculation works by having each test script subprocess write + coverage files into a particular directory. These files contain the RPC + commands invoked during testing, as well as a complete listing of RPC + commands per `bitcoin-cli help` (`rpc_interface.txt`). + + After all tests complete, the commands run are combined and diff'd against + the complete list to calculate uncovered RPC commands. + + See also: qa/rpc-tests/test_framework/coverage.py + + """ + def __init__(self): + self.dir = tempfile.mkdtemp(prefix="coverage") + self.flag = '--coveragedir=%s' % self.dir + + def report_rpc_coverage(self): + """ + Print out RPC commands that were unexercised by tests. + + """ + uncovered = self._get_uncovered_rpc_commands() + + if uncovered: + print("Uncovered RPC commands:") + print("".join((" - %s\n" % i) for i in sorted(uncovered))) + else: + print("All RPC commands covered.") + + def cleanup(self): + return shutil.rmtree(self.dir) + + def _get_uncovered_rpc_commands(self): + """ + Return a set of currently untested RPC commands. + + """ + # This is shared from `qa/rpc-tests/test-framework/coverage.py` + reference_filename = 'rpc_interface.txt' + coverage_file_prefix = 'coverage.' + + coverage_ref_filename = os.path.join(self.dir, reference_filename) + coverage_filenames = set() + all_cmds = set() + covered_cmds = set() + + if not os.path.isfile(coverage_ref_filename): + raise RuntimeError("No coverage reference found") + + with open(coverage_ref_filename, 'r', encoding='utf8') as f: + all_cmds.update([i.strip() for i in f.readlines()]) + + for root, dirs, files in os.walk(self.dir): + for filename in files: + if filename.startswith(coverage_file_prefix): + coverage_filenames.add(os.path.join(root, filename)) + + for filename in coverage_filenames: + with open(filename, 'r', encoding='utf8') as f: + covered_cmds.update([i.strip() for i in f.readlines()]) + + return all_cmds - covered_cmds + + +if __name__ == '__main__': + main() diff --git a/zebra-rpc/qa/pull-tester/tests_config.ini b/zebra-rpc/qa/pull-tester/tests_config.ini new file mode 100755 index 00000000000..f3df78bc0f2 --- /dev/null +++ b/zebra-rpc/qa/pull-tester/tests_config.ini @@ -0,0 +1,19 @@ +# Copyright (c) 2013-2016 The Bitcoin Core developers +# Copyright (c) 2020-2022 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . + +# These environment variables are set by the build process and read by +# rpc-tests.py + +[environment] +SRCDIR=. +BUILDDIR=. +EXEEXT= + +[components] +# Which components are enabled. These are commented out by `configure` if they were disabled when running config. +ENABLE_WALLET=true +ENABLE_UTILS=true +ENABLE_BITCOIND=true +ENABLE_ZMQ=false diff --git a/zebra-rpc/qa/rpc-tests/create_cache.py b/zebra-rpc/qa/rpc-tests/create_cache.py new file mode 100755 index 00000000000..4403e4ae312 --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/create_cache.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# Copyright (c) 2016 The Bitcoin Core developers +# Copyright (c) 2020-2022 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . 
+ +# +# Helper script to create the cache +# (see BitcoinTestFramework.setup_chain) +# + +from test_framework.test_framework import BitcoinTestFramework + +class CreateCache(BitcoinTestFramework): + + def __init__(self): + super().__init__() + + # Test network and test nodes are not required: + self.num_nodes = 0 + self.nodes = [] + + def setup_network(self): + pass + + def run_test(self): + pass + +if __name__ == '__main__': + CreateCache().main() + diff --git a/zebra-rpc/qa/rpc-tests/getmininginfo.py b/zebra-rpc/qa/rpc-tests/getmininginfo.py new file mode 100755 index 00000000000..ddd024aed2c --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/getmininginfo.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import start_nodes + + +class GetMiningInfoTest(BitcoinTestFramework): + ''' + Test getmininginfo. + ''' + + def __init__(self): + super().__init__() + self.num_nodes = 1 + self.cache_behavior = 'clean' + + def setup_network(self, split=False): + self.nodes = start_nodes(self.num_nodes, self.options.tmpdir) + self.is_network_split = False + self.sync_all() + + def run_test(self): + node = self.nodes[0] + + info = node.getmininginfo() + assert(info['blocks'] == 0) + # No blocks have been mined yet, so these fields should not be present. + assert('currentblocksize' not in info) + assert('currentblocktx' not in info) + + node.generate(1) + + info = node.getmininginfo() + assert(info['blocks'] == 1) + # One block has been mined, so these fields should now be present. + assert('currentblocksize' in info) + assert('currentblocktx' in info) + assert(info['currentblocksize'] > 0) + # The transaction count doesn't include the coinbase + assert(info['currentblocktx'] == 0) + + +if __name__ == '__main__': + GetMiningInfoTest().main() diff --git a/zebra-rpc/qa/rpc-tests/reindex.py b/zebra-rpc/qa/rpc-tests/reindex.py new file mode 100755 index 00000000000..a301c377ee0 --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/reindex.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2017-2022 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . + +# +# Test -reindex and -reindex-chainstate with CheckBlockIndex +# + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal, \ + start_node, stop_node, wait_bitcoinds +import time + +class ReindexTest(BitcoinTestFramework): + + def __init__(self): + super().__init__() + self.cache_behavior = 'clean' + self.num_nodes = 1 + + def setup_network(self): + self.nodes = [] + self.is_network_split = False + self.nodes.append(start_node(0, self.options.tmpdir)) + + def reindex(self, justchainstate=False): + # When zebra reindexes, it will only do it up to the finalized chain height. + # This happens after the first 100 blocks, so we need to generate 100 blocks + # for the reindex to be able to catch block 1. 
+ finalized_height = 100 + + self.nodes[0].generate(finalized_height) + blockcount = self.nodes[0].getblockcount() - (finalized_height - 1) + + stop_node(self.nodes[0], 0) + wait_bitcoinds() + + self.nodes[0]=start_node(0, self.options.tmpdir) + + while self.nodes[0].getblockcount() < blockcount: + time.sleep(0.1) + assert_equal(self.nodes[0].getblockcount(), blockcount) + print("Success") + + def run_test(self): + self.reindex(False) + self.reindex(True) + self.reindex(False) + self.reindex(True) + +if __name__ == '__main__': + ReindexTest().main() diff --git a/zebra-rpc/qa/rpc-tests/test_framework/__init__.py b/zebra-rpc/qa/rpc-tests/test_framework/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/zebra-rpc/qa/rpc-tests/test_framework/authproxy.py b/zebra-rpc/qa/rpc-tests/test_framework/authproxy.py new file mode 100644 index 00000000000..e555706365e --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/authproxy.py @@ -0,0 +1,166 @@ +""" + Copyright 2011 Jeff Garzik + + AuthServiceProxy has the following improvements over python-jsonrpc's + ServiceProxy class: + + - HTTP connections persist for the life of the AuthServiceProxy object + (if server supports HTTP/1.1) + - sends protocol 'version', per JSON-RPC 1.1 + - sends proper, incrementing 'id' + - sends Basic HTTP authentication headers + - parses all JSON numbers that look like floats as Decimal + - uses standard Python json lib + + Previous copyright, from python-jsonrpc/jsonrpc/proxy.py: + + Copyright (c) 2007 Jan-Klaas Kollhof + + This file is part of jsonrpc. + + jsonrpc is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + This software is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public License + along with this software; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +import base64 +import decimal +import json +import logging +from http.client import HTTPConnection, HTTPSConnection, BadStatusLine +from urllib.parse import urlparse + +USER_AGENT = "AuthServiceProxy/0.1" + +HTTP_TIMEOUT = 600 + +log = logging.getLogger("BitcoinRPC") + +class JSONRPCException(Exception): + def __init__(self, rpc_error): + Exception.__init__(self, rpc_error.get("message")) + self.error = rpc_error + +def EncodeDecimal(o): + if isinstance(o, decimal.Decimal): + return str(o) + raise TypeError(repr(o) + " is not JSON serializable") + + +class AuthServiceProxy(): + __id_count = 0 + + def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None): + self.__service_url = service_url + self._service_name = service_name + self.__url = urlparse(service_url) + (user, passwd) = (self.__url.username, self.__url.password) + try: + user = user.encode('utf8') + except AttributeError: + pass + try: + passwd = passwd.encode('utf8') + except AttributeError: + pass + authpair = user + b':' + passwd + self.__auth_header = b'Basic ' + base64.b64encode(authpair) + + self.timeout = timeout + self._set_conn(connection) + + def _set_conn(self, connection=None): + port = 80 if self.__url.port is None else self.__url.port + if connection: + self.__conn = connection + self.timeout = connection.timeout + elif self.__url.scheme == 'https': + self.__conn = HTTPSConnection(self.__url.hostname, port, timeout=self.timeout) + else: + self.__conn = HTTPConnection(self.__url.hostname, port, timeout=self.timeout) + + def __getattr__(self, name): + if name.startswith('__') and name.endswith('__'): + # Python internal stuff + raise AttributeError + if self._service_name is not None: + name = "%s.%s" % (self._service_name, name) + return AuthServiceProxy(self.__service_url, name, connection=self.__conn) + + def _request(self, method, path, postdata): + ''' + Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout). + This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5. + ''' + headers = {'Host': self.__url.hostname, + 'User-Agent': USER_AGENT, + 'Authorization': self.__auth_header, + 'Content-type': 'application/json'} + try: + self.__conn.request(method, path, postdata, headers) + return self._get_response() + except Exception as e: + # If connection was closed, try again. + # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset. + # ConnectionResetError happens on FreeBSD with Python 3.4. 
+ # This can be simplified now that we depend on Python 3 (previously, we could not + # refer to BrokenPipeError or ConnectionResetError which did not exist on Python 2) + if ((isinstance(e, BadStatusLine) and e.line == "''") + or e.__class__.__name__ in ('BrokenPipeError', 'ConnectionResetError')): + self.__conn.close() + self.__conn.request(method, path, postdata, headers) + return self._get_response() + else: + raise + + def __call__(self, *args): + AuthServiceProxy.__id_count += 1 + + log.debug("-%s-> %s %s"%(AuthServiceProxy.__id_count, self._service_name, + json.dumps(args, default=EncodeDecimal))) + postdata = json.dumps({'version': '1.1', + 'method': self._service_name, + 'params': args, + 'id': AuthServiceProxy.__id_count}, default=EncodeDecimal) + response = self._request('POST', self.__url.path, postdata) + if response['error'] is not None: + raise JSONRPCException(response['error']) + elif 'result' not in response: + raise JSONRPCException({ + 'code': -343, 'message': 'missing JSON-RPC result'}) + else: + return response['result'] + + def _batch(self, rpc_call_list): + postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal) + log.debug("--> "+postdata) + return self._request('POST', self.__url.path, postdata) + + def _get_response(self): + http_response = self.__conn.getresponse() + if http_response is None: + raise JSONRPCException({ + 'code': -342, 'message': 'missing HTTP response from server'}) + + content_type = http_response.getheader('Content-Type') + if content_type != 'application/json': + raise JSONRPCException({ + 'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)}) + + responsedata = http_response.read().decode('utf8') + response = json.loads(responsedata, parse_float=decimal.Decimal) + if "error" in response and response["error"] is None: + log.debug("<-%s- %s"%(response["id"], json.dumps(response["result"], default=EncodeDecimal))) + else: + log.debug("<-- "+responsedata) + return response diff --git a/zebra-rpc/qa/rpc-tests/test_framework/bignum.py b/zebra-rpc/qa/rpc-tests/test_framework/bignum.py new file mode 100644 index 00000000000..f56cea98e7a --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/bignum.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python3 +# +# bignum.py +# +# This file is copied from python-bitcoinlib. +# +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . 
+# + +"""Bignum routines""" + +import struct + + +# generic big endian MPI format + +def bn_bytes(v, have_ext=False): + ext = 0 + if have_ext: + ext = 1 + return ((v.bit_length()+7)//8) + ext + +def bn2bin(v): + s = bytearray() + i = bn_bytes(v) + while i > 0: + s.append((v >> ((i-1) * 8)) & 0xff) + i -= 1 + return s + +def bin2bn(s): + l = 0 + for ch in s: + l = (l << 8) | ch + return l + +def bn2mpi(v): + have_ext = False + if v.bit_length() > 0: + have_ext = (v.bit_length() & 0x07) == 0 + + neg = False + if v < 0: + neg = True + v = -v + + s = struct.pack(b">I", bn_bytes(v, have_ext)) + ext = bytearray() + if have_ext: + ext.append(0) + v_bin = bn2bin(v) + if neg: + if have_ext: + ext[0] |= 0x80 + else: + v_bin[0] |= 0x80 + return s + ext + v_bin + +def mpi2bn(s): + if len(s) < 4: + return None + s_size = bytes(s[:4]) + v_len = struct.unpack(b">I", s_size)[0] + if len(s) != (v_len + 4): + return None + if v_len == 0: + return 0 + + v_str = bytearray(s[4:]) + neg = False + i = v_str[0] + if i & 0x80: + neg = True + i &= ~0x80 + v_str[0] = i + + v = bin2bn(v_str) + + if neg: + return -v + return v + +# bitcoin-specific little endian format, with implicit size +def mpi2vch(s): + r = s[4:] # strip size + r = r[::-1] # reverse string, converting BE->LE + return r + +def bn2vch(v): + return bytes(mpi2vch(bn2mpi(v))) + +def vch2mpi(s): + r = struct.pack(b">I", len(s)) # size + r += s[::-1] # reverse string, converting LE->BE + return r + +def vch2bn(s): + return mpi2bn(vch2mpi(s)) + diff --git a/zebra-rpc/qa/rpc-tests/test_framework/blockstore.py b/zebra-rpc/qa/rpc-tests/test_framework/blockstore.py new file mode 100644 index 00000000000..e83ee5fab58 --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/blockstore.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python3 +# BlockStore: a helper class that keeps a map of blocks and implements +# helper functions for responding to getheaders and getdata, +# and for constructing a getheaders message +# + +from .mininode import CBlock, CBlockHeader, CBlockLocator, CTransaction, msg_block, msg_headers, msg_tx + +import sys +from io import BytesIO +import dbm.ndbm + +class BlockStore(): + def __init__(self, datadir): + self.blockDB = dbm.ndbm.open(datadir + "/blocks", 'c') + self.currentBlock = 0 + self.headers_map = dict() + + def close(self): + self.blockDB.close() + + def get(self, blockhash): + serialized_block = None + try: + serialized_block = self.blockDB[repr(blockhash)] + except KeyError: + return None + f = BytesIO(serialized_block) + ret = CBlock() + ret.deserialize(f) + ret.calc_sha256() + return ret + + def get_header(self, blockhash): + try: + return self.headers_map[blockhash] + except KeyError: + return None + + # Note: this pulls full blocks out of the database just to retrieve + # the headers -- perhaps we could keep a separate data structure + # to avoid this overhead. 
+ def headers_for(self, locator, hash_stop, current_tip=None): + if current_tip is None: + current_tip = self.currentBlock + current_block_header = self.get_header(current_tip) + if current_block_header is None: + return None + + response = msg_headers() + headersList = [ current_block_header ] + maxheaders = 2000 + while (headersList[0].sha256 not in locator.vHave): + prevBlockHash = headersList[0].hashPrevBlock + prevBlockHeader = self.get_header(prevBlockHash) + if prevBlockHeader is not None: + headersList.insert(0, prevBlockHeader) + else: + break + headersList = headersList[:maxheaders] # truncate if we have too many + hashList = [x.sha256 for x in headersList] + index = len(headersList) + if (hash_stop in hashList): + index = hashList.index(hash_stop)+1 + response.headers = headersList[:index] + return response + + def add_block(self, block): + block.calc_sha256() + try: + self.blockDB[repr(block.sha256)] = bytes(block.serialize()) + except TypeError as e: + print("Unexpected error: ", sys.exc_info()[0], e.args) + self.currentBlock = block.sha256 + self.headers_map[block.sha256] = CBlockHeader(block) + + def add_header(self, header): + self.headers_map[header.sha256] = header + + def get_blocks(self, inv): + responses = [] + for i in inv: + if (i.type == 2): # MSG_BLOCK + block = self.get(i.hash) + if block is not None: + responses.append(msg_block(block)) + return responses + + def get_locator(self, current_tip=None): + if current_tip is None: + current_tip = self.currentBlock + r = [] + counter = 0 + step = 1 + lastBlock = self.get(current_tip) + while lastBlock is not None: + r.append(lastBlock.hashPrevBlock) + for i in range(step): + lastBlock = self.get(lastBlock.hashPrevBlock) + if lastBlock is None: + break + counter += 1 + if counter > 10: + step *= 2 + locator = CBlockLocator() + locator.vHave = r + return locator + +class TxStore(object): + def __init__(self, datadir): + self.txDB = dbm.ndbm.open(datadir + "/transactions", 'c') + + def close(self): + self.txDB.close() + + def get(self, txhash): + serialized_tx = None + try: + serialized_tx = self.txDB[repr(txhash)] + except KeyError: + return None + f = BytesIO(serialized_tx) + ret = CTransaction() + ret.deserialize(f) + ret.calc_sha256() + return ret + + def add_transaction(self, tx): + tx.calc_sha256() + try: + self.txDB[repr(tx.sha256)] = bytes(tx.serialize()) + except TypeError as e: + print("Unexpected error: ", sys.exc_info()[0], e.args) + + def get_transactions(self, inv): + responses = [] + for i in inv: + if (i.type == 1): # MSG_TX + tx = self.get(i.hash) + if tx is not None: + responses.append(msg_tx(tx)) + return responses diff --git a/zebra-rpc/qa/rpc-tests/test_framework/blocktools.py b/zebra-rpc/qa/rpc-tests/test_framework/blocktools.py new file mode 100644 index 00000000000..9c6fa430d2d --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/blocktools.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python3 +# blocktools.py - utilities for manipulating blocks and transactions +# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2017-2022 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . 
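+#
+# For reference (illustrative note): the regtest nBits value 0x200f0f0f used
+# below encodes a target of 0x0f0f0f << 232 (exponent byte 0x20, mantissa
+# 0x0f0f0f), so solving a block takes almost no work.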
+ +from hashlib import blake2b + +from .mininode import ( + CBlock, CTransaction, CTxIn, CTxOut, COutPoint, + BLOSSOM_POW_TARGET_SPACING_RATIO, +) +from .script import CScript, OP_0, OP_EQUAL, OP_HASH160, OP_TRUE, OP_CHECKSIG + +# Create a block (with regtest difficulty) +def create_block(hashprev, coinbase, nTime=None, nBits=None, hashBlockCommitments=None): + block = CBlock() + if nTime is None: + import time + block.nTime = int(time.time()+600) + else: + block.nTime = nTime + block.hashPrevBlock = hashprev + if hashBlockCommitments is None: + # By default NUs up to Sapling are active from block 1, so we set this to the empty root. + hashBlockCommitments = 0x3e49b5f954aa9d3545bc6c37744661eea48d7c34e3000d82b7f0010c30f4c2fb + block.hashBlockCommitments = hashBlockCommitments + if nBits is None: + block.nBits = 0x200f0f0f # difficulty retargeting is disabled in REGTEST chainparams + else: + block.nBits = nBits + block.vtx.append(coinbase) + block.hashMerkleRoot = block.calc_merkle_root() + block.hashAuthDataRoot = block.calc_auth_data_root() + block.calc_sha256() + return block + +def derive_block_commitments_hash(chain_history_root, auth_data_root): + digest = blake2b( + digest_size=32, + person=b'ZcashBlockCommit') + digest.update(chain_history_root) + digest.update(auth_data_root) + digest.update(b'\x00' * 32) + return digest.digest() + +def serialize_script_num(value): + r = bytearray(0) + if value == 0: + return r + neg = value < 0 + absvalue = -value if neg else value + while (absvalue): + r.append(int(absvalue & 0xff)) + absvalue >>= 8 + if r[-1] & 0x80: + r.append(0x80 if neg else 0) + elif neg: + r[-1] |= 0x80 + return r + +# Create a coinbase transaction, assuming no miner fees. +# If pubkey is passed in, the coinbase output will be a P2PK output; +# otherwise an anyone-can-spend output. +def create_coinbase(height, pubkey=None, after_blossom=False, outputs=[], lockboxvalue=0): + coinbase = CTransaction() + coinbase.nExpiryHeight = height + coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), + CScript([height, OP_0]), 0xffffffff)) + coinbaseoutput = CTxOut() + coinbaseoutput.nValue = int(12.5*100000000) + if after_blossom: + coinbaseoutput.nValue //= BLOSSOM_POW_TARGET_SPACING_RATIO + halvings = height // 150 # regtest + coinbaseoutput.nValue >>= halvings + coinbaseoutput.nValue -= lockboxvalue + + if (pubkey != None): + coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG]) + else: + coinbaseoutput.scriptPubKey = CScript([OP_TRUE]) + coinbase.vout = [ coinbaseoutput ] + + if len(outputs) == 0 and halvings == 0: # regtest + froutput = CTxOut() + froutput.nValue = coinbaseoutput.nValue // 5 + # regtest + fraddr = bytearray([0x67, 0x08, 0xe6, 0x67, 0x0d, 0xb0, 0xb9, 0x50, + 0xda, 0xc6, 0x80, 0x31, 0x02, 0x5c, 0xc5, 0xb6, + 0x32, 0x13, 0xa4, 0x91]) + froutput.scriptPubKey = CScript([OP_HASH160, fraddr, OP_EQUAL]) + coinbaseoutput.nValue -= froutput.nValue + coinbase.vout.append(froutput) + + coinbaseoutput.nValue -= sum(output.nValue for output in outputs) + assert coinbaseoutput.nValue >= 0, coinbaseoutput.nValue + coinbase.vout.extend(outputs) + coinbase.calc_sha256() + return coinbase + +# Create a transaction with an anyone-can-spend output, that spends the +# nth output of prevtx. 
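+# Illustrative use (hypothetical values, ignores coinbase maturity):
+#     coinbase = create_coinbase(height=101)
+#     spend = create_transaction(coinbase, 0, b"", coinbase.vout[0].nValue)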
+def create_transaction(prevtx, n, sig, value): + tx = CTransaction() + assert(n < len(prevtx.vout)) + tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff)) + tx.vout.append(CTxOut(value, b"")) + tx.calc_sha256() + return tx diff --git a/zebra-rpc/qa/rpc-tests/test_framework/comptool.py b/zebra-rpc/qa/rpc-tests/test_framework/comptool.py new file mode 100755 index 00000000000..47e4efb3272 --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/comptool.py @@ -0,0 +1,446 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2017-2022 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . + +from .blockstore import BlockStore, TxStore +from .mininode import ( + CBlock, + CBlockHeader, + CTransaction, + CInv, + msg_block, + msg_getheaders, + msg_headers, + msg_inv, + msg_mempool, + msg_ping, + mininode_lock, + MAX_INV_SZ, + NodeConn, + NodeConnCB, +) +from .util import p2p_port + +import time + +''' +This is a tool for comparing two or more bitcoinds to each other +using a script provided. + +To use, create a class that implements get_tests(), and pass it in +as the test generator to TestManager. get_tests() should be a python +generator that returns TestInstance objects. See below for definition. + +In practice get_tests is always implemented on a subclass of ComparisonTestFramework. +''' + +# TestNode behaves as follows: +# Configure with a BlockStore and TxStore +# on_inv: log the message but don't request +# on_headers: log the chain tip +# on_pong: update ping response map (for synchronization) +# on_getheaders: provide headers via BlockStore +# on_getdata: provide blocks via BlockStore + +def wait_until(predicate, attempts=float('inf'), timeout=float('inf')): + attempt = 0 + elapsed = 0 + + while attempt < attempts and elapsed < timeout: + with mininode_lock: + if predicate(): + return True + attempt += 1 + elapsed += 0.05 + time.sleep(0.05) + + return False + +class RejectResult(object): + ''' + Outcome that expects rejection of a transaction or block. 
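+    For example (illustrative): RejectResult(16, b'bad-txns') matches any
+    reject with code 16 whose reason starts with b'bad-txns'.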
+ ''' + def __init__(self, code, reason=b''): + self.code = code + self.reason = reason + def match(self, other): + if self.code != other.code: + return False + return other.reason.startswith(self.reason) + def __repr__(self): + return '%i:%s' % (self.code,self.reason or '*') + +class TestNode(NodeConnCB): + + def __init__(self, block_store, tx_store): + NodeConnCB.__init__(self) + self.create_callback_map() + self.conn = None + self.bestblockhash = None + self.block_store = block_store + self.block_request_map = {} + self.tx_store = tx_store + self.tx_request_map = {} + self.block_reject_map = {} + self.tx_reject_map = {} + + # When the pingmap is non-empty we're waiting for + # a response + self.pingMap = {} + self.lastInv = [] + self.closed = False + + def on_close(self, conn): + self.closed = True + + def add_connection(self, conn): + self.conn = conn + + def on_headers(self, conn, message): + if len(message.headers) > 0: + best_header = message.headers[-1] + best_header.calc_sha256() + self.bestblockhash = best_header.sha256 + + def on_getheaders(self, conn, message): + response = self.block_store.headers_for(message.locator, message.hashstop) + if response is not None: + conn.send_message(response) + + def on_getdata(self, conn, message): + [conn.send_message(r) for r in self.block_store.get_blocks(message.inv)] + [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)] + + for i in message.inv: + if i.type == 1: + self.tx_request_map[i.hash] = True + elif i.type == 2: + self.block_request_map[i.hash] = True + + def on_inv(self, conn, message): + self.lastInv = [x.hash for x in message.inv] + + def on_pong(self, conn, message): + try: + del self.pingMap[message.nonce] + except KeyError: + raise AssertionError("Got pong for unknown ping [%s]" % repr(message)) + + def on_reject(self, conn, message): + if message.message == b'tx': + self.tx_reject_map[message.data] = RejectResult(message.code, message.reason) + if message.message == b'block': + self.block_reject_map[message.data] = RejectResult(message.code, message.reason) + + def send_inv(self, obj): + mtype = 2 if isinstance(obj, CBlock) else 1 + self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)])) + + def send_getheaders(self): + # We ask for headers from their last tip. + m = msg_getheaders() + m.locator = self.block_store.get_locator(self.bestblockhash) + self.conn.send_message(m) + + def send_header(self, header): + m = msg_headers() + m.headers.append(header) + self.conn.send_message(m) + + # This assumes BIP31 + def send_ping(self, nonce): + self.pingMap[nonce] = True + self.conn.send_message(msg_ping(nonce)) + + def received_ping_response(self, nonce): + return nonce not in self.pingMap + + def send_mempool(self): + self.lastInv = [] + self.conn.send_message(msg_mempool()) + +# TestInstance: +# +# Instances of these are generated by the test generator, and fed into the +# comptool. +# +# "blocks_and_transactions" should be an array of +# [obj, True/False/None, hash/None]: +# - obj is either a CBlock, CBlockHeader, or a CTransaction, and +# - the second value indicates whether the object should be accepted +# into the blockchain or mempool (for tests where we expect a certain +# answer), or "None" if we don't expect a certain answer and are just +# comparing the behavior of the nodes being tested. 
+# - the third value is the hash to test the tip against (if None or omitted, +# use the hash of the block) +# - NOTE: if a block header, no test is performed; instead the header is +# just added to the block_store. This is to facilitate block delivery +# when communicating with headers-first clients (when withholding an +# intermediate block). +# sync_every_block: if True, then each block will be inv'ed, synced, and +# nodes will be tested based on the outcome for the block. If False, +# then inv's accumulate until all blocks are processed (or max inv size +# is reached) and then sent out in one inv message. Then the final block +# will be synced across all connections, and the outcome of the final +# block will be tested. +# sync_every_tx: analogous to behavior for sync_every_block, except if outcome +# on the final tx is None, then contents of entire mempool are compared +# across all connections. (If outcome of final tx is specified as true +# or false, then only the last tx is tested against outcome.) + +class TestInstance(object): + def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False): + self.blocks_and_transactions = objects if objects else [] + self.sync_every_block = sync_every_block + self.sync_every_tx = sync_every_tx + +class TestManager(object): + + def __init__(self, testgen, datadir): + self.test_generator = testgen + self.connections = [] + self.test_nodes = [] + self.block_store = BlockStore(datadir) + self.tx_store = TxStore(datadir) + self.ping_counter = 1 + + def add_all_connections(self, nodes): + for i in range(len(nodes)): + # Create a p2p connection to each node + test_node = TestNode(self.block_store, self.tx_store) + self.test_nodes.append(test_node) + self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node)) + # Make sure the TestNode (callback class) has a reference to its + # associated NodeConn + test_node.add_connection(self.connections[-1]) + + def wait_for_disconnections(self): + def disconnected(): + return all(node.closed for node in self.test_nodes) + return wait_until(disconnected, timeout=10) + + def wait_for_verack(self): + def veracked(): + return all(node.verack_received for node in self.test_nodes) + return wait_until(veracked, timeout=10) + + def wait_for_pings(self, counter): + def received_pongs(): + return all(node.received_ping_response(counter) for node in self.test_nodes) + return wait_until(received_pongs) + + # sync_blocks: Wait for all connections to request the blockhash given + # then send get_headers to find out the tip of each node, and synchronize + # the response by using a ping (and waiting for pong with same nonce). 
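+    # Wait for nodes to request the block (50ms sleep * 20 tries * num_blocks)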
+ def sync_blocks(self, blockhash, num_blocks): + def blocks_requested(): + return all( + blockhash in node.block_request_map and node.block_request_map[blockhash] + for node in self.test_nodes + ) + + # --> error if not requested + if not wait_until(blocks_requested, attempts=20*num_blocks): + # print [ c.cb.block_request_map for c in self.connections ] + raise AssertionError("Not all nodes requested block") + + # Send getheaders message + [ c.cb.send_getheaders() for c in self.connections ] + + # Send ping and wait for response -- synchronization hack + [ c.cb.send_ping(self.ping_counter) for c in self.connections ] + self.wait_for_pings(self.ping_counter) + self.ping_counter += 1 + + # Analogous to sync_block (see above) + def sync_transaction(self, txhash, num_events): + # Wait for nodes to request transaction (50ms sleep * 20 tries * num_events) + def transaction_requested(): + return all( + txhash in node.tx_request_map and node.tx_request_map[txhash] + for node in self.test_nodes + ) + + # --> error if not requested + if not wait_until(transaction_requested, attempts=20*num_events): + # print [ c.cb.tx_request_map for c in self.connections ] + raise AssertionError("Not all nodes requested transaction") + + # Get the mempool + [ c.cb.send_mempool() for c in self.connections ] + + # Send ping and wait for response -- synchronization hack + [ c.cb.send_ping(self.ping_counter) for c in self.connections ] + self.wait_for_pings(self.ping_counter) + self.ping_counter += 1 + + # Sort inv responses from each node + with mininode_lock: + [ c.cb.lastInv.sort() for c in self.connections ] + + # Verify that the tip of each connection all agree with each other, and + # with the expected outcome (if given) + def check_results(self, blockhash, outcome): + with mininode_lock: + for c in self.connections: + if outcome is None: + if c.cb.bestblockhash != self.connections[0].cb.bestblockhash: + return False + elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code + if c.cb.bestblockhash == blockhash: + return False + if blockhash not in c.cb.block_reject_map: + print('Block not in reject map: %064x' % (blockhash)) + return False + if not outcome.match(c.cb.block_reject_map[blockhash]): + print('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash)) + return False + elif ((c.cb.bestblockhash == blockhash) != outcome): + if outcome is True and blockhash in c.cb.block_reject_map: + print('Block rejected with %s instead of accepted: %064x' % (c.cb.block_reject_map[blockhash], blockhash)) + return False + return True + + # Either check that the mempools all agree with each other, or that + # txhash's presence in the mempool matches the outcome specified. + # This is somewhat of a strange comparison, in that we're either comparing + # a particular tx to an outcome, or the entire mempools altogether; + # perhaps it would be useful to add the ability to check explicitly that + # a particular tx's existence in the mempool is the same across all nodes. 
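+    # For reference: True means the tx must be in every mempool, False means
+    # absent from every mempool, None means the mempools only have to agree
+    # with each other, and RejectResult means absent plus a matching reject.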
+ def check_mempool(self, txhash, outcome): + with mininode_lock: + for c in self.connections: + if outcome is None: + # Make sure the mempools agree with each other + if c.cb.lastInv != self.connections[0].cb.lastInv: + # print c.rpc.getrawmempool() + return False + elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code + if txhash in c.cb.lastInv: + return False + if txhash not in c.cb.tx_reject_map: + print('Tx not in reject map: %064x' % (txhash)) + return False + if not outcome.match(c.cb.tx_reject_map[txhash]): + print('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash)) + return False + elif ((txhash in c.cb.lastInv) != outcome): + # print c.rpc.getrawmempool(), c.cb.lastInv + return False + return True + + def run(self): + # Wait until verack is received + self.wait_for_verack() + + test_number = 1 + for test_instance in self.test_generator.get_tests(): + # We use these variables to keep track of the last block + # and last transaction in the tests, which are used + # if we're not syncing on every block or every tx. + [ block, block_outcome, tip ] = [ None, None, None ] + [ tx, tx_outcome ] = [ None, None ] + invqueue = [] + + for test_obj in test_instance.blocks_and_transactions: + b_or_t = test_obj[0] + outcome = test_obj[1] + # Determine if we're dealing with a block or tx + if isinstance(b_or_t, CBlock): # Block test runner + block = b_or_t + block_outcome = outcome + tip = block.sha256 + # each test_obj can have an optional third argument + # to specify the tip we should compare with + # (default is to use the block being tested) + if len(test_obj) >= 3: + tip = test_obj[2] + + # Add to shared block_store, set as current block + # If there was an open getdata request for the block + # previously, and we didn't have an entry in the + # block_store, then immediately deliver, because the + # node wouldn't send another getdata request while + # the earlier one is outstanding. + first_block_with_hash = True + if self.block_store.get(block.sha256) is not None: + first_block_with_hash = False + with mininode_lock: + self.block_store.add_block(block) + for c in self.connections: + if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True: + # There was a previous request for this block hash + # Most likely, we delivered a header for this block + # but never had the block to respond to the getdata + c.send_message(msg_block(block)) + else: + c.cb.block_request_map[block.sha256] = False + # Either send inv's to each node and sync, or add + # to invqueue for later inv'ing. + if (test_instance.sync_every_block): + # if we expect success, send inv and sync every block + # if we expect failure, just push the block and see what happens. 
+ if outcome == True: + [ c.cb.send_inv(block) for c in self.connections ] + self.sync_blocks(block.sha256, 1) + else: + [ c.send_message(msg_block(block)) for c in self.connections ] + [ c.cb.send_ping(self.ping_counter) for c in self.connections ] + self.wait_for_pings(self.ping_counter) + self.ping_counter += 1 + if (not self.check_results(tip, outcome)): + raise AssertionError("Test failed at test %d" % test_number) + else: + invqueue.append(CInv(2, block.sha256)) + elif isinstance(b_or_t, CBlockHeader): + block_header = b_or_t + self.block_store.add_header(block_header) + [ c.cb.send_header(block_header) for c in self.connections ] + + else: # Tx test runner + assert(isinstance(b_or_t, CTransaction)) + tx = b_or_t + tx_outcome = outcome + # Add to shared tx store and clear map entry + with mininode_lock: + self.tx_store.add_transaction(tx) + for c in self.connections: + c.cb.tx_request_map[tx.sha256] = False + # Again, either inv to all nodes or save for later + if (test_instance.sync_every_tx): + [ c.cb.send_inv(tx) for c in self.connections ] + self.sync_transaction(tx.sha256, 1) + if (not self.check_mempool(tx.sha256, outcome)): + raise AssertionError("Test failed at test %d" % test_number) + else: + invqueue.append(CInv(1, tx.sha256)) + # Ensure we're not overflowing the inv queue + if len(invqueue) == MAX_INV_SZ: + [ c.send_message(msg_inv(invqueue)) for c in self.connections ] + invqueue = [] + + # Do final sync if we weren't syncing on every block or every tx. + if (not test_instance.sync_every_block and block is not None): + if len(invqueue) > 0: + [ c.send_message(msg_inv(invqueue)) for c in self.connections ] + invqueue = [] + self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions)) + if (not self.check_results(tip, block_outcome)): + raise AssertionError("Block test failed at test %d" % test_number) + if (not test_instance.sync_every_tx and tx is not None): + if len(invqueue) > 0: + [ c.send_message(msg_inv(invqueue)) for c in self.connections ] + invqueue = [] + self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions)) + if (not self.check_mempool(tx.sha256, tx_outcome)): + raise AssertionError("Mempool test failed at test %d" % test_number) + + print("Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ]) + test_number += 1 + + [ c.disconnect_node() for c in self.connections ] + self.wait_for_disconnections() + self.block_store.close() + self.tx_store.close() diff --git a/zebra-rpc/qa/rpc-tests/test_framework/coverage.py b/zebra-rpc/qa/rpc-tests/test_framework/coverage.py new file mode 100644 index 00000000000..02e1b7b4da6 --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/coverage.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2020-2022 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . + +""" +This module contains utilities for doing coverage analysis on the RPC +interface. + +It provides a way to track which RPC commands are exercised during +testing. + +""" +import os + + +REFERENCE_FILENAME = 'rpc_interface.txt' + + +class AuthServiceProxyWrapper(object): + """ + An object that wraps AuthServiceProxy to record specific RPC calls. + + """ + def __init__(self, auth_service_proxy_instance, coverage_logfile=None): + """ + Kwargs: + auth_service_proxy_instance (AuthServiceProxy): the instance + being wrapped. 
+            coverage_logfile (str): if specified, write each service_name
+                out to a file when called.
+
+        """
+        self.auth_service_proxy_instance = auth_service_proxy_instance
+        self.coverage_logfile = coverage_logfile
+
+    def __getattr__(self, *args, **kwargs):
+        return_val = self.auth_service_proxy_instance.__getattr__(
+            *args, **kwargs)
+
+        return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
+
+    def __call__(self, *args, **kwargs):
+        """
+        Delegates to AuthServiceProxy, then writes the particular RPC method
+        called to a file.
+
+        """
+        return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
+        rpc_method = self.auth_service_proxy_instance._service_name
+
+        if self.coverage_logfile:
+            with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
+                f.write("%s\n" % rpc_method)
+
+        return return_val
+
+    @property
+    def url(self):
+        return self.auth_service_proxy_instance.url
+
+
+def get_filename(dirname, n_node):
+    """
+    Get a filename unique to the test process ID and node.
+
+    This file will contain a list of RPC commands covered.
+    """
+    pid = str(os.getpid())
+    return os.path.join(
+        dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
+
+
+def write_all_rpc_commands(dirname, node):
+    """
+    Write out a list of all RPC functions available in `bitcoin-cli` for
+    coverage comparison. This will only happen once per coverage
+    directory.
+
+    Args:
+        dirname (str): temporary test dir
+        node (AuthServiceProxy): client
+
+    Returns:
+        bool. if the RPC interface file was written.
+
+    """
+    filename = os.path.join(dirname, REFERENCE_FILENAME)
+
+    if os.path.isfile(filename):
+        return False
+
+    help_output = node.help().split('\n')
+    commands = set()
+
+    for line in help_output:
+        line = line.strip()
+
+        # Ignore blanks and headers
+        if line and not line.startswith('='):
+            commands.add("%s\n" % line.split()[0])
+
+    with open(filename, 'w', encoding='utf8') as f:
+        f.writelines(list(commands))
+
+    return True
diff --git a/zebra-rpc/qa/rpc-tests/test_framework/equihash.py b/zebra-rpc/qa/rpc-tests/test_framework/equihash.py
new file mode 100755
index 00000000000..e05544fb4c1
--- /dev/null
+++ b/zebra-rpc/qa/rpc-tests/test_framework/equihash.py
@@ -0,0 +1,294 @@
+from operator import itemgetter
+import struct
+from functools import reduce
+
+DEBUG = False
+VERBOSE = False
+
+
+word_size = 32
+word_mask = (1<<word_size)-1
+
+def expand_array(inp, out_len, bit_len, byte_pad=0):
+    assert bit_len >= 8 and word_size >= 7+bit_len
+    out_width = (bit_len+7)//8 + byte_pad
+    assert out_len == 8*out_width*len(inp)//bit_len
+    out = bytearray(out_len)
+
+    bit_len_mask = (1<<bit_len)-1
+
+    # The acc_bits least-significant bits of acc_value represent a bit sequence
+    # in big-endian order.
+    acc_bits = 0
+    acc_value = 0
+
+    j = 0
+    for i in range(len(inp)):
+        acc_value = ((acc_value << 8) & word_mask) | inp[i]
+        acc_bits += 8
+
+        # When we have bit_len or more bits in the accumulator, write the next
+        # output element.
+        if acc_bits >= bit_len:
+            acc_bits -= bit_len
+            for x in range(byte_pad, out_width):
+                out[j+x] = (
+                    # Big-endian
+                    acc_value >> (acc_bits+(8*(out_width-x-1)))
+                ) & (
+                    # Apply bit_len_mask across byte boundaries
+                    (bit_len_mask >> (8*(out_width-x-1))) & 0xFF
+                )
+            j += out_width
+
+    return out
+
+def compress_array(inp, out_len, bit_len, byte_pad=0):
+    assert bit_len >= 8 and word_size >= 7+bit_len
+
+    in_width = (bit_len+7)//8 + byte_pad
+    assert out_len == bit_len*len(inp)//(8*in_width)
+    out = bytearray(out_len)
+
+    bit_len_mask = (1 << bit_len) - 1
+
+    # The acc_bits least-significant bits of acc_value represent a bit sequence
+    # in big-endian order.
+    acc_bits = 0;
+    acc_value = 0;
+
+    j = 0
+    for i in range(out_len):
+        # When we have fewer than 8 bits left in the accumulator, read the next
+        # input element.
+        if acc_bits < 8:
+            acc_value = ((acc_value << bit_len) & word_mask) | inp[j]
+            for x in range(byte_pad, in_width):
+                acc_value = acc_value | (
+                    (
+                        # Apply bit_len_mask across byte boundaries
+                        inp[j+x] & ((bit_len_mask >> (8*(in_width-x-1))) & 0xFF)
+                    ) << (8*(in_width-x-1))); # Big-endian
+            j += in_width
+            acc_bits += bit_len
+
+        acc_bits -= 8
+        out[i] = (acc_value >> acc_bits) & 0xFF
+
+    return out
+
+def get_indices_from_minimal(minimal, bit_len):
+    eh_index_size = 4
+    assert (bit_len+7)//8 <= eh_index_size
+    len_indices = 8*eh_index_size*len(minimal)//bit_len
+    byte_pad = eh_index_size - (bit_len+7)//8
+    expanded = expand_array(minimal, len_indices, bit_len, byte_pad)
+    return [struct.unpack('>I', expanded[i:i+4])[0] for i in range(0, len_indices, eh_index_size)]
+
+def get_minimal_from_indices(indices, bit_len):
+    eh_index_size = 4
+    assert (bit_len+7)//8 <= eh_index_size
+    len_indices = len(indices)*eh_index_size
+    min_len = bit_len*len_indices//(8*eh_index_size)
+    byte_pad = eh_index_size - (bit_len+7)//8
+    byte_indices = bytearray(b''.join([struct.pack('>I', i) for i in indices]))
+    return compress_array(byte_indices, min_len, bit_len, byte_pad)
+
+
+def hash_nonce(digest, nonce):
+    for i in range(8):
+        digest.update(struct.pack('<I', nonce >> (32*i)))
+
+def hash_xi(digest, xi):
+    digest.update(struct.pack('<I', xi))
+
+def count_zeroes(h):
+    # Convert to binary string
+    if type(h) == bytearray:
+        h = ''.join('{0:08b}'.format(x, 'b') for x in h)
+    else:
+        h = ''.join('{0:08b}'.format(ord(x), 'b') for x in h)
+    # Count leading zeroes
+    return (h+'1').index('1')
+
+def has_collision(ha, hb, i, l):
+    res = [ha[j] == hb[j] for j in range((i-1)*l//8, i*l//8)]
+    return reduce(lambda x, y: x and y, res)
+
+def distinct_indices(a, b):
+    for i in a:
+        for j in b:
+            if i == j:
+                return False
+    return True
+
+def xor(ha, hb):
+    return bytearray(a^b for a,b in zip(ha,hb))
+
+def gbp_basic(digest, n, k):
+    '''Implementation of Basic Wagner'''
+    validate_params(n, k)
+    collision_length = n//(k+1)
+    hash_length = (k+1)*((collision_length+7)//8)
+    indices_per_hash_output = 512//n
+
+    # 1) Generate first list
+    if DEBUG: print('Generating first list')
+    X = []
+    tmp_hash = b''
+    for i in range(0, 2**(collision_length+1)):
+        r = i % indices_per_hash_output
+        if r == 0:
+            # X_i = H(I||V||x_i)
+            curr_digest = digest.copy()
+            hash_xi(curr_digest, i//indices_per_hash_output)
+            tmp_hash = curr_digest.digest()
+        X.append((
+            expand_array(bytearray(tmp_hash[r*n//8:(r+1)*n//8]),
+                hash_length, collision_length),
+            (i,)
+        ))
+
+    # 3) Repeat step 2 until 2n/(k+1) bits remain
+    for i in range(1, k):
+        if DEBUG: print('Round %d:' % i)
+
+        # 2a) Sort the list
+        if DEBUG: print('- Sorting list')
+        X.sort(key=itemgetter(0))
+        if DEBUG and VERBOSE:
+            for Xi in X[-32:]:
+                print('%s %s' % (print_hash(Xi[0]), Xi[1]))
+
+        Xc = []
+        while len(X) > 0:
+            # 2b) Find next set of unordered pairs with collisions on first n/(k+1) bits
+            j = 1
+            while j < len(X):
+                if not has_collision(X[-1][0], X[-1-j][0], i, collision_length):
+                    break
+                j += 1
+
+            # 2c) Store tuples (X_i ^ X_j, (i, j)) on the table
+            for l in range(0, j-1):
+                for m in range(l+1, j):
+                    # Check that there are no duplicate indices in tuples i and j
+                    if distinct_indices(X[-1-l][1], X[-1-m][1]):
+                        if X[-1-l][1][0] < X[-1-m][1][0]:
+                            concat = X[-1-l][1] + X[-1-m][1]
+                        else:
+                            concat = X[-1-m][1] + X[-1-l][1]
+                        Xc.append((xor(X[-1-l][0], X[-1-m][0]), concat))
+
+            # 2d) Drop this set
+            while j > 0:
+                X.pop(-1)
+                j -= 1
+        # 2e) Replace previous list with new list
+        X = Xc
+
+    # k+1) Find a collision on last 2n/(k+1) bits
+    if DEBUG:
+        print('Final round:')
+        print('- Sorting list')
+    X.sort(key=itemgetter(0))
+    if DEBUG and VERBOSE:
+        for Xi in X[-32:]:
+            print('%s %s' % (print_hash(Xi[0]), Xi[1]))
+    if DEBUG: print('- Finding collisions')
+    solns = []
+    while len(X) > 0:
+        j = 1
+        while j < len(X):
+            if not (has_collision(X[-1][0], X[-1-j][0], k, collision_length) and
+                    has_collision(X[-1][0], X[-1-j][0], k+1, collision_length)):
+                break
+            j += 1
+
+        for l in range(0, j-1):
+            for m in range(l+1, j):
+                res = xor(X[-1-l][0], X[-1-m][0])
+                if count_zeroes(res) == 8*hash_length and distinct_indices(X[-1-l][1], X[-1-m][1]):
+                    if DEBUG and VERBOSE:
+                        print('Found solution:')
+                        print('- %s %s' % (print_hash(X[-1-l][0]), X[-1-l][1]))
+                        print('- %s %s' % (print_hash(X[-1-m][0]), X[-1-m][1]))
+                    if X[-1-l][1][0] < X[-1-m][1][0]:
+                        solns.append(list(X[-1-l][1] + X[-1-m][1]))
+                    else:
+                        solns.append(list(X[-1-m][1] + X[-1-l][1]))
+
+        # 2d) Drop this set
+        while j > 0:
+            X.pop(-1)
+            j -= 1
+    return [get_minimal_from_indices(soln, collision_length+1) for soln in solns]
+
+def gbp_validate(digest, minimal, n, k):
+    validate_params(n, k)
+    collision_length = n//(k+1)
+    hash_length = (k+1)*((collision_length+7)//8)
+    indices_per_hash_output = 512//n
+    solution_width = (1 << k)*(collision_length+1)//8
+
+    if len(minimal) != solution_width:
+        print('Invalid solution length: %d (expected %d)' % \
+            (len(minimal), solution_width))
+        return False
+
+    X = []
+    for i in get_indices_from_minimal(minimal, collision_length+1):
+        r = i % indices_per_hash_output
+        # X_i = H(I||V||x_i)
+        curr_digest = digest.copy()
+        hash_xi(curr_digest, i//indices_per_hash_output)
+        tmp_hash = curr_digest.digest()
+        X.append((
+            expand_array(bytearray(tmp_hash[r*n//8:(r+1)*n//8]),
+                hash_length, collision_length),
+            (i,)
+        ))
+
+    for r in range(1, k+1):
+        Xc = []
+        for i in range(0, len(X), 2):
+            if not has_collision(X[i][0], X[i+1][0], r, collision_length):
+                print('Invalid solution: invalid collision length between StepRows')
+                return False
+            if X[i+1][1][0] < X[i][1][0]:
+                print('Invalid solution: Index tree incorrectly ordered')
+                return False
+            if not distinct_indices(X[i][1], X[i+1][1]):
+                print('Invalid solution: duplicate indices')
+                return False
+            Xc.append((xor(X[i][0], X[i+1][0]), X[i][1] + X[i+1][1]))
+        X = Xc
+
+    if len(X) != 1:
+        print('Invalid solution: incorrect length after end of rounds: %d' % len(X))
+        return False
+
+    if count_zeroes(X[0][0]) != 8*hash_length:
+        print('Invalid solution: incorrect number of zeroes: %d' % count_zeroes(X[0][0]))
+        return False
+
+    return True
+
+def zcash_person(n, k):
+    return b'ZcashPoW' + struct.pack('<II', n, k)
+
+def print_hash(h):
+    if type(h) == bytearray:
+        return ''.join('{0:02x}'.format(x, 'x') for x in h)
+    else:
+        return ''.join('{0:02x}'.format(ord(x), 'x') for x in h)
+
+def validate_params(n, k):
+    if (k >= n):
+        raise ValueError('n must be larger than k')
+    if (((n//(k+1))+1) >= 32):
+        raise ValueError('Parameters must satisfy n/(k+1)+1 < 32')
diff --git a/zebra-rpc/qa/rpc-tests/test_framework/flyclient.py b/zebra-rpc/qa/rpc-tests/test_framework/flyclient.py
new file mode 100644
index 00000000000..71221bdc9fa
--- /dev/null
+++ b/zebra-rpc/qa/rpc-tests/test_framework/flyclient.py
@@ -0,0 +1,207 @@
+from hashlib import blake2b
+import struct
+from typing import (List, Optional)
+
+from .mininode import (CBlockHeader, block_work_from_compact, ser_compactsize, ser_uint256)
+from .util import (NU5_BRANCH_ID, NU6_BRANCH_ID)
+
+def H(msg: bytes, consensusBranchId: int) -> bytes:
+    digest = blake2b(
+        digest_size=32,
+        person=b'ZcashHistory' + struct.pack("<I", consensusBranchId))
+    digest.update(msg)
+    return digest.digest()
+
+class ZcashMMRNode():
+    # leaf nodes have no children
+    left_child: Optional['ZcashMMRNode']
+    right_child: Optional['ZcashMMRNode']
+
+    # commitments
+    hashSubtreeCommitment: bytes
+    nEarliestTimestamp: int
+    nLatestTimestamp: int
+    nEarliestTargetBits: int
+    nLatestTargetBits: int
+    hashEarliestSaplingRoot: bytes # left child's sapling root
+    hashLatestSaplingRoot: bytes # right child's sapling root
+    nSubTreeTotalWork: int # total difficulty accumulated within each subtree
+    nEarliestHeight: int
+    nLatestHeight: int
+    nSaplingTxCount: int # number of Sapling transactions in block
+    hashEarliestOrchardRoot: Optional[bytes] # left child's Orchard root
+    hashLatestOrchardRoot: Optional[bytes] # right child's Orchard root
+    nOrchardTxCount: Optional[int] # number of Orchard transactions in block
+    consensusBranchId: bytes
+
+    @classmethod
+    def from_block(Z, block: CBlockHeader, height, sapling_root, sapling_tx_count, consensusBranchId, v2_data=None) -> 'ZcashMMRNode':
+        '''Create a leaf node from a block'''
+        if v2_data is not None:
+            assert consensusBranchId in [NU5_BRANCH_ID, NU6_BRANCH_ID]
+            orchard_root = v2_data[0]
+            orchard_tx_count = v2_data[1]
+        else:
+            orchard_root = None
+            orchard_tx_count = None
+
+        node = Z()
+        node.left_child = None
+        node.right_child = None
+        node.hashSubtreeCommitment = ser_uint256(block.rehash())
+        node.nEarliestTimestamp = block.nTime
+        node.nLatestTimestamp = block.nTime
+        node.nEarliestTargetBits = block.nBits
+        node.nLatestTargetBits = block.nBits
+        node.hashEarliestSaplingRoot = sapling_root
+        node.hashLatestSaplingRoot = sapling_root
+        node.nSubTreeTotalWork = block_work_from_compact(block.nBits)
+        node.nEarliestHeight = height
+        node.nLatestHeight = height
+        node.nSaplingTxCount = sapling_tx_count
+        node.hashEarliestOrchardRoot = orchard_root
+        node.hashLatestOrchardRoot = orchard_root
+        node.nOrchardTxCount = orchard_tx_count
+        node.consensusBranchId = consensusBranchId
+        return node
+
+    def serialize(self) -> bytes:
+        '''serializes a node'''
+        buf = b''
+        buf += self.hashSubtreeCommitment
+        buf += struct.pack("<I", self.nEarliestTimestamp)
+        buf += struct.pack("<I", self.nLatestTimestamp)
+        buf += struct.pack("<I", self.nEarliestTargetBits)
+        buf += struct.pack("<I", self.nLatestTargetBits)
+        buf += self.hashEarliestSaplingRoot
+        buf += self.hashLatestSaplingRoot
+        buf += ser_uint256(self.nSubTreeTotalWork)
+        buf += ser_compactsize(self.nEarliestHeight)
+        buf += ser_compactsize(self.nLatestHeight)
+        buf += ser_compactsize(self.nSaplingTxCount)
+        if self.hashEarliestOrchardRoot is not None:
+            buf += self.hashEarliestOrchardRoot
+            buf += self.hashLatestOrchardRoot
+            buf += ser_compactsize(self.nOrchardTxCount)
+        return buf
+
+def make_parent(
+        left_child: ZcashMMRNode,
+        right_child: ZcashMMRNode) -> ZcashMMRNode:
+    parent = ZcashMMRNode()
+    parent.left_child = left_child
+    parent.right_child = right_child
+    parent.hashSubtreeCommitment = H(
+        left_child.serialize() + right_child.serialize(),
+        left_child.consensusBranchId,
+    )
+    parent.nEarliestTimestamp = left_child.nEarliestTimestamp
+    parent.nLatestTimestamp = right_child.nLatestTimestamp
+    parent.nEarliestTargetBits = left_child.nEarliestTargetBits
+    parent.nLatestTargetBits = right_child.nLatestTargetBits
parent.hashEarliestSaplingRoot = left_child.hashEarliestSaplingRoot + parent.hashLatestSaplingRoot = right_child.hashLatestSaplingRoot + parent.nSubTreeTotalWork = left_child.nSubTreeTotalWork + right_child.nSubTreeTotalWork + parent.nEarliestHeight = left_child.nEarliestHeight + parent.nLatestHeight = right_child.nLatestHeight + parent.nSaplingTxCount = left_child.nSaplingTxCount + right_child.nSaplingTxCount + parent.hashEarliestOrchardRoot = left_child.hashEarliestOrchardRoot + parent.hashLatestOrchardRoot = right_child.hashLatestOrchardRoot + parent.nOrchardTxCount = ( + left_child.nOrchardTxCount + right_child.nOrchardTxCount + if left_child.nOrchardTxCount is not None and right_child.nOrchardTxCount is not None + else None) + parent.consensusBranchId = left_child.consensusBranchId + return parent + +def make_root_commitment(root: ZcashMMRNode) -> bytes: + '''Makes the root commitment for a blockheader''' + return H(root.serialize(), root.consensusBranchId) + +def get_peaks(node: ZcashMMRNode) -> List[ZcashMMRNode]: + peaks: List[ZcashMMRNode] = [] + + # Get number of leaves. + leaves = node.nLatestHeight - (node.nEarliestHeight - 1) + assert(leaves > 0) + + # Check if the number of leaves in this subtree is a power of two. + if (leaves & (leaves - 1)) == 0: + # This subtree is full, and therefore a single peak. This also covers + # the case of a single isolated leaf. + peaks.append(node) + else: + # This is one of the generated nodes; search within its children. + peaks.extend(get_peaks(node.left_child)) + peaks.extend(get_peaks(node.right_child)) + + return peaks + + +def bag_peaks(peaks: List[ZcashMMRNode]) -> ZcashMMRNode: + ''' + "Bag" a list of peaks, and return the final root + ''' + root = peaks[0] + for i in range(1, len(peaks)): + root = make_parent(root, peaks[i]) + return root + + +def append(root: ZcashMMRNode, leaf: ZcashMMRNode) -> ZcashMMRNode: + '''Append a leaf to an existing tree, return the new tree root''' + # recursively find a list of peaks in the current tree + peaks: List[ZcashMMRNode] = get_peaks(root) + merged: List[ZcashMMRNode] = [] + + # Merge peaks from right to left. + # This will produce a list of peaks in reverse order + current = leaf + for peak in peaks[::-1]: + current_leaves = current.nLatestHeight - (current.nEarliestHeight - 1) + peak_leaves = peak.nLatestHeight - (peak.nEarliestHeight - 1) + + if current_leaves == peak_leaves: + current = make_parent(peak, current) + else: + merged.append(current) + current = peak + merged.append(current) + + # finally, bag the merged peaks + return bag_peaks(merged[::-1]) + +def delete(root: ZcashMMRNode) -> ZcashMMRNode: + ''' + Delete the rightmost leaf node from an existing MMR + Return the new tree root + ''' + + n_leaves = root.nLatestHeight - (root.nEarliestHeight - 1) + # if there were an odd number of leaves, + # simply replace root with left_child + if n_leaves & 1: + return root.left_child + + # otherwise, we need to re-bag the peaks. 
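+    # (Illustrative: with 6 leaves the peaks are the full 4-leaf subtree and
+    # the full 2-leaf subtree; deleting one leaf re-bags the 4-leaf peak and
+    # the remaining single leaf into a new root.)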
+ else: + # first peak + peaks = [root.left_child] + + # we do this traversing the right (unbalanced) side of the tree + # we keep the left side (balanced subtree or leaf) of each subtree + # until we reach a leaf + subtree_root = root.right_child + while subtree_root.left_child: + peaks.append(subtree_root.left_child) + subtree_root = subtree_root.right_child + + new_root = bag_peaks(peaks) + return new_root diff --git a/zebra-rpc/qa/rpc-tests/test_framework/key.py b/zebra-rpc/qa/rpc-tests/test_framework/key.py new file mode 100644 index 00000000000..ba3038fe044 --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/key.py @@ -0,0 +1,215 @@ +# Copyright (c) 2011 Sam Rushing +# +# key.py - OpenSSL wrapper +# +# This file is modified from python-bitcoinlib. +# + +"""ECC secp256k1 crypto routines + +WARNING: This module does not mlock() secrets; your private keys may end up on +disk in swap! Use with caution! +""" + +import ctypes +import ctypes.util +import hashlib +import sys + +ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32') + +ssl.BN_new.restype = ctypes.c_void_p +ssl.BN_new.argtypes = [] + +ssl.BN_bin2bn.restype = ctypes.c_void_p +ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p] + +ssl.BN_CTX_free.restype = None +ssl.BN_CTX_free.argtypes = [ctypes.c_void_p] + +ssl.BN_CTX_new.restype = ctypes.c_void_p +ssl.BN_CTX_new.argtypes = [] + +ssl.ECDH_compute_key.restype = ctypes.c_int +ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p] + +ssl.ECDSA_sign.restype = ctypes.c_int +ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] + +ssl.ECDSA_verify.restype = ctypes.c_int +ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p] + +ssl.EC_KEY_free.restype = None +ssl.EC_KEY_free.argtypes = [ctypes.c_void_p] + +ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p +ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int] + +ssl.EC_KEY_get0_group.restype = ctypes.c_void_p +ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p] + +ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p +ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p] + +ssl.EC_KEY_set_private_key.restype = ctypes.c_int +ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p] + +ssl.EC_KEY_set_conv_form.restype = None +ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int] + +ssl.EC_KEY_set_public_key.restype = ctypes.c_int +ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p] + +ssl.i2o_ECPublicKey.restype = ctypes.c_void_p +ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p] + +ssl.EC_POINT_new.restype = ctypes.c_void_p +ssl.EC_POINT_new.argtypes = [ctypes.c_void_p] + +ssl.EC_POINT_free.restype = None +ssl.EC_POINT_free.argtypes = [ctypes.c_void_p] + +ssl.EC_POINT_mul.restype = ctypes.c_int +ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] + +# this specifies the curve used with ECDSA. +NID_secp256k1 = 714 # from openssl/obj_mac.h + +# Thx to Sam Devlin for the ctypes magic 64-bit fix. 
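+# For reference: the errcheck hook below converts a NULL return from
+# EC_KEY_new_by_curve_name into a ValueError and wraps non-NULL returns in
+# c_void_p, so the pointer is not truncated on 64-bit platforms.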
+def _check_result(val, func, args): + if val == 0: + raise ValueError + else: + return ctypes.c_void_p (val) + +ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p +ssl.EC_KEY_new_by_curve_name.errcheck = _check_result + +class CECKey(object): + """Wrapper around OpenSSL's EC_KEY""" + + POINT_CONVERSION_COMPRESSED = 2 + POINT_CONVERSION_UNCOMPRESSED = 4 + + def __init__(self): + self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1) + + def __del__(self): + if ssl: + ssl.EC_KEY_free(self.k) + self.k = None + + def set_secretbytes(self, secret): + priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new()) + group = ssl.EC_KEY_get0_group(self.k) + pub_key = ssl.EC_POINT_new(group) + ctx = ssl.BN_CTX_new() + if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx): + raise ValueError("Could not derive public key from the supplied secret.") + ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx) + ssl.EC_KEY_set_private_key(self.k, priv_key) + ssl.EC_KEY_set_public_key(self.k, pub_key) + ssl.EC_POINT_free(pub_key) + ssl.BN_CTX_free(ctx) + return self.k + + def set_privkey(self, key): + self.mb = ctypes.create_string_buffer(key) + return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key)) + + def set_pubkey(self, key): + self.mb = ctypes.create_string_buffer(key) + return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key)) + + def get_privkey(self): + size = ssl.i2d_ECPrivateKey(self.k, 0) + mb_pri = ctypes.create_string_buffer(size) + ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri))) + return mb_pri.raw + + def get_pubkey(self): + size = ssl.i2o_ECPublicKey(self.k, 0) + mb = ctypes.create_string_buffer(size) + ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb))) + return mb.raw + + def get_raw_ecdh_key(self, other_pubkey): + ecdh_keybuffer = ctypes.create_string_buffer(32) + r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32, + ssl.EC_KEY_get0_public_key(other_pubkey.k), + self.k, 0) + if r != 32: + raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed') + return ecdh_keybuffer.raw + + def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()): + # FIXME: be warned it's not clear what the kdf should be as a default + r = self.get_raw_ecdh_key(other_pubkey) + return kdf(r) + + def sign(self, hash): + # FIXME: need unit tests for below cases + if not isinstance(hash, bytes): + raise TypeError('Hash must be bytes instance; got %r' % hash.__class__) + if len(hash) != 32: + raise ValueError('Hash must be exactly 32 bytes long') + + sig_size0 = ctypes.c_uint32() + sig_size0.value = ssl.ECDSA_size(self.k) + mb_sig = ctypes.create_string_buffer(sig_size0.value) + result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k) + assert 1 == result + return mb_sig.raw[:sig_size0.value] + + def verify(self, hash, sig): + """Verify a DER signature""" + return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1 + + def set_compressed(self, compressed): + if compressed: + form = self.POINT_CONVERSION_COMPRESSED + else: + form = self.POINT_CONVERSION_UNCOMPRESSED + ssl.EC_KEY_set_conv_form(self.k, form) + + +class CPubKey(bytes): + """An encapsulated public key + + Attributes: + + is_valid - Corresponds to CPubKey.IsValid() + is_fullyvalid - Corresponds to CPubKey.IsFullyValid() + is_compressed - Corresponds to CPubKey.IsCompressed() + """ + + def __new__(cls, buf, _cec_key=None): + self = super(CPubKey, cls).__new__(cls, 
buf) + if _cec_key is None: + _cec_key = CECKey() + self._cec_key = _cec_key + self.is_fullyvalid = _cec_key.set_pubkey(self) != 0 + return self + + @property + def is_valid(self): + return len(self) > 0 + + @property + def is_compressed(self): + return len(self) == 33 + + def verify(self, hash, sig): + return self._cec_key.verify(hash, sig) + + def __str__(self): + return repr(self) + + def __repr__(self): + # Always have represent as b'' so test cases don't have to + # change for py2/3 + if sys.version > '3': + return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__()) + else: + return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__()) + diff --git a/zebra-rpc/qa/rpc-tests/test_framework/mininode.py b/zebra-rpc/qa/rpc-tests/test_framework/mininode.py new file mode 100755 index 00000000000..d56fb8bf79c --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/mininode.py @@ -0,0 +1,2131 @@ +#!/usr/bin/env python3 +# Copyright (c) 2010 ArtForz -- public domain half-a-node +# Copyright (c) 2012 Jeff Garzik +# Copyright (c) 2010-2016 The Bitcoin Core developers +# Copyright (c) 2017-2022 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . + +# +# mininode.py - Bitcoin P2P network half-a-node +# +# This python code was modified from ArtForz' public domain half-a-node, as +# found in the mini-node branch of https://github.com/jgarzik/pynode. +# +# NodeConn: an object which manages p2p connectivity to a bitcoin node +# NodeConnCB: a base class that describes the interface for receiving +# callbacks with network messages from a NodeConn +# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....: +# data structures that should map to corresponding structures in +# bitcoin/primitives +# msg_block, msg_tx, msg_headers, etc.: +# data structures that represent network messages +# ser_*, deser_*: functions that handle serialization/deserialization + + +import struct +import socket +import asyncore +import time +import sys +import random +from binascii import hexlify +from io import BytesIO +from codecs import encode +import hashlib +from threading import RLock +from threading import Thread +import logging +import copy +from hashlib import blake2b + +from .equihash import ( + gbp_basic, + gbp_validate, + hash_nonce, + zcash_person, +) +from .util import bytes_to_hex_str + + +BIP0031_VERSION = 60000 +SPROUT_PROTO_VERSION = 170002 # past bip-31 for ping/pong +OVERWINTER_PROTO_VERSION = 170003 +SAPLING_PROTO_VERSION = 170006 +BLOSSOM_PROTO_VERSION = 170008 +NU5_PROTO_VERSION = 170050 +# NU6_PROTO_VERSION = 170110 + +MY_SUBVERSION = b"/python-mininode-tester:0.0.3/" + +SPROUT_VERSION_GROUP_ID = 0x00000000 +OVERWINTER_VERSION_GROUP_ID = 0x03C48270 +SAPLING_VERSION_GROUP_ID = 0x892F2085 +ZIP225_VERSION_GROUP_ID = 0x26A7270A +# No transaction format change in Blossom. + +MAX_INV_SZ = 50000 + +COIN = 100000000 # 1 zec in zatoshis + +BLOSSOM_POW_TARGET_SPACING_RATIO = 2 + +# The placeholder value used for the auth digest of pre-v5 transactions. +LEGACY_TX_AUTH_DIGEST = (1 << 256) - 1 + +# Keep our own socket map for asyncore, so that we can track disconnects +# ourselves (to workaround an issue with closing an asyncore socket when +# using select) +mininode_socket_map = dict() + +# One lock for synchronizing all data access between the networking thread (see +# NetworkThread below) and the thread running the test logic. 
For simplicity, +# NodeConn acquires this lock whenever delivering a message to a NodeConnCB, +# and whenever adding anything to the send buffer (in send_message()). This +# lock should be acquired in the thread running the test logic to synchronize +# access to any data shared with the NodeConnCB or NodeConn. +mininode_lock = RLock() + +# Serialization/deserialization tools +def sha256(s): + return hashlib.new('sha256', s).digest() + +def hash256(s): + return sha256(sha256(s)) + +def nuparams(branch_id, height): + return '-nuparams=%x:%d' % (branch_id, height) + +def fundingstream(idx, start_height, end_height, addrs): + return '-fundingstream=%d:%d:%d:%s' % (idx, start_height, end_height, ",".join(addrs)) + +def ser_compactsize(n): + if n < 253: + return struct.pack("B", n) + elif n < 0x10000: + return struct.pack(">= 32 + return rs + + +def uint256_from_str(s): + r = 0 + t = struct.unpack("> 24) & 0xFF + v = (c & 0xFFFFFF) << (8 * (nbytes - 3)) + return v + + +def block_work_from_compact(c): + target = uint256_from_compact(c) + return 2**256 // (target + 1) + + +def deser_vector(f, c): + nit = struct.unpack("H", f.read(2))[0] + + def serialize(self): + r = b"" + r += struct.pack("H", self.port) + return r + + def __repr__(self): + return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices, + self.ip, self.port) + + +class CInv(object): + typemap = { + 0: b"Error", + 1: b"TX", + 2: b"Block", + 5: b"WTX", + } + + def __init__(self, t=0, h=0, h_aux=0): + self.type = t + self.hash = h + self.hash_aux = h_aux + if self.type == 1: + self.hash_aux = LEGACY_TX_AUTH_DIGEST + + def deserialize(self, f): + self.type = struct.unpack(" 0: + flags = struct.unpack("B", f.read(1))[0] + self.enableSpends = (flags & ORCHARD_FLAGS_ENABLE_SPENDS) != 0 + self.enableOutputs = (flags & ORCHARD_FLAGS_ENABLE_OUTPUTS) != 0 + self.valueBalance = struct.unpack(" 0: + r += struct.pack("B", self.flags()) + r += struct.pack(" 0 + if has_sapling: + self.valueBalance = struct.unpack(" 0: + self.anchor = deser_uint256(f) + for i in range(len(self.spends)): + self.spends[i].zkproof = Groth16Proof() + self.spends[i].zkproof.deserialize(f) + for i in range(len(self.spends)): + self.spends[i].spendAuthSig = RedJubjubSignature() + self.spends[i].spendAuthSig.deserialize(f) + for i in range(len(self.outputs)): + self.outputs[i].zkproof = Groth16Proof() + self.outputs[i].zkproof.deserialize(f) + if has_sapling: + self.bindingSig = RedJubjubSignature() + self.bindingSig.deserialize(f) + + def serialize(self): + r = b"" + r += ser_vector(self.spends) + r += ser_vector(self.outputs) + has_sapling = (len(self.spends) + len(self.outputs)) > 0 + if has_sapling: + r += struct.pack(" 0: + r += ser_uint256(self.anchor) + for spend in self.spends: + r += spend.zkproof.serialize() + for spend in self.spends: + r += spend.spendAuthSig.serialize() + for output in self.outputs: + r += output.zkproof.serialize() + if has_sapling: + r += self.bindingSig.serialize() + return r + + def __repr__(self): + return "SaplingBundle(spends=%r, outputs=%r, valueBalance=%i, bindingSig=%064x)" \ + % ( + self.spends, + self.outputs, + self.valueBalance, + self.bindingSig, + ) + + +G1_PREFIX_MASK = 0x02 +G2_PREFIX_MASK = 0x0a + +class ZCProof(object): + def __init__(self): + self.g_A = None + self.g_A_prime = None + self.g_B = None + self.g_B_prime = None + self.g_C = None + self.g_C_prime = None + self.g_K = None + self.g_H = None + + def deserialize(self, f): + def deser_g1(self, f): + leadingByte = struct.unpack("> 31) + self.nVersion = header & 
0x7FFFFFFF + self.nVersionGroupId = (struct.unpack("= 2: + self.vJoinSplit = deser_vector(f, JSDescription) + if len(self.vJoinSplit) > 0: + self.joinSplitPubKey = deser_uint256(f) + self.joinSplitSig = f.read(64) + + if isSaplingV4 and not (len(self.shieldedSpends) == 0 and len(self.shieldedOutputs) == 0): + self.bindingSig = RedJubjubSignature() + self.bindingSig.deserialize(f) + + self.sha256 = None + self.hash = None + + def serialize(self): + header = (int(self.fOverwintered)<<31) | self.nVersion + isOverwinterV3 = (self.fOverwintered and + self.nVersionGroupId == OVERWINTER_VERSION_GROUP_ID and + self.nVersion == 3) + isSaplingV4 = (self.fOverwintered and + self.nVersionGroupId == SAPLING_VERSION_GROUP_ID and + self.nVersion == 4) + isNu5V5 = (self.fOverwintered and + self.nVersionGroupId == ZIP225_VERSION_GROUP_ID and + self.nVersion == 5) + + if isNu5V5: + r = b"" + + # Common transaction fields + r += struct.pack("= 2: + r += ser_vector(self.vJoinSplit) + if len(self.vJoinSplit) > 0: + r += ser_uint256(self.joinSplitPubKey) + r += self.joinSplitSig + if isSaplingV4 and not (len(self.shieldedSpends) == 0 and len(self.shieldedOutputs) == 0): + r += self.bindingSig.serialize() + return r + + def rehash(self): + self.sha256 = None + self.calc_sha256() + + def calc_sha256(self): + if self.nVersion >= 5: + from . import zip244 + txid = zip244.txid_digest(self) + self.auth_digest = zip244.auth_digest(self) + else: + txid = hash256(self.serialize()) + self.auth_digest = b'\xFF'*32 + if self.sha256 is None: + self.sha256 = uint256_from_str(txid) + self.hash = encode(txid[::-1], 'hex_codec').decode('ascii') + self.auth_digest_hex = encode(self.auth_digest[::-1], 'hex_codec').decode('ascii') + + def is_valid(self): + self.calc_sha256() + for tout in self.vout: + if tout.nValue < 0 or tout.nValue > 21000000 * 100000000: + return False + return True + + def __repr__(self): + r = ("CTransaction(fOverwintered=%r nVersion=%i nVersionGroupId=0x%08x " + "vin=%r vout=%r nLockTime=%i nExpiryHeight=%i " + "valueBalance=%i shieldedSpends=%r shieldedOutputs=%r" + % (self.fOverwintered, self.nVersion, self.nVersionGroupId, + self.vin, self.vout, self.nLockTime, self.nExpiryHeight, + self.valueBalance, self.shieldedSpends, self.shieldedOutputs)) + if self.nVersion >= 2: + r += " vJoinSplit=%r" % (self.vJoinSplit,) + if len(self.vJoinSplit) > 0: + r += " joinSplitPubKey=%064x joinSplitSig=%s" \ + % (self.joinSplitPubKey, bytes_to_hex_str(self.joinSplitSig)) + if len(self.shieldedSpends) > 0 or len(self.shieldedOutputs) > 0: + r += " bindingSig=%r" % self.bindingSig + r += ")" + return r + + +class CBlockHeader(object): + def __init__(self, header=None): + if header is None: + self.set_null() + else: + self.nVersion = header.nVersion + self.hashPrevBlock = header.hashPrevBlock + self.hashMerkleRoot = header.hashMerkleRoot + self.hashBlockCommitments = header.hashBlockCommitments + self.nTime = header.nTime + self.nBits = header.nBits + self.nNonce = header.nNonce + self.nSolution = header.nSolution + self.sha256 = header.sha256 + self.hash = header.hash + self.calc_sha256() + + def set_null(self): + self.nVersion = 4 + self.hashPrevBlock = 0 + self.hashMerkleRoot = 0 + self.hashBlockCommitments = 0 + self.nTime = 0 + self.nBits = 0 + self.nNonce = 0 + self.nSolution = [] + self.sha256 = None + self.hash = None + + def deserialize(self, f): + self.nVersion = struct.unpack(" 1: + newhashes = [] + for i in range(0, len(hashes), 2): + i2 = min(i+1, len(hashes)-1) + newhashes.append(hash256(hashes[i] + 
hashes[i2])) + hashes = newhashes + return uint256_from_str(hashes[0]) + + def calc_auth_data_root(self): + hashes = [] + nleaves = 0 + for tx in self.vtx: + tx.calc_sha256() + hashes.append(tx.auth_digest) + nleaves += 1 + # Continue adding leaves (of zeros) until reaching a power of 2 + while nleaves & (nleaves-1) > 0: + hashes.append(b'\x00'*32) + nleaves += 1 + while len(hashes) > 1: + newhashes = [] + for i in range(0, len(hashes), 2): + digest = blake2b(digest_size=32, person=b'ZcashAuthDatHash') + digest.update(hashes[i]) + digest.update(hashes[i+1]) + newhashes.append(digest.digest()) + hashes = newhashes + return uint256_from_str(hashes[0]) + + def is_valid(self, n=48, k=5): + # H(I||... + digest = blake2b(digest_size=(512//n)*n//8, person=zcash_person(n, k)) + digest.update(super(CBlock, self).serialize()[:108]) + hash_nonce(digest, self.nNonce) + if not gbp_validate(self.nSolution, digest, n, k): + return False + self.calc_sha256() + target = uint256_from_compact(self.nBits) + if self.sha256 > target: + return False + for tx in self.vtx: + if not tx.is_valid(): + return False + if self.calc_merkle_root() != self.hashMerkleRoot: + return False + return True + + def solve(self, n=48, k=5): + target = uint256_from_compact(self.nBits) + # H(I||... + digest = blake2b(digest_size=(512//n)*n//8, person=zcash_person(n, k)) + digest.update(super(CBlock, self).serialize()[:108]) + self.nNonce = 0 + while True: + # H(I||V||... + curr_digest = digest.copy() + hash_nonce(curr_digest, self.nNonce) + # (x_1, x_2, ...) = A(I, V, n, k) + solns = gbp_basic(curr_digest, n, k) + for soln in solns: + assert(gbp_validate(curr_digest, soln, n, k)) + self.nSolution = soln + self.rehash() + if self.sha256 <= target: + return + self.nNonce += 1 + + def __repr__(self): + return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x hashBlockCommitments=%064x nTime=%s nBits=%08x nNonce=%064x nSolution=%r vtx=%r)" \ + % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, + self.hashBlockCommitments, time.ctime(self.nTime), self.nBits, + self.nNonce, self.nSolution, self.vtx) + + +class CUnsignedAlert(object): + def __init__(self): + self.nVersion = 1 + self.nRelayUntil = 0 + self.nExpiration = 0 + self.nID = 0 + self.nCancel = 0 + self.setCancel = [] + self.nMinVer = 0 + self.nMaxVer = 0 + self.setSubVer = [] + self.nPriority = 0 + self.strComment = b"" + self.strStatusBar = b"" + self.strReserved = b"" + + def deserialize(self, f): + self.nVersion = struct.unpack("= 106: + self.addrFrom = CAddress() + self.addrFrom.deserialize(f) + self.nNonce = struct.unpack("= 209: + self.nStartingHeight = struct.unpack(" +class msg_headers(object): + command = b"headers" + + def __init__(self): + self.headers = [] + + def deserialize(self, f): + # comment in bitcoind indicates these should be deserialized as blocks + blocks = deser_vector(f, CBlock) + for x in blocks: + self.headers.append(CBlockHeader(x)) + + def serialize(self): + blocks = [CBlock(x) for x in self.headers] + return ser_vector(blocks) + + def __repr__(self): + return "msg_headers(headers=%s)" % repr(self.headers) + + +class msg_reject(object): + command = b"reject" + REJECT_MALFORMED = 1 + + def __init__(self): + self.message = b"" + self.code = 0 + self.reason = b"" + self.data = 0 + + def deserialize(self, f): + self.message = deser_string(f) + self.code = struct.unpack("= 209: + conn.send_message(msg_verack()) + conn.ver_send = min(SPROUT_PROTO_VERSION, message.nVersion) + if message.nVersion < 209: + conn.ver_recv = conn.ver_send + + def 
on_verack(self, conn, message): + conn.ver_recv = conn.ver_send + self.verack_received = True + + def on_inv(self, conn, message): + want = msg_getdata() + for i in message.inv: + if i.type != 0: + want.inv.append(i) + if len(want.inv): + conn.send_message(want) + + def on_addr(self, conn, message): pass + def on_alert(self, conn, message): pass + def on_getdata(self, conn, message): pass + def on_notfound(self, conn, message): pass + def on_getblocks(self, conn, message): pass + def on_tx(self, conn, message): pass + def on_block(self, conn, message): pass + def on_getaddr(self, conn, message): pass + def on_headers(self, conn, message): pass + def on_getheaders(self, conn, message): pass + def on_ping(self, conn, message): + if conn.ver_send > BIP0031_VERSION: + conn.send_message(msg_pong(message.nonce)) + def on_reject(self, conn, message): pass + def on_close(self, conn): pass + def on_mempool(self, conn): pass + def on_pong(self, conn, message): pass + + +# The actual NodeConn class +# This class provides an interface for a p2p connection to a specified node +class NodeConn(asyncore.dispatcher): + messagemap = { + b"version": msg_version, + b"verack": msg_verack, + b"addr": msg_addr, + b"alert": msg_alert, + b"inv": msg_inv, + b"getdata": msg_getdata, + b"notfound": msg_notfound, + b"getblocks": msg_getblocks, + b"tx": msg_tx, + b"block": msg_block, + b"getaddr": msg_getaddr, + b"ping": msg_ping, + b"pong": msg_pong, + b"headers": msg_headers, + b"getheaders": msg_getheaders, + b"reject": msg_reject, + b"mempool": msg_mempool + } + MAGIC_BYTES = { + "mainnet": b"\x24\xe9\x27\x64", # mainnet + "testnet3": b"\xfa\x1a\xf9\xbf", # testnet3 + "regtest": b"\xaa\xe8\x3f\x5f" # regtest + } + + def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", protocol_version=SAPLING_PROTO_VERSION): + asyncore.dispatcher.__init__(self, map=mininode_socket_map) + self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport)) + self.dstaddr = dstaddr + self.dstport = dstport + self.create_socket(socket.AF_INET, socket.SOCK_STREAM) + self.sendbuf = b"" + self.recvbuf = b"" + self.ver_send = 209 + self.ver_recv = 209 + self.last_sent = 0 + self.state = "connecting" + self.network = net + self.cb = callback + self.disconnect = False + + # stuff version msg into sendbuf + vt = msg_version(protocol_version) + vt.addrTo.ip = self.dstaddr + vt.addrTo.port = self.dstport + vt.addrFrom.ip = "0.0.0.0" + vt.addrFrom.port = 0 + self.send_message(vt, True) + print('MiniNode: Connecting to Bitcoin Node IP # ' + dstaddr + ':' \ + + str(dstport) + ' using version ' + str(protocol_version)) + + try: + self.connect((dstaddr, dstport)) + except: + self.handle_close() + self.rpc = rpc + + def show_debug_msg(self, msg): + self.log.debug(msg) + + def handle_connect(self): + self.show_debug_msg("MiniNode: Connected & Listening: \n") + self.state = b"connected" + + def handle_close(self): + self.show_debug_msg("MiniNode: Closing Connection to %s:%d... 
" + % (self.dstaddr, self.dstport)) + self.state = b"closed" + self.recvbuf = b"" + self.sendbuf = b"" + try: + self.close() + except: + pass + self.cb.on_close(self) + + def handle_read(self): + try: + t = self.recv(8192) + if len(t) > 0: + self.recvbuf += t + self.got_data() + except: + pass + + def readable(self): + return True + + def writable(self): + with mininode_lock: + length = len(self.sendbuf) + return (length > 0) + + def handle_write(self): + with mininode_lock: + try: + sent = self.send(self.sendbuf) + except: + self.handle_close() + return + self.sendbuf = self.sendbuf[sent:] + + def got_data(self): + try: + while True: + if len(self.recvbuf) < 4: + return + if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]: + raise ValueError("got garbage %r" % (self.recvbuf,)) + if self.ver_recv < 209: + if len(self.recvbuf) < 4 + 12 + 4: + return + command = self.recvbuf[4:4+12].split(b"\x00", 1)[0] + msglen = struct.unpack("= 209: + th = sha256(data) + h = sha256(th) + tmsg += h[:4] + tmsg += data + with mininode_lock: + self.sendbuf += tmsg + self.last_sent = time.time() + + def got_message(self, message): + if message.command == b"version": + if message.nVersion <= BIP0031_VERSION: + self.messagemap[b'ping'] = msg_ping_prebip31 + if self.last_sent + 30 * 60 < time.time(): + self.send_message(self.messagemap[b'ping']()) + self.show_debug_msg("Recv %s" % repr(message)) + self.cb.deliver(self, message) + + def disconnect_node(self): + self.disconnect = True + + +class NetworkThread(Thread): + def run(self): + while mininode_socket_map: + # We check for whether to disconnect outside of the asyncore + # loop to workaround the behavior of asyncore when using + # select + disconnected = [] + for fd, obj in mininode_socket_map.items(): + if obj.disconnect: + disconnected.append(obj) + [ obj.handle_close() for obj in disconnected ] + asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1) + + +# An exception we can raise if we detect a potential disconnect +# (p2p or rpc) before the test is complete +class EarlyDisconnectError(Exception): + def __init__(self, value): + self.value = value + + def __str__(self): + return repr(self.value) diff --git a/zebra-rpc/qa/rpc-tests/test_framework/netutil.py b/zebra-rpc/qa/rpc-tests/test_framework/netutil.py new file mode 100644 index 00000000000..98f099c5558 --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/netutil.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2019-2022 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . + +# Linux network utilities + +import sys +import socket +import struct +import array +import os +from binascii import unhexlify, hexlify + +# Roughly based on https://web.archive.org/web/20190424172231/http://voorloopnul.com:80/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal +STATE_ESTABLISHED = '01' +STATE_SYN_SENT = '02' +STATE_SYN_RECV = '03' +STATE_FIN_WAIT1 = '04' +STATE_FIN_WAIT2 = '05' +STATE_TIME_WAIT = '06' +STATE_CLOSE = '07' +STATE_CLOSE_WAIT = '08' +STATE_LAST_ACK = '09' +STATE_LISTEN = '0A' +STATE_CLOSING = '0B' + +def get_socket_inodes(pid): + ''' + Get list of socket inodes for process pid. 
+ ''' + base = '/proc/%i/fd' % pid + inodes = [] + for item in os.listdir(base): + target = os.readlink(os.path.join(base, item)) + if target.startswith('socket:'): + inodes.append(int(target[8:-1])) + return inodes + +def _remove_empty(array): + return [x for x in array if x !=''] + +def _convert_ip_port(array): + host,port = array.split(':') + # convert host from mangled-per-four-bytes form as used by kernel + host = unhexlify(host) + host_out = '' + for x in range(0, len(host) // 4): + (val,) = struct.unpack('=I', host[x*4:(x+1)*4]) + host_out += '%08x' % val + + return host_out,int(port,16) + +def netstat(typ='tcp'): + ''' + Function to return a list with status of tcp connections at linux systems + To get pid of all network process running on system, you must run this script + as superuser + ''' + with open('/proc/net/'+typ,'r',encoding='utf8') as f: + content = f.readlines() + content.pop(0) + result = [] + for line in content: + line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces. + tcp_id = line_array[0] + l_addr = _convert_ip_port(line_array[1]) + r_addr = _convert_ip_port(line_array[2]) + state = line_array[3] + inode = int(line_array[9]) # Need the inode to match with process pid. + nline = [tcp_id, l_addr, r_addr, state, inode] + result.append(nline) + return result + +def get_bind_addrs(pid): + ''' + Get bind addresses as (host,port) tuples for process pid. + ''' + inodes = get_socket_inodes(pid) + bind_addrs = [] + for conn in netstat('tcp') + netstat('tcp6'): + if conn[3] == STATE_LISTEN and conn[4] in inodes: + bind_addrs.append(conn[1]) + return bind_addrs + +# from: https://code.activestate.com/recipes/439093/ +def all_interfaces(): + ''' + Return all interfaces that are up + ''' + import fcntl + + is_64bits = sys.maxsize > 2**32 + struct_size = 40 if is_64bits else 32 + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + max_possible = 8 # initial value + while True: + bytes = max_possible * struct_size + names = array.array('B', b'\0' * bytes) + outbytes = struct.unpack('iL', fcntl.ioctl( + s.fileno(), + 0x8912, # SIOCGIFCONF + struct.pack('iL', bytes, names.buffer_info()[0]) + ))[0] + if outbytes == bytes: + max_possible *= 2 + else: + break + namestr = names.tobytes() + return [(namestr[i:i+16].split(b'\0', 1)[0], + socket.inet_ntoa(namestr[i+20:i+24])) + for i in range(0, outbytes, struct_size)] + +def addr_to_hex(addr): + ''' + Convert string IPv4 or IPv6 address to binary address as returned by + get_bind_addrs. + Very naive implementation that certainly doesn't work for all IPv6 variants. + ''' + if '.' in addr: # IPv4 + addr = [int(x) for x in addr.split('.')] + elif ':' in addr: # IPv6 + sub = [[], []] # prefix, suffix + x = 0 + addr = addr.split(':') + for i,comp in enumerate(addr): + if comp == '': + if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end + continue + x += 1 # :: skips to suffix + assert(x < 2) + else: # two bytes per component + val = int(comp, 16) + sub[x].append(val >> 8) + sub[x].append(val & 0xff) + nullbytes = 16 - len(sub[0]) - len(sub[1]) + assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)) + addr = sub[0] + ([0] * nullbytes) + sub[1] + else: + raise ValueError('Could not parse address %s' % addr) + return hexlify(bytearray(addr)).decode('ascii') + +def test_ipv6_local(): + ''' + Check for (local) IPv6 support. + ''' + import socket + # By using SOCK_DGRAM this will not actually make a connection, but it will + # fail if there is no route to IPv6 localhost. 
+ have_ipv6 = True + try: + s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) + s.connect(('::1', 0)) + except socket.error: + have_ipv6 = False + return have_ipv6 diff --git a/zebra-rpc/qa/rpc-tests/test_framework/proxy.py b/zebra-rpc/qa/rpc-tests/test_framework/proxy.py new file mode 100644 index 00000000000..d41c92d3c51 --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/proxy.py @@ -0,0 +1,157 @@ +""" + Copyright 2024 Zcash Foundation + + ServiceProxy is just AuthServiceProxy without the auth part. + + Previous copyright, from authproxy.py: + + Copyright 2011 Jeff Garzik + + AuthServiceProxy has the following improvements over python-jsonrpc's + ServiceProxy class: + + - HTTP connections persist for the life of the AuthServiceProxy object + (if server supports HTTP/1.1) + - sends protocol 'version', per JSON-RPC 1.1 + - sends proper, incrementing 'id' + - sends Basic HTTP authentication headers + - parses all JSON numbers that look like floats as Decimal + - uses standard Python json lib + + Previous copyright, from python-jsonrpc/jsonrpc/proxy.py: + + Copyright (c) 2007 Jan-Klaas Kollhof + + This file is part of jsonrpc. + + jsonrpc is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2.1 of the License, or + (at your option) any later version. + + This software is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with this software; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +""" + +import decimal +import json +import logging +from http.client import HTTPConnection, HTTPSConnection, BadStatusLine +from urllib.parse import urlparse + +USER_AGENT = "ServiceProxy/0.1" + +HTTP_TIMEOUT = 600 + +log = logging.getLogger("BitcoinRPC") + +class JSONRPCException(Exception): + def __init__(self, rpc_error): + Exception.__init__(self, rpc_error.get("message")) + self.error = rpc_error + +def EncodeDecimal(o): + if isinstance(o, decimal.Decimal): + return str(o) + raise TypeError(repr(o) + " is not JSON serializable") + + +class ServiceProxy(): + __id_count = 0 + + def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None): + self.__service_url = service_url + self._service_name = service_name + self.__url = urlparse(service_url) + + self.timeout = timeout + self._set_conn(connection) + + def _set_conn(self, connection=None): + port = 80 if self.__url.port is None else self.__url.port + if connection: + self.__conn = connection + self.timeout = connection.timeout + elif self.__url.scheme == 'https': + self.__conn = HTTPSConnection(self.__url.hostname, port, timeout=self.timeout) + else: + self.__conn = HTTPConnection(self.__url.hostname, port, timeout=self.timeout) + + def __getattr__(self, name): + if name.startswith('__') and name.endswith('__'): + # Python internal stuff + raise AttributeError + if self._service_name is not None: + name = "%s.%s" % (self._service_name, name) + return ServiceProxy(self.__service_url, name, connection=self.__conn) + + def _request(self, method, path, postdata): + ''' + Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout). 
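+        The retry is a single re-send: on BadStatusLine, BrokenPipeError or
+        ConnectionResetError the connection is closed and the same request
+        is issued once more (see the exception handler below).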
+ This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5. + ''' + headers = {'Host': self.__url.hostname, + 'User-Agent': USER_AGENT, + 'Content-type': 'application/json'} + try: + self.__conn.request(method, path, postdata, headers) + return self._get_response() + except Exception as e: + # If connection was closed, try again. + # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset. + # ConnectionResetError happens on FreeBSD with Python 3.4. + # This can be simplified now that we depend on Python 3 (previously, we could not + # refer to BrokenPipeError or ConnectionResetError which did not exist on Python 2) + if ((isinstance(e, BadStatusLine) and e.line == "''") + or e.__class__.__name__ in ('BrokenPipeError', 'ConnectionResetError')): + self.__conn.close() + self.__conn.request(method, path, postdata, headers) + return self._get_response() + else: + raise + + def __call__(self, *args): + ServiceProxy.__id_count += 1 + + log.debug("-%s-> %s %s"%(ServiceProxy.__id_count, self._service_name, + json.dumps(args, default=EncodeDecimal))) + postdata = json.dumps({'jsonrpc': '1.0', + 'method': self._service_name, + 'params': args, + 'id': ServiceProxy.__id_count}, default=EncodeDecimal) + response = self._request('POST', self.__url.path, postdata) + if 'result' not in response: + raise JSONRPCException({ + 'code': -343, 'message': 'missing JSON-RPC result'}) + else: + return response['result'] + + def _batch(self, rpc_call_list): + postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal) + log.debug("--> "+postdata) + return self._request('POST', self.__url.path, postdata) + + def _get_response(self): + http_response = self.__conn.getresponse() + if http_response is None: + raise JSONRPCException({ + 'code': -342, 'message': 'missing HTTP response from server'}) + + content_type = http_response.getheader('Content-Type') + if content_type != 'application/json; charset=utf-8': + raise JSONRPCException({ + 'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)}) + + responsedata = http_response.read().decode('utf8') + response = json.loads(responsedata, parse_float=decimal.Decimal) + if "error" in response and response["error"] is None: + log.debug("<-%s- %s"%(response["id"], json.dumps(response["result"], default=EncodeDecimal))) + else: + log.debug("<-- "+responsedata) + return response diff --git a/zebra-rpc/qa/rpc-tests/test_framework/script.py b/zebra-rpc/qa/rpc-tests/test_framework/script.py new file mode 100644 index 00000000000..c39d249b0f5 --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/script.py @@ -0,0 +1,979 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2016 The Bitcoin Core developers +# Copyright (c) 2017-2022 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . + +# +# script.py +# +# This file is modified from python-bitcoinlib. +# + +"""Scripts + +Functionality to build scripts, as well as SignatureHash(). 
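+
+When a nonzero consensus branch ID is passed, SignatureHash() produces the
+ZIP 243 (Sapling) signature digest; otherwise it falls back to the legacy
+Bitcoin-style algorithm.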
+""" + +import sys +bchr = chr +bord = ord +if sys.version > '3': + long = int + bchr = lambda x: bytes([x]) + bord = lambda x: x + +from hashlib import blake2b + +from binascii import hexlify +import struct + +from test_framework.bignum import bn2vch +from test_framework.mininode import (CTransaction, CTxOut, hash256, ser_string, ser_uint256) + +MAX_SCRIPT_SIZE = 10000 +MAX_SCRIPT_ELEMENT_SIZE = 520 +MAX_SCRIPT_OPCODES = 201 + +OPCODE_NAMES = {} + +_opcode_instances = [] +class CScriptOp(int): + """A single script opcode""" + __slots__ = [] + + @staticmethod + def encode_op_pushdata(d): + """Encode a PUSHDATA op, returning bytes""" + if len(d) < 0x4c: + return b'' + struct.pack('B', len(d)) + d # OP_PUSHDATA + elif len(d) <= 0xff: + return b'\x4c' + struct.pack('B', len(d)) + d # OP_PUSHDATA1 + elif len(d) <= 0xffff: + return b'\x4d' + struct.pack(b'>= 8 + if r[-1] & 0x80: + r.append(0x80 if neg else 0) + elif neg: + r[-1] |= 0x80 + return struct.pack("B", len(r)) + r + + +class CScript(bytes): + """Serialized script + + A bytes subclass, so you can use this directly whenever bytes are accepted. + Note that this means that indexing does *not* work - you'll get an index by + byte rather than opcode. This format was chosen for efficiency so that the + general case would not require creating a lot of little CScriptOP objects. + + iter(script) however does iterate by opcode. + """ + @classmethod + def __coerce_instance(cls, other): + # Coerce other into bytes + if isinstance(other, CScriptOp): + other = bytes([other]) + elif isinstance(other, CScriptNum): + if (other.value == 0): + other = bytes([CScriptOp(OP_0)]) + else: + other = CScriptNum.encode(other) + elif isinstance(other, int): + if 0 <= other <= 16: + other = bytes([CScriptOp.encode_op_n(other)]) + elif other == -1: + other = bytes([OP_1NEGATE]) + else: + other = CScriptOp.encode_op_pushdata(bn2vch(other)) + elif isinstance(other, (bytes, bytearray)): + other = bytes(CScriptOp.encode_op_pushdata(other)) + return other + + def __add__(self, other): + # Do the coercion outside of the try block so that errors in it are + # noticed. + other = self.__coerce_instance(other) + + try: + # bytes.__add__ always returns bytes instances unfortunately + return CScript(super(CScript, self).__add__(other)) + except TypeError: + raise TypeError('Can not add a %r instance to a CScript' % other.__class__) + + def join(self, iterable): + # join makes no sense for a CScript() + raise NotImplementedError + + def __new__(cls, value=b''): + if isinstance(value, bytes) or isinstance(value, bytearray): + return super(CScript, cls).__new__(cls, value) + else: + def coerce_iterable(iterable): + for instance in iterable: + yield cls.__coerce_instance(instance) + # Annoyingly on both python2 and python3 bytes.join() always + # returns a bytes instance even when subclassed. + return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value))) + + def raw_iter(self): + """Raw iteration + + Yields tuples of (opcode, data, sop_idx) so that the different possible + PUSHDATA encodings can be accurately distinguished, as well as + determining the exact opcode byte indexes. 
(sop_idx)
+        """
+        i = 0
+        while i < len(self):
+            sop_idx = i
+            opcode = bord(self[i])
+            i += 1
+
+            if opcode > OP_PUSHDATA4:
+                yield (opcode, None, sop_idx)
+            else:
+                datasize = None
+                pushdata_type = None
+                if opcode < OP_PUSHDATA1:
+                    pushdata_type = 'PUSHDATA(%d)' % opcode
+                    datasize = opcode
+
+                elif opcode == OP_PUSHDATA1:
+                    pushdata_type = 'PUSHDATA1'
+                    if i >= len(self):
+                        raise CScriptInvalidError('PUSHDATA1: missing data length')
+                    datasize = bord(self[i])
+                    i += 1
+
+                elif opcode == OP_PUSHDATA2:
+                    pushdata_type = 'PUSHDATA2'
+                    if i + 1 >= len(self):
+                        raise CScriptInvalidError('PUSHDATA2: missing data length')
+                    datasize = bord(self[i]) + (bord(self[i+1]) << 8)
+                    i += 2
+
+                elif opcode == OP_PUSHDATA4:
+                    pushdata_type = 'PUSHDATA4'
+                    if i + 3 >= len(self):
+                        raise CScriptInvalidError('PUSHDATA4: missing data length')
+                    datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
+                    i += 4
+
+                else:
+                    assert False # shouldn't happen
+
+                data = bytes(self[i:i+datasize])
+
+                # Check for truncation
+                if len(data) < datasize:
+                    raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
+
+                i += datasize
+
+                yield (opcode, data, sop_idx)
+
+    def __iter__(self):
+        """'Cooked' iteration
+
+        Returns either a CScriptOP instance, an integer, or bytes, as
+        appropriate.
+
+        See raw_iter() if you need to distinguish the different possible
+        PUSHDATA encodings.
+        """
+        for (opcode, data, sop_idx) in self.raw_iter():
+            if data is not None:
+                yield data
+            else:
+                opcode = CScriptOp(opcode)
+
+                if opcode.is_small_int():
+                    yield opcode.decode_op_n()
+                else:
+                    yield CScriptOp(opcode)
+
+    def __repr__(self):
+        # For Python3 compatibility add b before strings so testcases don't
+        # need to change
+        def _repr(o):
+            if isinstance(o, bytes):
+                return "x('%s')" % hexlify(o).decode('ascii')
+            else:
+                return repr(o)
+
+        ops = []
+        i = iter(self)
+        while True:
+            op = None
+            try:
+                op = _repr(next(i))
+            except CScriptTruncatedPushDataError as err:
+                op = '%s...<ERROR: %s>' % (_repr(err.data), err)
+                break
+            except CScriptInvalidError as err:
+                op = '<ERROR: %s>' % err
+                break
+            except StopIteration:
+                break
+            finally:
+                if op is not None:
+                    ops.append(op)
+
+        return "CScript([%s])" % ', '.join(ops)
+
+    def GetSigOpCount(self, fAccurate):
+        """Get the SigOp count.
+
+        fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
+
+        Note that this is consensus-critical.
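+
+        For example, with fAccurate a script ending
+        OP_2 <pk1> <pk2> OP_2 OP_CHECKMULTISIG counts as 2 sigops, since the
+        OP_2 immediately before OP_CHECKMULTISIG gives the key count;
+        without fAccurate that OP_CHECKMULTISIG counts as 20.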
+ """ + n = 0 + lastOpcode = OP_INVALIDOPCODE + for (opcode, data, sop_idx) in self.raw_iter(): + if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY): + n += 1 + elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY): + if fAccurate and (OP_1 <= lastOpcode <= OP_16): + n += opcode.decode_op_n() + else: + n += 20 + lastOpcode = opcode + return n + + +SIGHASH_ALL = 1 +SIGHASH_NONE = 2 +SIGHASH_SINGLE = 3 +SIGHASH_ANYONECANPAY = 0x80 + +def getHashPrevouts(tx, person=b'ZcashPrevoutHash'): + digest = blake2b(digest_size=32, person=person) + for x in tx.vin: + digest.update(x.prevout.serialize()) + return digest.digest() + +def getHashSequence(tx, person=b'ZcashSequencHash'): + digest = blake2b(digest_size=32, person=person) + for x in tx.vin: + digest.update(struct.pack('= len(txTo.vin): + raise ValueError("inIdx %d out of range (%d)" % (inIdx, len(txTo.vin))) + + if consensusBranchId != 0: + # ZIP 243 + hashPrevouts = b'\x00'*32 + hashSequence = b'\x00'*32 + hashOutputs = b'\x00'*32 + hashJoinSplits = b'\x00'*32 + hashShieldedSpends = b'\x00'*32 + hashShieldedOutputs = b'\x00'*32 + + if not (hashtype & SIGHASH_ANYONECANPAY): + hashPrevouts = getHashPrevouts(txTo) + + if (not (hashtype & SIGHASH_ANYONECANPAY)) and \ + (hashtype & 0x1f) != SIGHASH_SINGLE and \ + (hashtype & 0x1f) != SIGHASH_NONE: + hashSequence = getHashSequence(txTo) + + if (hashtype & 0x1f) != SIGHASH_SINGLE and \ + (hashtype & 0x1f) != SIGHASH_NONE: + hashOutputs = getHashOutputs(txTo) + elif (hashtype & 0x1f) == SIGHASH_SINGLE and \ + 0 <= inIdx and inIdx < len(txTo.vout): + digest = blake2b(digest_size=32, person=b'ZcashOutputsHash') + digest.update(txTo.vout[inIdx].serialize()) + hashOutputs = digest.digest() + + if len(txTo.vJoinSplit) > 0: + hashJoinSplits = getHashJoinSplits(txTo) + + if len(txTo.shieldedSpends) > 0: + hashShieldedSpends = getHashShieldedSpends(txTo) + + if len(txTo.shieldedOutputs) > 0: + hashShieldedOutputs = getHashShieldedOutputs(txTo) + + digest = blake2b( + digest_size=32, + person=b'ZcashSigHash' + struct.pack('= len(txtmp.vout): + raise ValueError("outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout))) + + tmp = txtmp.vout[outIdx] + txtmp.vout = [] + for i in range(outIdx): + txtmp.vout.append(CTxOut()) + txtmp.vout.append(tmp) + + for i in range(len(txtmp.vin)): + if i != inIdx: + txtmp.vin[i].nSequence = 0 + + if hashtype & SIGHASH_ANYONECANPAY: + tmp = txtmp.vin[inIdx] + txtmp.vin = [] + txtmp.vin.append(tmp) + + s = txtmp.serialize() + s += struct.pack(b" 0: + d = s.recv(n) + if not d: + raise IOError('Unexpected end of stream') + rv.extend(d) + n -= len(d) + return rv + +### Implementation classes +class Socks5Configuration(object): + '''Proxy configuration''' + def __init__(self): + self.addr = None # Bind address (must be set) + self.af = socket.AF_INET # Bind address family + self.unauth = False # Support unauthenticated + self.auth = False # Support authentication + +class Socks5Command(object): + '''Information about an incoming socks5 command''' + def __init__(self, cmd, atyp, addr, port, username, password): + self.cmd = cmd # Command (one of Command.*) + self.atyp = atyp # Address type (one of AddressType.*) + self.addr = addr # Address + self.port = port # Port to connect to + self.username = username + self.password = password + def __repr__(self): + return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cmd, self.atyp, self.addr, self.port, self.username, self.password) + +class Socks5Connection(object): + def __init__(self, serv, conn, peer): + self.serv = serv + self.conn = conn + 
self.peer = peer + + def handle(self): + ''' + Handle socks5 request according to RFC1928 + ''' + try: + # Verify socks version + ver = recvall(self.conn, 1)[0] + if ver != 0x05: + raise IOError('Invalid socks version %i' % ver) + # Choose authentication method + nmethods = recvall(self.conn, 1)[0] + methods = bytearray(recvall(self.conn, nmethods)) + method = None + if 0x02 in methods and self.serv.conf.auth: + method = 0x02 # username/password + elif 0x00 in methods and self.serv.conf.unauth: + method = 0x00 # unauthenticated + if method is None: + raise IOError('No supported authentication method was offered') + # Send response + self.conn.sendall(bytearray([0x05, method])) + # Read authentication (optional) + username = None + password = None + if method == 0x02: + ver = recvall(self.conn, 1)[0] + if ver != 0x01: + raise IOError('Invalid auth packet version %i' % ver) + ulen = recvall(self.conn, 1)[0] + username = str(recvall(self.conn, ulen)) + plen = recvall(self.conn, 1)[0] + password = str(recvall(self.conn, plen)) + # Send authentication response + self.conn.sendall(bytearray([0x01, 0x00])) + + # Read connect request + (ver,cmd,rsv,atyp) = recvall(self.conn, 4) + if ver != 0x05: + raise IOError('Invalid socks version %i in connect request' % ver) + if cmd != Command.CONNECT: + raise IOError('Unhandled command %i in connect request' % cmd) + + if atyp == AddressType.IPV4: + addr = recvall(self.conn, 4) + elif atyp == AddressType.DOMAINNAME: + n = recvall(self.conn, 1)[0] + addr = recvall(self.conn, n) + elif atyp == AddressType.IPV6: + addr = recvall(self.conn, 16) + else: + raise IOError('Unknown address type %i' % atyp) + port_hi,port_lo = recvall(self.conn, 2) + port = (port_hi << 8) | port_lo + + # Send dummy response + self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00])) + + cmdin = Socks5Command(cmd, atyp, addr, port, username, password) + self.serv.queue.put(cmdin) + print('Proxy: ', cmdin) + # Fall through to disconnect + except Exception as e: + traceback.print_exc(file=sys.stderr) + self.serv.queue.put(e) + finally: + self.conn.close() + +class Socks5Server(object): + def __init__(self, conf): + self.conf = conf + self.s = socket.socket(conf.af) + self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.s.bind(conf.addr) + self.s.listen(5) + self.running = False + self.thread = None + self.queue = queue.Queue() # report connections and exceptions to client + + def run(self): + while self.running: + (sockconn, peer) = self.s.accept() + if self.running: + conn = Socks5Connection(self, sockconn, peer) + thread = threading.Thread(None, conn.handle) + thread.daemon = True + thread.start() + + def start(self): + assert(not self.running) + self.running = True + self.thread = threading.Thread(None, self.run) + self.thread.daemon = True + self.thread.start() + + def stop(self): + self.running = False + # connect to self to end run loop + s = socket.socket(self.conf.af) + s.connect(self.conf.addr) + s.close() + self.thread.join() + diff --git a/zebra-rpc/qa/rpc-tests/test_framework/test_framework.py b/zebra-rpc/qa/rpc-tests/test_framework/test_framework.py new file mode 100755 index 00000000000..a4290647654 --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/test_framework.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2016-2022 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or 
https://www.opensource.org/licenses/mit-license.php . + +# Base class for RPC testing + +import logging +import optparse +import os +import sys +import shutil +import tempfile +import traceback + +from .proxy import JSONRPCException +from .util import ( + zcashd_binary, + initialize_chain, + start_nodes, + connect_nodes_bi, + sync_blocks, + sync_mempools, + stop_nodes, + wait_bitcoinds, + enable_coverage, + check_json_precision, + PortSeed, +) + + +class BitcoinTestFramework(object): + + def __init__(self): + self.num_nodes = 4 + self.cache_behavior = 'current' + self.nodes = None + + def run_test(self): + raise NotImplementedError + + def add_options(self, parser): + pass + + def setup_chain(self): + print("Initializing test directory "+self.options.tmpdir) + initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir, self.cache_behavior) + + def setup_nodes(self): + return start_nodes(self.num_nodes, self.options.tmpdir) + + def setup_network(self, split = False, do_mempool_sync = True): + self.nodes = self.setup_nodes() + + # Connect the nodes as a "chain". This allows us + # to split the network between nodes 1 and 2 to get + # two halves that can work on competing chains. + connect_nodes_bi(self.nodes, 0, 1) + + # If we joined network halves, connect the nodes from the joint + # on outward. This ensures that chains are properly reorganised. + if len(self.nodes) >= 4: + connect_nodes_bi(self.nodes, 2, 3) + if not split: + connect_nodes_bi(self.nodes, 1, 2) + sync_blocks(self.nodes[1:3]) + if do_mempool_sync: + sync_mempools(self.nodes[1:3]) + + self.is_network_split = split + self.sync_all(do_mempool_sync) + + def split_network(self): + """ + Split the network of four nodes into nodes 0/1 and 2/3. + """ + assert not self.is_network_split + stop_nodes(self.nodes) + wait_bitcoinds() + self.setup_network(True) + + def sync_all(self, do_mempool_sync = True): + if self.is_network_split: + sync_blocks(self.nodes[:2]) + sync_blocks(self.nodes[2:]) + if do_mempool_sync: + sync_mempools(self.nodes[:2]) + sync_mempools(self.nodes[2:]) + else: + sync_blocks(self.nodes) + if do_mempool_sync: + sync_mempools(self.nodes) + + def join_network(self): + """ + Join the (previously split) network halves together. 
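+
+        All nodes are stopped and restarted; setup_network(False, False)
+        then reconnects nodes 1 and 2 without waiting for mempool sync.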
+ """ + assert self.is_network_split + stop_nodes(self.nodes) + wait_bitcoinds() + self.setup_network(False, False) + + def main(self): + + parser = optparse.OptionParser(usage="%prog [options]") + parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true", + help="Leave bitcoinds and test.* datadir on exit or error") + parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true", + help="Don't stop bitcoinds after the test execution") + parser.add_option("--srcdir", dest="srcdir", default="../../src", + help="Source directory containing bitcoind/bitcoin-cli (default: %default)") + parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"), + help="Directory for caching pregenerated datadirs") + parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"), + help="Root directory for datadirs") + parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true", + help="Print out all RPC calls as they are made") + parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int', + help="The seed to use for assigning port numbers (default: current process id)") + parser.add_option("--coveragedir", dest="coveragedir", + help="Write tested RPC commands into this directory") + self.add_options(parser) + (self.options, self.args) = parser.parse_args() + + self.options.tmpdir += '/' + str(self.options.port_seed) + + if self.options.trace_rpc: + logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) + + if self.options.coveragedir: + enable_coverage(self.options.coveragedir) + + PortSeed.n = self.options.port_seed + + os.environ['PATH'] = self.options.srcdir+":"+os.environ['PATH'] + + check_json_precision() + + success = False + try: + os.makedirs(self.options.tmpdir, exist_ok=False) + self.setup_chain() + self.setup_network() + self.run_test() + success = True + except JSONRPCException as e: + print("JSONRPC error: "+e.error['message']) + traceback.print_tb(sys.exc_info()[2]) + except AssertionError as e: + print("Assertion failed: " + str(e)) + traceback.print_tb(sys.exc_info()[2]) + except KeyError as e: + print("key not found: "+ str(e)) + traceback.print_tb(sys.exc_info()[2]) + except Exception as e: + print("Unexpected exception caught during testing: "+str(e)) + traceback.print_tb(sys.exc_info()[2]) + except KeyboardInterrupt as e: + print("Exiting after " + repr(e)) + + if not self.options.noshutdown: + print("Stopping nodes") + stop_nodes(self.nodes) + wait_bitcoinds() + else: + print("Note: bitcoinds were not stopped and may still be running") + + if not self.options.nocleanup and not self.options.noshutdown: + print("Cleaning up") + shutil.rmtree(self.options.tmpdir) + + if success: + print("Tests successful") + sys.exit(0) + else: + print("Failed") + sys.exit(1) + + +# Test framework for doing p2p comparison testing, which sets up some bitcoind +# binaries: +# 1 binary: test binary +# 2 binaries: 1 test binary, 1 ref binary +# n>2 binaries: 1 test binary, n-1 ref binaries + +class ComparisonTestFramework(BitcoinTestFramework): + + def __init__(self): + super().__init__() + self.num_nodes = 1 + self.cache_behavior = 'clean' + self.additional_args = [] + + def add_options(self, parser): + parser.add_option("--testbinary", dest="testbinary", + default=os.getenv("CARGO_BIN_EXE_zebrad", zcashd_binary()), + help="zebrad binary to test") + parser.add_option("--refbinary", dest="refbinary", + 
default=os.getenv("CARGO_BIN_EXE_zebrad", zcashd_binary()), + help="zebrad binary to use for reference nodes (if any)") + + def setup_network(self): + self.nodes = start_nodes( + self.num_nodes, self.options.tmpdir, + extra_args=[['-debug', '-whitelist=127.0.0.1'] + self.additional_args] * self.num_nodes, + binary=[self.options.testbinary] + + [self.options.refbinary]*(self.num_nodes-1)) + + def get_tests(self): + raise NotImplementedError diff --git a/zebra-rpc/qa/rpc-tests/test_framework/util.py b/zebra-rpc/qa/rpc-tests/test_framework/util.py new file mode 100644 index 00000000000..c50e730307c --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/util.py @@ -0,0 +1,802 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2016 The Bitcoin Core developers +# Copyright (c) 2016-2022 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . + + +# +# Helpful routines for regression testing +# + +import os +import sys + +from binascii import hexlify, unhexlify +from base64 import b64encode +from decimal import Decimal, ROUND_DOWN +import json +import http.client +import random +import shutil +import subprocess +import tarfile +import tempfile +import time +import re +import errno + +from . import coverage +from .proxy import ServiceProxy, JSONRPCException + +LEGACY_DEFAULT_FEE = Decimal('0.00001') + +COVERAGE_DIR = None +PRE_BLOSSOM_BLOCK_TARGET_SPACING = 150 +POST_BLOSSOM_BLOCK_TARGET_SPACING = 75 + +SPROUT_BRANCH_ID = 0x00000000 +OVERWINTER_BRANCH_ID = 0x5BA81B19 +SAPLING_BRANCH_ID = 0x76B809BB +BLOSSOM_BRANCH_ID = 0x2BB40E60 +HEARTWOOD_BRANCH_ID = 0xF5B9230B +CANOPY_BRANCH_ID = 0xE9FF75A6 +NU5_BRANCH_ID = 0xC2D6D0B4 +NU6_BRANCH_ID = 0xC8E71055 + +# The maximum number of nodes a single test can spawn +MAX_NODES = 8 +# Don't assign rpc or p2p ports lower than this +PORT_MIN = 11000 +# The number of ports to "reserve" for p2p and rpc, each +PORT_RANGE = 5000 + +def zcashd_binary(): + return os.getenv("CARGO_BIN_EXE_zebrad", os.path.join("..", "target", "debug", "zebrad")) + +def zebrad_config(datadir): + base_location = os.path.join('qa', 'base_config.toml') + new_location = os.path.join(datadir, "config.toml") + shutil.copyfile(base_location, new_location) + return new_location + +class PortSeed: + # Must be initialized with a unique integer for each process + n = None + +def enable_coverage(dirname): + """Maintain a log of which RPC calls are made during testing.""" + global COVERAGE_DIR + COVERAGE_DIR = dirname + + +def get_rpc_proxy(url, node_number, timeout=None): + """ + Args: + url (str): URL of the RPC server to call + node_number (int): the node number (or id) that this calls to + + Kwargs: + timeout (int): HTTP timeout in seconds + + Returns: + AuthServiceProxy. convenience object for making RPC calls. 
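+    (The returned object is a ServiceProxy wrapped in
+    coverage.AuthServiceProxyWrapper, which logs each RPC call when
+    coverage is enabled.)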
+ + """ + proxy_kwargs = {} + if timeout is not None: + proxy_kwargs['timeout'] = timeout + + proxy = ServiceProxy(url, **proxy_kwargs) + proxy.url = url # store URL on proxy for info + + coverage_logfile = coverage.get_filename( + COVERAGE_DIR, node_number) if COVERAGE_DIR else None + + return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile) + + +def p2p_port(n): + assert(n <= MAX_NODES) + return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) + +def rpc_port(n): + return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES) + +def check_json_precision(): + """Make sure json library being used does not lose precision converting ZEC values""" + n = Decimal("20000000.00000003") + zatoshis = int(json.loads(json.dumps(float(n)))*1.0e8) + if zatoshis != 2000000000000003: + raise RuntimeError("JSON encode/decode loses precision") + +def bytes_to_hex_str(byte_str): + return hexlify(byte_str).decode('ascii') + +def hex_str_to_bytes(hex_str): + return unhexlify(hex_str.encode('ascii')) + +def str_to_b64str(string): + return b64encode(string.encode('utf-8')).decode('ascii') + +def sync_blocks(rpc_connections, wait=0.125, timeout=60, allow_different_tips=False): + """ + Wait until everybody has the same tip, and has notified + all internal listeners of them. + + If allow_different_tips is True, waits until everyone has + the same block count. + """ + while timeout > 0: + if allow_different_tips: + tips = [ x.getblockcount() for x in rpc_connections ] + else: + tips = [ x.getbestblockhash() for x in rpc_connections ] + if tips == [ tips[0] ]*len(tips): + break + time.sleep(wait) + timeout -= wait + + """ Zebra does not support the `fullyNotified` field in the `blockchaininfo` RPC + # Now that the block counts are in sync, wait for the internal + # notifications to finish + while timeout > 0: + notified = [ x.getblockchaininfo()['fullyNotified'] for x in rpc_connections ] + if notified == [ True ] * len(notified): + return True + time.sleep(wait) + timeout -= wait + + raise AssertionError("Block sync failed") + """ + return True + +def sync_mempools(rpc_connections, wait=0.5, timeout=60): + """ + Wait until everybody has the same transactions in their memory + pools, and has notified all internal listeners of them + """ + while timeout > 0: + pool = set(rpc_connections[0].getrawmempool()) + num_match = 1 + for i in range(1, len(rpc_connections)): + if set(rpc_connections[i].getrawmempool()) == pool: + num_match = num_match+1 + if num_match == len(rpc_connections): + break + time.sleep(wait) + timeout -= wait + + """ Zebra does not support the `fullyNotified` field in the `getmempoolinfo` RPC + # Now that the mempools are in sync, wait for the internal + # notifications to finish + while timeout > 0: + notified = [ x.getmempoolinfo()['fullyNotified'] for x in rpc_connections ] + if notified == [ True ] * len(notified): + return True + time.sleep(wait) + timeout -= wait + + raise AssertionError("Mempool sync failed") + """ + return True + +bitcoind_processes = {} + +def initialize_datadir(dirname, n, clock_offset=0): + datadir = os.path.join(dirname, "node"+str(n)) + if not os.path.isdir(datadir): + os.makedirs(datadir) + rpc_u, rpc_p = rpc_auth_pair(n) + config_rpc_port = rpc_port(n) + config_p2p_port = p2p_port(n) + + with open(os.path.join(datadir, "zcash.conf"), 'w', encoding='utf8') as f: + f.write("regtest=1\n") + f.write("showmetrics=0\n") + f.write("rpcuser=" + rpc_u + "\n") + f.write("rpcpassword=" + rpc_p + "\n") + 
f.write("port="+str(config_p2p_port)+"\n") + f.write("rpcport="+str(config_rpc_port)+"\n") + f.write("listenonion=0\n") + if clock_offset != 0: + f.write('clockoffset='+str(clock_offset)+'\n') + + update_zebrad_conf(datadir, config_rpc_port, config_p2p_port) + + return datadir + +def update_zebrad_conf(datadir, rpc_port, p2p_port): + import toml + + config_path = zebrad_config(datadir) + + with open(config_path, 'r') as f: + config_file = toml.load(f) + + config_file['rpc']['listen_addr'] = '127.0.0.1:'+str(rpc_port) + config_file['network']['listen_addr'] = '127.0.0.1:'+str(p2p_port) + config_file['state']['cache_dir'] = datadir + + with open(config_path, 'w') as f: + toml.dump(config_file, f) + + return config_path + +def rpc_auth_pair(n): + return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n) + +def rpc_url(i, rpchost=None): + rpc_u, rpc_p = rpc_auth_pair(i) + host = '127.0.0.1' + port = rpc_port(i) + if rpchost: + parts = rpchost.split(':') + if len(parts) == 2: + host, port = parts + else: + host = rpchost + # For zebra, we just use a non-authenticated endpoint. + return "http://%s:%d" % (host, int(port)) + # We might want to get back to authenticated endpoints after #8864: + #return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port)) + +def wait_for_bitcoind_start(process, url, i): + ''' + Wait for bitcoind to start. This means that RPC is accessible and fully initialized. + Raise an exception if bitcoind exits during initialization. + ''' + time.sleep(1) # give zebrad a moment to start + while True: + if process.poll() is not None: + raise Exception('%s node %d exited with status %i during initialization' % (zcashd_binary(), i, process.returncode)) + try: + rpc = get_rpc_proxy(url, i) + rpc.getblockcount() + break # break out of loop on success + except IOError as e: + if e.errno != errno.ECONNREFUSED: # Port not yet open? + raise # unknown IO error + except JSONRPCException as e: # Initialization phase + if e.error['code'] != -28: # RPC in warmup? + raise # unknown JSON RPC exception + time.sleep(0.25) + +def initialize_chain(test_dir, num_nodes, cachedir, cache_behavior='current'): + """ + Create a set of node datadirs in `test_dir`, based upon the specified + `cache_behavior` value. The following values are recognized for + `cache_behavior`: + + * 'current': create a 200-block-long chain (with wallet) for MAX_NODES + in `cachedir` if necessary. Afterward, create num_nodes copies in + `test_dir` from the cache. The resulting nodes will be configured to + use the -clockoffset config argument when starting to ensure that + the cached chain is not treated as being excessively out-of-date. + * 'sprout': use persisted chain data containing known amounts of Sprout + funds from the files in `qa/rpc-tests/cache/sprout`. This allows + testing of Sprout spends even though Sprout outputs can no longer + be created by zcashd software. The resulting nodes will be configured to + use the -clockoffset config argument when starting to ensure that + the cached chain is not treated as being excessively out-of-date. + * 'fresh': force re-creation of the cache, and then start as for `current`. + * 'clean': start the nodes without cached chain data, allowing the test + to take full control of chain setup. 
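+
+    For example, a test that wants to drive chain setup itself would call:
+
+        initialize_chain(self.options.tmpdir, self.num_nodes,
+                         self.options.cachedir, 'clean')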
+    """
+    assert num_nodes <= MAX_NODES
+
+    def rebuild_cache():
+        # find and delete old cache directories if any exist
+        for i in range(MAX_NODES):
+            if os.path.isdir(os.path.join(cachedir,"node"+str(i))):
+                shutil.rmtree(os.path.join(cachedir,"node"+str(i)))
+
+        # Create cache directories, run bitcoinds:
+        block_time = int(time.time()) - (200 * PRE_BLOSSOM_BLOCK_TARGET_SPACING)
+        for i in range(MAX_NODES):
+            datadir = initialize_datadir(cachedir, i)
+
+            config = update_zebrad_conf(datadir, rpc_port(i), p2p_port(i))
+            binary = zcashd_binary()
+            args = [ binary, "-c="+config, "start" ]
+
+            bitcoind_processes[i] = subprocess.Popen(args)
+            if os.getenv("PYTHON_DEBUG", ""):
+                print("initialize_chain: %s started, waiting for RPC to come up" % (zcashd_binary(),))
+            wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
+            if os.getenv("PYTHON_DEBUG", ""):
+                print("initialize_chain: RPC successfully started")
+
+        rpcs = []
+        for i in range(MAX_NODES):
+            try:
+                rpcs.append(get_rpc_proxy(rpc_url(i), i))
+            except:
+                sys.stderr.write("Error connecting to "+rpc_url(i)+"\n")
+                sys.exit(1)
+
+        # Create a 200-block-long chain; each of the first 4 nodes
+        # gets 25 mature blocks and 25 immature.
+        # Note: To preserve compatibility with older versions of
+        # initialize_chain, only 4 nodes will generate coins.
+        #
+        # Blocks are created with timestamps 2.5 minutes apart (matching the
+        # chain defaulting above to Sapling active), starting 200 * 2.5 minutes
+        # before the current time.
+        for i in range(2):
+            for peer in range(4):
+                for j in range(25):
+                    # Removed because zebrad does not have this RPC method:
+                    #set_node_times(rpcs, block_time)
+                    rpcs[peer].generate(1)
+                    block_time += PRE_BLOSSOM_BLOCK_TARGET_SPACING
+                # Must sync before next peer starts generating blocks
+                sync_blocks(rpcs)
+        # Check that local time isn't going backwards
+        assert_greater_than(time.time() + 1, block_time)
+
+        # Shut them down, and clean up cache directories:
+        stop_nodes(rpcs)
+        wait_bitcoinds()
+
+        for i in range(MAX_NODES):
+            # record the system time at which the cache was regenerated
+            with open(node_file(cachedir, i, 'cache_config.json'), "w", encoding="utf8") as cache_conf_file:
+                cache_config = { "cache_time": time.time() }
+                cache_conf_json = json.dumps(cache_config, indent=4)
+                cache_conf_file.write(cache_conf_json)
+
+            # Removed because zebrad does not create these files:
+            #os.remove(node_file(cachedir, i, "debug.log"))
+            #os.remove(node_file(cachedir, i, "db.log"))
+            #os.remove(node_file(cachedir, i, "peers.dat"))
+
+    def init_from_cache():
+        for i in range(num_nodes):
+            from_dir = os.path.join(cachedir, "node"+str(i))
+            to_dir = os.path.join(test_dir, "node"+str(i))
+            shutil.copytree(from_dir, to_dir)
+            with open(os.path.join(to_dir, 'cache_config.json'), "r", encoding="utf8") as cache_conf_file:
+                cache_conf = json.load(cache_conf_file)
+                # obtain the clock offset as a negative number of seconds
+                offset = round(cache_conf['cache_time']) - round(time.time())
+                # overwrite port/rpcport and clock offset in zcash.conf
+                initialize_datadir(test_dir, i, clock_offset=offset)
+
+    def init_persistent(cache_behavior):
+        assert num_nodes <= 4 # only 4 nodes with Sprout funds are supported
+        cache_path = persistent_cache_path(cache_behavior)
+        if not os.path.isdir(cache_path):
+            raise Exception('No cache available for cache behavior %s' % cache_behavior)
+
+        chain_cache_filename = os.path.join(cache_path, "chain_cache.tar.gz")
+        if not os.path.exists(chain_cache_filename):
+            raise Exception('Chain cache missing for cache behavior %s' % cache_behavior)
+
+        for i in range(num_nodes):
+            to_dir = os.path.join(test_dir, "node"+str(i), "regtest")
+            os.makedirs(to_dir)
+
+            # Copy the same chain data to all nodes
+            with tarfile.open(chain_cache_filename, "r:gz") as chain_cache_file:
+                tarfile_extractall(chain_cache_file, to_dir)
+
+            # Copy in per-node wallet data
+            wallet_tgz_filename = os.path.join(cache_path, "node"+str(i)+"_wallet.tar.gz")
+            if not os.path.exists(wallet_tgz_filename):
+                raise Exception('Wallet cache missing for cache behavior %s, node %d' % (cache_behavior, i))
+            with tarfile.open(wallet_tgz_filename, "r:gz") as wallet_tgz_file:
+                tarfile_extractall(wallet_tgz_file, os.path.join(to_dir, "wallet.dat"))
+
+            # Copy in per-node wallet config and update zcash.conf to set the
+            # clock offsets correctly.
+            cache_conf_filename = os.path.join(to_dir, 'cache_config.json')
+            if not os.path.exists(cache_conf_filename):
+                raise Exception('Cache config missing for cache behavior %s, node %d' % (cache_behavior, i))
+            with open(cache_conf_filename, "r", encoding="utf8") as cache_conf_file:
+                cache_conf = json.load(cache_conf_file)
+                # obtain the clock offset as a negative number of seconds
+                offset = round(cache_conf['cache_time']) - round(time.time())
+                # overwrite port/rpcport and clock offset in zcash.conf
+                initialize_datadir(test_dir, i, clock_offset=offset)
+
+    def cache_rebuild_required():
+        for i in range(MAX_NODES):
+            node_path = os.path.join(cachedir, 'node'+str(i))
+            if os.path.isdir(node_path):
+                if not os.path.isfile(node_file(cachedir, i, 'cache_config.json')):
+                    return True
+            else:
+                return True
+        return False
+
+    if cache_behavior == 'current':
+        if cache_rebuild_required(): rebuild_cache()
+        init_from_cache()
+    elif cache_behavior == 'fresh':
+        rebuild_cache()
+        init_from_cache()
+    elif cache_behavior == 'clean':
+        initialize_chain_clean(test_dir, num_nodes)
+    else:
+        init_persistent(cache_behavior)
+
+def initialize_chain_clean(test_dir, num_nodes):
+    """
+    Create an empty blockchain and num_nodes wallets.
+    Useful if a test case wants complete control over initialization.
+    """
+    for i in range(num_nodes):
+        initialize_datadir(test_dir, i)
+
+def persistent_cache_path(cache_behavior):
+    return os.path.join(
+        os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
+        'cache',
+        cache_behavior
+    )
+
+def persistent_cache_exists(cache_behavior):
+    cache_path = persistent_cache_path(cache_behavior)
+    return os.path.isdir(cache_path)
+
+# Clean up, zip, and persist the generated datadirs. Record the generation
+# time so that we can correctly set the system clock offset in tests that
+# restore their node states using the resulting files.
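+# The files written here (chain_cache.tar.gz, node<i>_wallet.tar.gz and
+# cache_config.json under the persistent cache path) mirror the layout that
+# init_persistent() in initialize_chain() reads back.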
+def persist_node_caches(tmpdir, cache_behavior, num_nodes): + cache_path = persistent_cache_path(cache_behavior) + if os.path.exists(cache_path): + raise Exception('Cache already exists for cache behavior %s' % cache_behavior) + os.mkdir(cache_path) + + for i in range(num_nodes): + node_path = os.path.join(tmpdir, 'node' + str(i)) + + # Clean up the files that we don't want to persist + os.remove(os.path.join(node_path, 'debug.log')) + os.remove(os.path.join(node_path, 'db.log')) + os.remove(os.path.join(node_path, 'peers.dat')) + + # Persist the wallet file for the node to the cache + wallet_tgz_filename = os.path.join(cache_path, 'node' + str(i) + '_wallet.tar.gz') + with tarfile.open(wallet_tgz_filename, "w:gz") as wallet_tgz_file: + wallet_tgz_file.add(os.path.join(node_path, 'wallet.dat'), arcname="") + + # Persist the chain data and cache config just once; it will be reused + # for all of the nodes when loading from the cache. + if i == 0: + # Move the wallet.dat file out of the way so that it doesn't + # pollute the chain cache tarfile + shutil.move( + os.path.join(node_path, 'wallet.dat'), + os.path.join(tmpdir, 'wallet.dat.0')) + + # Store the current time so that we can correctly set the clock + # offset when restoring from the cache. + cache_config = { "cache_time": time.time() } + cache_conf_filename = os.path.join(cache_path, 'cache_config.json') + with open(cache_conf_filename, "w", encoding="utf8") as cache_conf_file: + cache_conf_json = json.dumps(cache_config, indent=4) + cache_conf_file.write(cache_conf_json) + + # Persist the chain data. + chain_cache_filename = os.path.join(cache_path, 'chain_cache.tar.gz') + with tarfile.open(chain_cache_filename, "w:gz") as chain_cache_file: + chain_cache_file.add(node_path, arcname="") + + # Move the wallet file back into place + shutil.move( + os.path.join(tmpdir, 'wallet.dat.0'), + os.path.join(node_path, 'wallet.dat')) + + +def _rpchost_to_args(rpchost): + '''Convert optional IP:port spec to rpcconnect/rpcport args''' + if rpchost is None: + return [] + + match = re.match(r'(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost) + if not match: + raise ValueError('Invalid RPC host spec ' + rpchost) + + rpcconnect = match.group(1) + rpcport = match.group(2) + + if rpcconnect.startswith('['): # remove IPv6 [...] 
wrapping + rpcconnect = rpcconnect[1:-1] + + rv = ['-rpcconnect=' + rpcconnect] + if rpcport: + rv += ['-rpcport=' + rpcport] + return rv + +def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None, stderr=None): + """ + Start a bitcoind and return RPC connection to it + """ + + datadir = os.path.join(dirname, "node"+str(i)) + if binary is None: + binary = zcashd_binary() + + config = update_zebrad_conf(datadir, rpc_port(i), p2p_port(i)) + args = [ binary, "-c="+config, "start" ] + + if extra_args is not None: args.extend(extra_args) + bitcoind_processes[i] = subprocess.Popen(args, stderr=stderr) + if os.getenv("PYTHON_DEBUG", ""): + print("start_node: bitcoind started, waiting for RPC to come up") + url = rpc_url(i, rpchost) + wait_for_bitcoind_start(bitcoind_processes[i], url, i) + if os.getenv("PYTHON_DEBUG", ""): + print("start_node: RPC successfully started for node {} with pid {}".format(i, bitcoind_processes[i].pid)) + proxy = get_rpc_proxy(url, i, timeout=timewait) + + if COVERAGE_DIR: + coverage.write_all_rpc_commands(COVERAGE_DIR, proxy) + + return proxy + +def assert_start_raises_init_error(i, dirname, extra_args=None, expected_msg=None): + with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr: + try: + node = start_node(i, dirname, extra_args, stderr=log_stderr) + stop_node(node, i) + except Exception as e: + assert ("%s node %d exited" % (zcashd_binary(), i)) in str(e) # node must have shutdown + if expected_msg is not None: + log_stderr.seek(0) + stderr = log_stderr.read().decode('utf-8') + if expected_msg not in stderr: + raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr) + else: + if expected_msg is None: + assert_msg = "%s should have exited with an error" % (zcashd_binary(),) + else: + assert_msg = "%s should have exited with expected error %r" % (zcashd_binary(), expected_msg) + raise AssertionError(assert_msg) + +def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None): + """ + Start multiple bitcoinds, return RPC connections to them + """ + if extra_args is None: extra_args = [ None for _ in range(num_nodes) ] + if binary is None: binary = [ None for _ in range(num_nodes) ] + rpcs = [] + try: + for i in range(num_nodes): + rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i])) + except: # If one node failed to start, stop the others + stop_nodes(rpcs) + raise + return rpcs + +def node_file(dirname, n_node, filename): + return os.path.join(dirname, "node"+str(n_node), filename) + +def check_node(i): + bitcoind_processes[i].poll() + return bitcoind_processes[i].returncode + +def stop_node(node, i): + try: + node.stop() + except http.client.CannotSendRequest as e: + print("WARN: Unable to stop node: " + repr(e)) + bitcoind_processes[i].wait() + del bitcoind_processes[i] + +def stop_nodes(nodes): + for node in nodes: + try: + node.stop() + except http.client.CannotSendRequest as e: + print("WARN: Unable to stop node: " + repr(e)) + del nodes[:] # Emptying array closes connections as a side effect + +def set_node_times(nodes, t): + for node in nodes: + node.setmocktime(t) + +def wait_bitcoinds(): + # Wait for all bitcoinds to cleanly exit + for bitcoind in list(bitcoind_processes.values()): + bitcoind.wait() + bitcoind_processes.clear() + +def connect_nodes(from_connection, node_num): + ip_port = "127.0.0.1:"+str(p2p_port(node_num)) + from_connection.addnode(ip_port, "onetry") + # poll until version handshake complete to avoid race conditions + # with 
transaction relaying + while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()): + time.sleep(0.1) + +def connect_nodes_bi(nodes, a, b): + connect_nodes(nodes[a], b) + connect_nodes(nodes[b], a) + +def find_output(node, txid, amount): + """ + Return index to output of txid with value amount + Raises exception if there is none. + """ + txdata = node.getrawtransaction(txid, 1) + for i in range(len(txdata["vout"])): + if txdata["vout"][i]["value"] == amount: + return i + raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount))) + + +def gather_inputs(from_node, amount_needed, confirmations_required=1): + """ + Return a random set of unspent txouts that are enough to pay amount_needed + """ + assert(confirmations_required >=0) + utxo = from_node.listunspent(confirmations_required) + random.shuffle(utxo) + inputs = [] + total_in = Decimal("0.00000000") + while total_in < amount_needed and len(utxo) > 0: + t = utxo.pop() + total_in += t["amount"] + inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } ) + if total_in < amount_needed: + raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in)) + return (total_in, inputs) + +def make_change(from_node, amount_in, amount_out, fee): + """ + Create change output(s), return them + """ + outputs = {} + amount = amount_out+fee + change = amount_in - amount + if change > amount*2: + # Create an extra change output to break up big inputs + change_address = from_node.getnewaddress() + # Split change in two, being careful of rounding: + outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) + change = amount_in - amount - outputs[change_address] + if change > 0: + outputs[from_node.getnewaddress()] = change + return outputs + +def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants): + """ + Create a random transaction. + Returns (txid, hex-encoded-transaction-data, fee) + """ + from_node = random.choice(nodes) + to_node = random.choice(nodes) + fee = min_fee + fee_increment*random.randint(0,fee_variants) + + (total_in, inputs) = gather_inputs(from_node, amount+fee) + outputs = make_change(from_node, total_in, amount, fee) + outputs[to_node.getnewaddress()] = float(amount) + + rawtx = from_node.createrawtransaction(inputs, outputs) + signresult = from_node.signrawtransaction(rawtx) + txid = from_node.sendrawtransaction(signresult["hex"], True) + + return (txid, signresult["hex"], fee) + +def assert_equal(expected, actual, message=""): + if expected != actual: + if message: + message = "; %s" % message + raise AssertionError("(left == right)%s\n left: <%s>\n right: <%s>" % (message, str(expected), str(actual))) + +def assert_true(condition, message = ""): + if not condition: + raise AssertionError(message) + +def assert_false(condition, message = ""): + assert_true(not condition, message) + +def assert_greater_than(thing1, thing2): + if thing1 <= thing2: + raise AssertionError("%s <= %s"%(str(thing1),str(thing2))) + +def assert_raises(exc, fun, *args, **kwds): + assert_raises_message(exc, None, fun, *args, **kwds) + +def assert_raises_message(ExceptionType, errstr, func, *args, **kwargs): + """ + Asserts that func throws and that the exception contains 'errstr' + in its message. 
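+
+    Illustrative usage (the RPC method and message here are hypothetical):
+
+        assert_raises_message(JSONRPCException, "Insufficient funds",
+                              node.sendtoaddress, taddr, 999999)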
+ """ + try: + func(*args, **kwargs) + except ExceptionType as e: + if errstr is not None and errstr not in str(e): + raise AssertionError("Invalid exception string: Couldn't find %r in %r" % ( + errstr, str(e))) + except Exception as e: + raise AssertionError("Unexpected exception raised: " + type(e).__name__) + else: + raise AssertionError("No exception raised") + +def fail(message=""): + raise AssertionError(message) + + +# Returns an async operation result +def wait_and_assert_operationid_status_result(node, myopid, in_status='success', in_errormsg=None, timeout=300): + print('waiting for async operation {}'.format(myopid)) + result = None + for _ in range(1, timeout): + results = node.z_getoperationresult([myopid]) + if len(results) > 0: + result = results[0] + break + time.sleep(1) + + assert_true(result is not None, "timeout occurred") + status = result['status'] + + debug = os.getenv("PYTHON_DEBUG", "") + if debug: + print('...returned status: {}'.format(status)) + + errormsg = None + if status == "failed": + errormsg = result['error']['message'] + if debug: + print('...returned error: {}'.format(errormsg)) + assert_equal(in_errormsg, errormsg) + + assert_equal(in_status, status, "Operation returned mismatched status. Error Message: {}".format(errormsg)) + + return result + + +# Returns txid if operation was a success or None +def wait_and_assert_operationid_status(node, myopid, in_status='success', in_errormsg=None, timeout=300): + result = wait_and_assert_operationid_status_result(node, myopid, in_status, in_errormsg, timeout) + if result['status'] == "success": + return result['result']['txid'] + else: + return None + +# Find a coinbase address on the node, filtering by the number of UTXOs it has. +# If no filter is provided, returns the coinbase address on the node containing +# the greatest number of spendable UTXOs. +# The default cached chain has one address per coinbase output. 
+def get_coinbase_address(node, expected_utxos=None): + addrs = [utxo['address'] for utxo in node.listunspent() if utxo['generated']] + assert(len(set(addrs)) > 0) + + if expected_utxos is None: + addrs = [(addrs.count(a), a) for a in set(addrs)] + return sorted(addrs, reverse=True)[0][1] + + addrs = [a for a in set(addrs) if addrs.count(a) == expected_utxos] + assert(len(addrs) > 0) + return addrs[0] + +def check_node_log(self, node_number, line_to_check, stop_node = True): + print("Checking node " + str(node_number) + " logs") + if stop_node: + self.nodes[node_number].stop() + bitcoind_processes[node_number].wait() + logpath = self.options.tmpdir + "/node" + str(node_number) + "/regtest/debug.log" + with open(logpath, "r", encoding="utf8") as myfile: + logdata = myfile.readlines() + for (n, logline) in enumerate(logdata): + if line_to_check in logline: + return n + raise AssertionError(repr(line_to_check) + " not found") + +def nustr(branch_id): + return '%08x' % branch_id + +def nuparams(branch_id, height): + return '-nuparams=%s:%d' % (nustr(branch_id), height) + +def tarfile_extractall(tarfile, path): + if sys.version_info >= (3, 11, 4): + tarfile.extractall(path=path, filter='data') + else: + tarfile.extractall(path=path) diff --git a/zebra-rpc/qa/rpc-tests/test_framework/zip244.py b/zebra-rpc/qa/rpc-tests/test_framework/zip244.py new file mode 100644 index 00000000000..b0f28817a6a --- /dev/null +++ b/zebra-rpc/qa/rpc-tests/test_framework/zip244.py @@ -0,0 +1,294 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 The Zcash developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or https://www.opensource.org/licenses/mit-license.php . + +# +# zip244.py +# +# Functionality to create txids, auth digests, and signature digests. +# +# This file is modified from zcash/zcash-test-vectors. 
+#
+
+import struct
+
+from hashlib import blake2b
+
+from .mininode import ser_string, ser_uint256
+from .script import (
+    SIGHASH_ANYONECANPAY,
+    SIGHASH_NONE,
+    SIGHASH_SINGLE,
+    getHashOutputs,
+    getHashPrevouts,
+    getHashSequence,
+)
+
+
+# Transparent
+
+def transparent_digest(tx):
+    digest = blake2b(digest_size=32, person=b'ZTxIdTranspaHash')
+
+    if len(tx.vin) + len(tx.vout) > 0:
+        digest.update(getHashPrevouts(tx, b'ZTxIdPrevoutHash'))
+        digest.update(getHashSequence(tx, b'ZTxIdSequencHash'))
+        digest.update(getHashOutputs(tx, b'ZTxIdOutputsHash'))
+
+    return digest.digest()
+
+def transparent_scripts_digest(tx):
+    digest = blake2b(digest_size=32, person=b'ZTxAuthTransHash')
+    for x in tx.vin:
+        digest.update(ser_string(x.scriptSig))
+    return digest.digest()
+
+# Sapling
+
+def sapling_digest(saplingBundle):
+    digest = blake2b(digest_size=32, person=b'ZTxIdSaplingHash')
+
+    if len(saplingBundle.spends) + len(saplingBundle.outputs) > 0:
+        digest.update(sapling_spends_digest(saplingBundle))
+        digest.update(sapling_outputs_digest(saplingBundle))
+        digest.update(struct.pack('<q', saplingBundle.valueBalance))
+
+    return digest.digest()
+
+def sapling_auth_digest(saplingBundle):
+    digest = blake2b(digest_size=32, person=b'ZTxAuthSapliHash')
+
+    if len(saplingBundle.spends) + len(saplingBundle.outputs) > 0:
+        for desc in saplingBundle.spends:
+            digest.update(desc.zkproof.serialize())
+        for desc in saplingBundle.spends:
+            digest.update(desc.spendAuthSig.serialize())
+        for desc in saplingBundle.outputs:
+            digest.update(desc.zkproof.serialize())
+        digest.update(saplingBundle.bindingSig.serialize())
+
+    return digest.digest()
+
+# - Spends
+
+def sapling_spends_digest(saplingBundle):
+    digest = blake2b(digest_size=32, person=b'ZTxIdSSpendsHash')
+
+    if len(saplingBundle.spends) > 0:
+        digest.update(sapling_spends_compact_digest(saplingBundle))
+        digest.update(sapling_spends_noncompact_digest(saplingBundle))
+
+    return digest.digest()
+
+def sapling_spends_compact_digest(saplingBundle):
+    digest = blake2b(digest_size=32, person=b'ZTxIdSSpendCHash')
+    for desc in saplingBundle.spends:
+        digest.update(ser_uint256(desc.nullifier))
+    return digest.digest()
+
+def sapling_spends_noncompact_digest(saplingBundle):
+    digest = blake2b(digest_size=32, person=b'ZTxIdSSpendNHash')
+    for desc in saplingBundle.spends:
+        digest.update(ser_uint256(desc.cv))
+        digest.update(ser_uint256(saplingBundle.anchor))
+        digest.update(ser_uint256(desc.rk))
+    return digest.digest()
+
+# - Outputs
+
+def sapling_outputs_digest(saplingBundle):
+    digest = blake2b(digest_size=32, person=b'ZTxIdSOutputHash')
+
+    if len(saplingBundle.outputs) > 0:
+        digest.update(sapling_outputs_compact_digest(saplingBundle))
+        digest.update(sapling_outputs_memos_digest(saplingBundle))
+        digest.update(sapling_outputs_noncompact_digest(saplingBundle))
+
+    return digest.digest()
+
+def sapling_outputs_compact_digest(saplingBundle):
+    digest = blake2b(digest_size=32, person=b'ZTxIdSOutC__Hash')
+    for desc in saplingBundle.outputs:
+        digest.update(ser_uint256(desc.cmu))
+        digest.update(ser_uint256(desc.ephemeralKey))
+        digest.update(desc.encCiphertext[:52])
+    return digest.digest()
+
+def sapling_outputs_memos_digest(saplingBundle):
+    digest = blake2b(digest_size=32, person=b'ZTxIdSOutM__Hash')
+    for desc in saplingBundle.outputs:
+        digest.update(desc.encCiphertext[52:564])
+    return digest.digest()
+
+def sapling_outputs_noncompact_digest(saplingBundle):
+    digest = blake2b(digest_size=32, person=b'ZTxIdSOutN__Hash')
+    for desc in saplingBundle.outputs:
+        digest.update(ser_uint256(desc.cv))
+        digest.update(desc.encCiphertext[564:])
+        digest.update(desc.outCiphertext)
+    return digest.digest()
+
+# Orchard
+
+def orchard_digest(orchardBundle):
+    digest = blake2b(digest_size=32, person=b'ZTxIdOrchardHash')
+
+    if len(orchardBundle.actions) > 0:
+        digest.update(orchard_actions_compact_digest(orchardBundle))
+        digest.update(orchard_actions_memos_digest(orchardBundle))
+        digest.update(orchard_actions_noncompact_digest(orchardBundle))
+        digest.update(struct.pack('B', orchardBundle.flags()))
+        digest.update(struct.pack('<q', orchardBundle.valueBalance()))
+        digest.update(ser_uint256(orchardBundle.anchor()))
+
+    return digest.digest()
+
+def orchard_auth_digest(orchardBundle):
+    digest = blake2b(digest_size=32, person=b'ZTxAuthOrchaHash')
+
+    if len(orchardBundle.actions) > 0:
+        digest.update(bytes(orchardBundle.proofs))
+        for desc in orchardBundle.actions:
+            digest.update(desc.spendAuthSig.serialize())
+        digest.update(orchardBundle.bindingSig.serialize())
+
+    return digest.digest()
+
+# - Actions
+
+def orchard_actions_compact_digest(orchardBundle):
+    digest = blake2b(digest_size=32, person=b'ZTxIdOrcActCHash')
+    for desc in orchardBundle.actions:
+        digest.update(ser_uint256(desc.nullifier))
+        digest.update(ser_uint256(desc.cmx))
+        digest.update(ser_uint256(desc.ephemeralKey))
+        digest.update(desc.encCiphertext[:52])
+    return digest.digest()
+
+def orchard_actions_memos_digest(orchardBundle):
+    digest = blake2b(digest_size=32, person=b'ZTxIdOrcActMHash')
+    for desc in orchardBundle.actions:
+        digest.update(desc.encCiphertext[52:564])
+    return digest.digest()
+
+def orchard_actions_noncompact_digest(orchardBundle):
+    digest = blake2b(digest_size=32, person=b'ZTxIdOrcActNHash')
+    for desc in orchardBundle.actions:
+        digest.update(ser_uint256(desc.cv))
+        digest.update(ser_uint256(desc.rk))
+        digest.update(desc.encCiphertext[564:])
+        digest.update(desc.outCiphertext)
+    return digest.digest()
+
+# Transaction
+
+def header_digest(tx):
+    digest = blake2b(digest_size=32, person=b'ZTxIdHeadersHash')
+
+    digest.update(struct.pack('<I', tx.version_bytes()))
+    digest.update(struct.pack('<I', tx.nVersionGroupId))
+    digest.update(struct.pack('<I', tx.nConsensusBranchId))
+    digest.update(struct.pack('<I', tx.nLockTime))
+    digest.update(struct.pack('<I', tx.nExpiryHeight))
+
+    return digest.digest()
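Every branch hash in `zip244.py` above uses the same construction: a 32-byte personalized BLAKE2b digest whose 16-byte ASCII tag identifies the branch, with an empty bundle committing to the empty string under its tag. A minimal, self-contained sketch of that pattern — the `ZTxIdSaplingHash` tag is the real one from the file; the helper name and payload are illustrative only:

```python
from hashlib import blake2b

def branch_digest(personalization, chunks):
    # ZIP-244 branch hashes are 32-byte BLAKE2b digests with a
    # 16-byte personalization; an empty branch hashes zero bytes.
    assert len(personalization) == 16
    digest = blake2b(digest_size=32, person=personalization)
    for chunk in chunks:
        digest.update(chunk)
    return digest.digest()

# An empty Sapling bundle commits to the empty string under its tag:
print(branch_digest(b'ZTxIdSaplingHash', []).hex())
```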
diff --git a/zebra-rpc/src/methods.rs b/zebra-rpc/src/methods.rs
         -> BoxFuture<Result<Vec<GetAddressUtxos>>>;
+
+    /// Stop the running zebrad process.
+    ///
+    /// # Notes
+    ///
+    /// - Works for non windows targets only.
+    /// - Works only if the network of the running zebrad process is `Regtest`.
+    ///
+    /// zcashd reference: [`stop`](https://zcash.github.io/rpc/stop.html)
+    /// method: post
+    /// tags: control
+    #[rpc(name = "stop")]
+    fn stop(&self) -> Result<String>;
 }
 
 /// RPC method implementations.
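For reference, the new `stop` method can be exercised with any JSON-RPC client. A hedged Python sketch follows — the endpoint URL is an assumption, so use the RPC address your regtest zebrad is actually configured to listen on:

```python
import json
import urllib.request

# Hypothetical endpoint: adjust host/port to your regtest zebrad config.
ZEBRAD_RPC = "http://127.0.0.1:18232"

def zebrad_stop():
    # Per the method docs above, `stop` is only honoured on Regtest
    # (and never on Windows); elsewhere it returns an RPC error.
    payload = json.dumps({
        "jsonrpc": "2.0", "id": 1, "method": "stop", "params": [],
    }).encode()
    req = urllib.request.Request(
        ZEBRAD_RPC, data=payload,
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as rsp:
        return json.load(rsp)["result"]  # e.g. "Zebra server stopping"
```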
@@ -664,7 +677,7 @@ where
 
         let response = mempool.oneshot(request).await.map_server_error()?;
 
-        let queue_results = match response {
+        let mut queue_results = match response {
             mempool::Response::Queued(results) => results,
             _ => unreachable!("incorrect response variant from mempool service"),
         };
@@ -675,10 +688,17 @@
             "mempool service returned more results than expected"
         );
 
-        tracing::debug!("sent transaction to mempool: {:?}", &queue_results[0]);
+        let queue_result = queue_results
+            .pop()
+            .expect("there should be exactly one item in Vec")
+            .inspect_err(|err| tracing::debug!("sent transaction to mempool: {:?}", &err))
+            .map_server_error()?
+            .await;
+
+        tracing::debug!("sent transaction to mempool: {:?}", &queue_result);
 
-        queue_results[0]
-            .as_ref()
+        queue_result
+            .map_server_error()?
             .map(|_| SentTransactionHash(transaction_hash))
             .map_server_error()
     }
@@ -1337,6 +1357,32 @@ where
         }
         .boxed()
     }
+
+    fn stop(&self) -> Result<String> {
+        #[cfg(not(target_os = "windows"))]
+        if self.network.is_regtest() {
+            match nix::sys::signal::raise(nix::sys::signal::SIGINT) {
+                Ok(_) => Ok("Zebra server stopping".to_string()),
+                Err(error) => Err(Error {
+                    code: ErrorCode::InternalError,
+                    message: format!("Failed to shut down: {}", error),
+                    data: None,
+                }),
+            }
+        } else {
+            Err(Error {
+                code: ErrorCode::MethodNotFound,
+                message: "stop is only available on regtest networks".to_string(),
+                data: None,
+            })
+        }
+        #[cfg(target_os = "windows")]
+        Err(Error {
+            code: ErrorCode::MethodNotFound,
+            message: "stop is not available in windows targets".to_string(),
+            data: None,
+        })
+    }
 }
 
 /// Returns the best chain tip height of `latest_chain_tip`,
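With the hunks above, `sendrawtransaction` now awaits the queued transaction's verification result, so clients see verification failures as an RPC error instead of a silently dropped transaction. A sketch of what a qa-framework test could assert, assuming the framework's usual `JSONRPCException` from `authproxy`:

```python
from test_framework.authproxy import JSONRPCException

def send_expecting_verification_error(node, raw_tx_hex):
    # sendrawtransaction now waits for the mempool's verification
    # result, so an invalid transaction surfaces here as an RPC error.
    try:
        txid = node.sendrawtransaction(raw_tx_hex)
    except JSONRPCException as e:
        return e.error["message"]
    raise AssertionError("expected a verification error, got txid %s" % txid)
```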
diff --git a/zebra-rpc/src/methods/get_block_template_rpcs.rs b/zebra-rpc/src/methods/get_block_template_rpcs.rs
index c8c83e9315a..2d50552cfec 100644
--- a/zebra-rpc/src/methods/get_block_template_rpcs.rs
+++ b/zebra-rpc/src/methods/get_block_template_rpcs.rs
@@ -19,7 +19,7 @@ use zebra_chain::{
         Network, NetworkKind, NetworkUpgrade, POW_AVERAGING_WINDOW,
     },
     primitives,
-    serialization::ZcashDeserializeInto,
+    serialization::{ZcashDeserializeInto, ZcashSerialize},
     transparent::{
         self, EXTRA_ZEBRA_COINBASE_DATA, MAX_COINBASE_DATA_LEN, MAX_COINBASE_HEIGHT_DATA_LEN,
     },
@@ -47,7 +49,9 @@ use crate::methods::{
     // TODO: move the types/* modules directly under get_block_template_rpcs,
     // and combine any modules with the same names.
     types::{
-        get_block_template::GetBlockTemplate,
+        get_block_template::{
+            proposal::TimeSource, proposal_block_from_template, GetBlockTemplate,
+        },
         get_mining_info,
         long_poll::LongPollInput,
         peer_info::PeerInfo,
@@ -283,6 +285,22 @@ pub trait GetBlockTemplateRpc {
         &self,
         address: String,
     ) -> BoxFuture<Result<unified_address::Response>>;
+
+    #[rpc(name = "generate")]
+    /// Mine blocks immediately. Returns the block hashes of the generated blocks.
+    ///
+    /// # Parameters
+    ///
+    /// - `num_blocks`: (numeric, required, example=1) Number of blocks to be generated.
+    ///
+    /// # Notes
+    ///
+    /// Only works if the network of the running zebrad process is `Regtest`.
+    ///
+    /// zcashd reference: [`generate`](https://zcash.github.io/rpc/generate.html)
+    /// method: post
+    /// tags: generating
+    fn generate(&self, num_blocks: u32) -> BoxFuture<Result<Vec<GetBlockHash>>>;
 }
 
 /// RPC method implementations.
@@ -994,9 +1012,39 @@ where
     fn get_mining_info(&self) -> BoxFuture<Result<get_mining_info::Response>> {
         let network = self.network.clone();
+        let mut state = self.state.clone();
+
+        let chain_tip = self.latest_chain_tip.clone();
+        let tip_height = chain_tip.best_tip_height().unwrap_or(Height(0)).0;
+
+        let mut current_block_tx = None;
+        if tip_height > 0 {
+            let mined_tx_ids = chain_tip.best_tip_mined_transaction_ids();
+            current_block_tx =
+                (!mined_tx_ids.is_empty()).then(|| mined_tx_ids.len().saturating_sub(1));
+        }
+
         let solution_rate_fut = self.get_network_sol_ps(None, None);
         async move {
+            // Get the current block size.
+            let mut current_block_size = None;
+            if tip_height > 0 {
+                let request = zebra_state::ReadRequest::TipBlockSize;
+                let response: zebra_state::ReadResponse = state
+                    .ready()
+                    .and_then(|service| service.call(request))
+                    .await
+                    .map_server_error()?;
+                current_block_size = match response {
+                    zebra_state::ReadResponse::TipBlockSize(Some(block_size)) => Some(block_size),
+                    _ => None,
+                };
+            }
+
             Ok(get_mining_info::Response::new(
+                tip_height,
+                current_block_size,
+                current_block_tx,
                 network,
                 solution_rate_fut.await?,
             ))
@@ -1357,6 +1405,61 @@ where
         }
         .boxed()
     }
+
+    fn generate(&self, num_blocks: u32) -> BoxFuture<Result<Vec<GetBlockHash>>> {
+        let rpc: GetBlockTemplateRpcImpl<
+            Mempool,
+            State,
+            Tip,
+            BlockVerifierRouter,
+            SyncStatus,
+            AddressBook,
+        > = self.clone();
+        let network = self.network.clone();
+
+        async move {
+            if !network.is_regtest() {
+                return Err(Error {
+                    code: ErrorCode::ServerError(0),
+                    message: "generate is only supported on regtest".to_string(),
+                    data: None,
+                });
+            }
+
+            let mut block_hashes = Vec::new();
+            for _ in 0..num_blocks {
+                let block_template = rpc.get_block_template(None).await.map_server_error()?;
+
+                let get_block_template::Response::TemplateMode(block_template) = block_template
+                else {
+                    return Err(Error {
+                        code: ErrorCode::ServerError(0),
+                        message: "error generating block template".to_string(),
+                        data: None,
+                    });
+                };
+
+                let proposal_block = proposal_block_from_template(
+                    &block_template,
+                    TimeSource::CurTime,
+                    NetworkUpgrade::current(&network, Height(block_template.height)),
+                )
+                .map_server_error()?;
+                let hex_proposal_block =
+                    HexData(proposal_block.zcash_serialize_to_vec().map_server_error()?);
+
+                let _submit = rpc
+                    .submit_block(hex_proposal_block, None)
+                    .await
+                    .map_server_error()?;
+
+                block_hashes.push(GetBlockHash(proposal_block.hash()));
+            }
+
+            Ok(block_hashes)
+        }
+        .boxed()
+    }
 }
 
 // Put support functions in a submodule, to keep this file small.
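The `generate` implementation above loops template → proposal → submit entirely server-side, so a client needs only one call. A hedged sketch against a regtest node proxy (the helper name is illustrative); it also reads the new `getmininginfo` fields added in the next hunk:

```python
def mine_and_report(node, num_blocks=1):
    # `generate` is regtest-only: zebrad builds each block template,
    # turns it into a block proposal, submits it, and returns the hashes.
    block_hashes = node.generate(num_blocks)
    assert len(block_hashes) == num_blocks

    # getmininginfo now also reports the tip height ("blocks") plus the
    # size and tx count of the last mined block, when one exists.
    info = node.getmininginfo()
    return block_hashes, info["blocks"], info.get("currentblocksize")
```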
diff --git a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs
index a14d4a081e7..21627d509db 100644
--- a/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs
+++ b/zebra-rpc/src/methods/get_block_template_rpcs/types/get_mining_info.rs
@@ -5,6 +5,18 @@ use zebra_chain::parameters::Network;
 /// Response to a `getmininginfo` RPC request.
 #[derive(Debug, Default, PartialEq, Eq, serde::Serialize)]
 pub struct Response {
+    /// The current tip height.
+    #[serde(rename = "blocks")]
+    tip_height: u32,
+
+    /// The size of the last mined block if any.
+    #[serde(rename = "currentblocksize", skip_serializing_if = "Option::is_none")]
+    current_block_size: Option<usize>,
+
+    /// The number of transactions in the last mined block if any.
+    #[serde(rename = "currentblocktx", skip_serializing_if = "Option::is_none")]
+    current_block_tx: Option<usize>,
+
     /// The estimated network solution rate in Sol/s.
     networksolps: u64,
@@ -20,8 +32,17 @@ pub struct Response {
 
 impl Response {
     /// Creates a new `getmininginfo` response
-    pub fn new(network: Network, networksolps: u64) -> Self {
+    pub fn new(
+        tip_height: u32,
+        current_block_size: Option<usize>,
+        current_block_tx: Option<usize>,
+        network: Network,
+        networksolps: u64,
+    ) -> Self {
         Self {
+            tip_height,
+            current_block_size,
+            current_block_tx,
             networksolps,
             networkhashps: networksolps,
             chain: network.bip70_network_name(),
diff --git a/zebra-rpc/src/methods/tests/prop.rs b/zebra-rpc/src/methods/tests/prop.rs
index c2a9c70a348..409a6aefe52 100644
--- a/zebra-rpc/src/methods/tests/prop.rs
+++ b/zebra-rpc/src/methods/tests/prop.rs
@@ -7,6 +7,7 @@ use hex::ToHex;
 use jsonrpc_core::{Error, ErrorCode};
 use proptest::{collection::vec, prelude::*};
 use thiserror::Error;
+use tokio::sync::oneshot;
 use tower::buffer::Buffer;
 
 use zebra_chain::{
@@ -61,7 +62,9 @@ proptest! {
         let unmined_transaction = UnminedTx::from(transaction);
         let expected_request = mempool::Request::Queue(vec![unmined_transaction.into()]);
-        let response = mempool::Response::Queued(vec![Ok(())]);
+        let (rsp_tx, rsp_rx) = oneshot::channel();
+        let _ = rsp_tx.send(Ok(()));
+        let response = mempool::Response::Queued(vec![Ok(rsp_rx)]);
 
         mempool
             .expect_request(expected_request)
@@ -111,10 +114,10 @@ proptest! {
             .expect("Transaction serializes successfully");
         let transaction_hex = hex::encode(&transaction_bytes);
 
-        let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex));
+        let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex.clone()));
 
         let unmined_transaction = UnminedTx::from(transaction);
-        let expected_request = mempool::Request::Queue(vec![unmined_transaction.into()]);
+        let expected_request = mempool::Request::Queue(vec![unmined_transaction.clone().into()]);
 
         mempool
             .expect_request(expected_request)
@@ -138,6 +141,32 @@ proptest! {
             "Result is not a server error: {result:?}"
         );
 
+        let send_task = tokio::spawn(rpc.send_raw_transaction(transaction_hex));
+
+        let expected_request = mempool::Request::Queue(vec![unmined_transaction.clone().into()]);
+
+        let (rsp_tx, rsp_rx) = oneshot::channel();
+        let _ = rsp_tx.send(Err("any verification error".into()));
+        mempool
+            .expect_request(expected_request)
+            .await?
+            .respond(Ok::<_, BoxError>(mempool::Response::Queued(vec![Ok(rsp_rx)])));
+
+        let result = send_task
+            .await
+            .expect("Sending raw transactions should not panic");
+
+        prop_assert!(
+            matches!(
+                result,
+                Err(Error {
+                    code: ErrorCode::ServerError(_),
+                    ..
+                })
+            ),
+            "Result is not a server error: {result:?}"
+        );
+
         // The queue task should continue without errors or panics
         let rpc_tx_queue_task_result = rpc_tx_queue_task_handle.now_or_never();
         prop_assert!(rpc_tx_queue_task_result.is_none());
@@ -897,7 +926,9 @@ proptest! {
 
         // now a retry will be sent to the mempool
         let expected_request = mempool::Request::Queue(vec![mempool::Gossip::Tx(tx_unmined.clone())]);
-        let response = mempool::Response::Queued(vec![Ok(())]);
+        let (rsp_tx, rsp_rx) = oneshot::channel();
+        let _ = rsp_tx.send(Ok(()));
+        let response = mempool::Response::Queued(vec![Ok(rsp_rx)]);
 
         mempool
             .expect_request(expected_request)
@@ -997,7 +1028,9 @@ proptest!
{ for tx in txs.clone() { let expected_request = mempool::Request::Queue(vec![mempool::Gossip::Tx(UnminedTx::from(tx))]); - let response = mempool::Response::Queued(vec![Ok(())]); + let (rsp_tx, rsp_rx) = oneshot::channel(); + let _ = rsp_tx.send(Ok(())); + let response = mempool::Response::Queued(vec![Ok(rsp_rx)]); mempool .expect_request(expected_request) diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_mining_info@mainnet_10.snap b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_mining_info@mainnet_10.snap index 67ffde393c4..de309513443 100644 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_mining_info@mainnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_mining_info@mainnet_10.snap @@ -3,6 +3,8 @@ source: zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs expression: get_mining_info --- { + "blocks": 1687104, + "currentblocksize": 1617, "networksolps": 2, "networkhashps": 2, "chain": "main", diff --git a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_mining_info@testnet_10.snap b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_mining_info@testnet_10.snap index fc728a8540f..2051e6913ce 100644 --- a/zebra-rpc/src/methods/tests/snapshot/snapshots/get_mining_info@testnet_10.snap +++ b/zebra-rpc/src/methods/tests/snapshot/snapshots/get_mining_info@testnet_10.snap @@ -3,6 +3,8 @@ source: zebra-rpc/src/methods/tests/snapshot/get_block_template_rpcs.rs expression: get_mining_info --- { + "blocks": 1842420, + "currentblocksize": 1618, "networksolps": 0, "networkhashps": 0, "chain": "test", diff --git a/zebra-rpc/src/queue/tests/prop.rs b/zebra-rpc/src/queue/tests/prop.rs index 1db9a340f2e..9f63ecce24d 100644 --- a/zebra-rpc/src/queue/tests/prop.rs +++ b/zebra-rpc/src/queue/tests/prop.rs @@ -5,7 +5,7 @@ use std::{collections::HashSet, env, sync::Arc}; use proptest::prelude::*; use chrono::Duration; -use tokio::time; +use tokio::{sync::oneshot, time}; use tower::ServiceExt; use zebra_chain::{ @@ -196,7 +196,9 @@ proptest! { let request = Request::Queue(vec![Gossip::Tx(unmined_transaction.clone())]); let expected_request = Request::Queue(vec![Gossip::Tx(unmined_transaction.clone())]); let send_task = tokio::spawn(mempool.clone().oneshot(request)); - let response = Response::Queued(vec![Ok(())]); + let (rsp_tx, rsp_rx) = oneshot::channel(); + let _ = rsp_tx.send(Ok(())); + let response = Response::Queued(vec![Ok(rsp_rx)]); mempool .expect_request(expected_request) @@ -337,7 +339,9 @@ proptest! { // retry will queue the transaction to mempool let gossip = Gossip::Tx(UnminedTx::from(transaction.clone())); let expected_request = Request::Queue(vec![gossip]); - let response = Response::Queued(vec![Ok(())]); + let (rsp_tx, rsp_rx) = oneshot::channel(); + let _ = rsp_tx.send(Ok(())); + let response = Response::Queued(vec![Ok(rsp_rx)]); mempool .expect_request(expected_request) diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index 8ef7cae2577..7d02a4ae81b 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -72,10 +72,10 @@ futures = "0.3.30" # ECC dependencies. 
# TODO: we can't use the workspace version for all ECC dependencies in this crate yet (#8809) -zcash_client_backend = { git = "https://github.com/zcash/librustzcash/", commit = "40ca428c6081c61d5a2bf3f2053eb9e18219ca95" } +zcash_client_backend = { version = "0.12.1" } zcash_keys = { workspace = true, features = ["sapling"] } -zcash_primitives = { git = "https://github.com/zcash/librustzcash/", commit = "40ca428c6081c61d5a2bf3f2053eb9e18219ca95" } -zcash_address = { git = "https://github.com/zcash/librustzcash/", commit = "40ca428c6081c61d5a2bf3f2053eb9e18219ca95" } +zcash_primitives = "0.15.0" +zcash_address = "0.3.2" sapling-crypto.workspace = true zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.39", features = ["shielded-scan"] } diff --git a/zebra-state/src/config.rs b/zebra-state/src/config.rs index fa9032edf6e..4cd800f3975 100644 --- a/zebra-state/src/config.rs +++ b/zebra-state/src/config.rs @@ -12,7 +12,7 @@ use serde::{Deserialize, Serialize}; use tokio::task::{spawn_blocking, JoinHandle}; use tracing::Span; -use zebra_chain::parameters::Network; +use zebra_chain::{common::default_cache_dir, parameters::Network}; use crate::{ constants::{DATABASE_FORMAT_VERSION_FILE_NAME, RESTORABLE_DB_VERSIONS, STATE_DATABASE_KIND}, @@ -173,12 +173,8 @@ impl Config { impl Default for Config { fn default() -> Self { - let cache_dir = dirs::cache_dir() - .unwrap_or_else(|| std::env::current_dir().unwrap().join("cache")) - .join("zebra"); - Self { - cache_dir, + cache_dir: default_cache_dir(), ephemeral: false, delete_old_database: true, debug_stop_at_height: None, @@ -471,6 +467,8 @@ pub(crate) use hidden::{ pub(crate) mod hidden { #![allow(dead_code)] + use zebra_chain::common::atomic_write; + use super::*; /// Writes `changed_version` to the on-disk state database after the format is changed. @@ -512,10 +510,9 @@ pub(crate) mod hidden { let version = format!("{}.{}", changed_version.minor, changed_version.patch); - // # Concurrency - // - // The caller handles locking for this file write. - fs::write(version_path, version.as_bytes())?; + // Write the version file atomically so the cache is not corrupted if Zebra shuts down or + // crashes. + atomic_write(version_path, version.as_bytes())??; Ok(()) } diff --git a/zebra-state/src/request.rs b/zebra-state/src/request.rs index 28740a336bb..1863c56b2ed 100644 --- a/zebra-state/src/request.rs +++ b/zebra-state/src/request.rs @@ -1063,6 +1063,11 @@ pub enum ReadRequest { /// Returns [`ReadResponse::ValidBlockProposal`] when successful, or an error if /// the block fails contextual validation. CheckBlockProposalValidity(SemanticallyVerifiedBlock), + + #[cfg(feature = "getblocktemplate-rpcs")] + /// Returns [`ReadResponse::TipBlockSize(usize)`](ReadResponse::TipBlockSize) + /// with the current best chain tip block size in bytes. + TipBlockSize, } impl ReadRequest { @@ -1098,6 +1103,8 @@ impl ReadRequest { ReadRequest::SolutionRate { .. 
} => "solution_rate",
 
             #[cfg(feature = "getblocktemplate-rpcs")]
             ReadRequest::CheckBlockProposalValidity(_) => "check_block_proposal_validity",
+            #[cfg(feature = "getblocktemplate-rpcs")]
+            ReadRequest::TipBlockSize => "tip_block_size",
         }
     }
diff --git a/zebra-state/src/response.rs b/zebra-state/src/response.rs
index 22e610838de..77c252b0c75 100644
--- a/zebra-state/src/response.rs
+++ b/zebra-state/src/response.rs
@@ -229,6 +229,10 @@ pub enum ReadResponse {
     #[cfg(feature = "getblocktemplate-rpcs")]
     /// Response to [`ReadRequest::CheckBlockProposalValidity`]
     ValidBlockProposal,
+
+    #[cfg(feature = "getblocktemplate-rpcs")]
+    /// Response to [`ReadRequest::TipBlockSize`]
+    TipBlockSize(Option<usize>),
 }
 
 /// A structure with the information needed from the state to build a `getblocktemplate` RPC response.
@@ -315,7 +319,7 @@ impl TryFrom<ReadResponse> for Response {
             ReadResponse::ValidBlockProposal => Ok(Response::ValidBlockProposal),
 
             #[cfg(feature = "getblocktemplate-rpcs")]
-            ReadResponse::ChainInfo(_) | ReadResponse::SolutionRate(_) => {
+            ReadResponse::ChainInfo(_) | ReadResponse::SolutionRate(_) | ReadResponse::TipBlockSize(_) => {
                 Err("there is no corresponding Response for this ReadResponse")
             }
         }
diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs
index 2116ab10470..4f970be89d4 100644
--- a/zebra-state/src/service.rs
+++ b/zebra-state/src/service.rs
@@ -39,6 +39,9 @@ use zebra_chain::{
     subtree::NoteCommitmentSubtreeIndex,
 };
 
+#[cfg(feature = "getblocktemplate-rpcs")]
+use zebra_chain::{block::Height, serialization::ZcashSerialize};
+
 use crate::{
     constants::{
         MAX_FIND_BLOCK_HASHES_RESULTS, MAX_FIND_BLOCK_HEADERS_RESULTS_FOR_ZEBRA,
@@ -1905,6 +1908,46 @@ impl Service<ReadRequest> for ReadStateService {
                 })
                 .wait_for_panics()
             }
+
+            #[cfg(feature = "getblocktemplate-rpcs")]
+            ReadRequest::TipBlockSize => {
+                let state = self.clone();
+
+                tokio::task::spawn_blocking(move || {
+                    span.in_scope(move || {
+                        // Get the best chain tip height.
+                        let tip_height = state
+                            .non_finalized_state_receiver
+                            .with_watch_data(|non_finalized_state| {
+                                read::tip_height(non_finalized_state.best_chain(), &state.db)
+                            })
+                            .unwrap_or(Height(0));
+
+                        // Get the block at the best chain tip height.
+                        let block = state.non_finalized_state_receiver.with_watch_data(
+                            |non_finalized_state| {
+                                read::block(
+                                    non_finalized_state.best_chain(),
+                                    &state.db,
+                                    tip_height.into(),
+                                )
+                            },
+                        );
+
+                        // The work is done in the future.
+                        timer.finish(module_path!(), line!(), "ReadRequest::TipBlockSize");
+
+                        // Respond with the length of the obtained block if any.
+ match block { + Some(b) => Ok(ReadResponse::TipBlockSize(Some( + b.zcash_serialize_to_vec()?.len(), + ))), + None => Ok(ReadResponse::TipBlockSize(None)), + } + }) + }) + .wait_for_panics() + } } } } diff --git a/zebra-state/src/service/check.rs b/zebra-state/src/service/check.rs index b20ca0fd4c5..ced63bfea16 100644 --- a/zebra-state/src/service/check.rs +++ b/zebra-state/src/service/check.rs @@ -67,9 +67,16 @@ where .take(POW_ADJUSTMENT_BLOCK_SPAN) .collect(); - let parent_block = relevant_chain - .first() - .expect("state must contain parent block to do contextual validation"); + let Some(parent_block) = relevant_chain.first() else { + warn!( + ?semantically_verified, + ?finalized_tip_height, + "state must contain parent block to do contextual validation" + ); + + return Err(ValidateContextError::NotReadyToBeCommitted); + }; + let parent_block = parent_block.borrow(); let parent_height = parent_block .coinbase_height() diff --git a/zebra-state/src/service/finalized_state/zebra_db/block.rs b/zebra-state/src/service/finalized_state/zebra_db/block.rs index e10fd3b43b4..4dc3a801ef3 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/block.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/block.rs @@ -290,6 +290,7 @@ impl ZebraDb { /// /// - Propagates any errors from writing to the DB /// - Propagates any errors from updating history and note commitment trees + #[allow(clippy::unwrap_in_result)] pub(in super::super) fn write_block( &mut self, finalized: FinalizedBlock, diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index 1c873400730..2c1ce74992e 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -77,7 +77,7 @@ openapi-generator = [ "zebra-rpc", "syn", "quote", - "serde_yaml", + "serde_yml", "serde" ] @@ -121,7 +121,7 @@ zcash_protocol.workspace = true rand = "0.8.5" syn = { version = "2.0.72", features = ["full"], optional = true } quote = { version = "1.0.36", optional = true } -serde_yaml = { version = "0.9.34+deprecated", optional = true } +serde_yml = { version = "0.0.12", optional = true } serde = { version = "1.0.204", features = ["serde_derive"], optional = true } indexmap = "2.3.0" diff --git a/zebra-utils/README.md b/zebra-utils/README.md index 48a887e09dd..2422264ea4a 100644 --- a/zebra-utils/README.md +++ b/zebra-utils/README.md @@ -112,7 +112,7 @@ This program is commonly used as part of `zebrad-log-filter` where hashes will b The program is designed to filter the output from the zebra terminal or log file. Each time a hash is seen the script will capture it and get the additional information using `zebrad-hash-lookup`. 
-Assuming `zebrad`, `zclash-cli`, `zebrad-hash-lookup` and `zebrad-log-filter` are in your path the program can used as:
+Assuming `zebrad`, `zcash-cli`, `zebrad-hash-lookup` and `zebrad-log-filter` are in your path the program can be used as:
 
 ```sh
 $ zebrad -v start | zebrad-log-filter
diff --git a/zebra-utils/src/bin/openapi-generator/main.rs b/zebra-utils/src/bin/openapi-generator/main.rs
index 0935f6560ff..15e5446d855 100644
--- a/zebra-utils/src/bin/openapi-generator/main.rs
+++ b/zebra-utils/src/bin/openapi-generator/main.rs
@@ -174,9 +174,9 @@ fn main() -> Result<(), Box<dyn Error>> {
     let all_methods = Methods { paths: methods };
 
     // Add openapi header and write to file
-    let yaml_string = serde_yaml::to_string(&all_methods)?;
+    let yml_string = serde_yml::to_string(&all_methods)?;
     let mut w = File::create("openapi.yaml")?;
-    w.write_all(format!("{}{}", create_yaml(), yaml_string).as_bytes())?;
+    w.write_all(format!("{}{}", create_yaml(), yml_string).as_bytes())?;
 
     Ok(())
 }
@@ -543,6 +543,7 @@ fn get_default_properties(method_name: &str) -> Result<IndexMap<String, Property>, Box<dyn Error>> {
         // control
         "getinfo" => default_property(type_, items.clone(), GetInfo::default())?,
+        "stop" => default_property(type_, items.clone(), ())?,
         // transaction
         "sendrawtransaction" => {
             default_property(type_, items.clone(), SentTransactionHash::default())?
diff --git a/zebrad/src/components/mempool.rs b/zebrad/src/components/mempool.rs
index 2d9b2b3e0c5..05732ddaac2 100644
--- a/zebrad/src/components/mempool.rs
+++ b/zebrad/src/components/mempool.rs
@@ -27,7 +27,7 @@ use std::{
 };
 
 use futures::{future::FutureExt, stream::Stream};
-use tokio::sync::broadcast;
+use tokio::sync::{broadcast, oneshot};
 use tokio_stream::StreamExt;
 use tower::{buffer::Buffer, timeout::Timeout, util::BoxService, Service};
 
@@ -560,7 +560,7 @@ impl Service<Request> for Mempool {
                     for tx in tx_retries {
                         // This is just an efficiency optimisation, so we don't care if queueing
                         // transaction requests fails.
-                        let _result = tx_downloads.download_if_needed_and_verify(tx);
+                        let _result = tx_downloads.download_if_needed_and_verify(tx, None);
                     }
                 }
 
@@ -608,8 +608,8 @@ impl Service<Request> for Mempool {
                         tracing::trace!("chain grew during tx verification, retrying ..",);
 
                         // We don't care if re-queueing the transaction request fails.
-                        let _result =
-                            tx_downloads.download_if_needed_and_verify(tx.transaction.into());
+                        let _result = tx_downloads
+                            .download_if_needed_and_verify(tx.transaction.into(), None);
                     }
                 }
                 Ok(Err((txid, error))) => {
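The `Queue` hunk below hands each caller a receiver for its transaction's verification outcome, which is what lets `sendrawtransaction` block until verification completes. From the Python test framework's point of view, a successful send now implies the transaction has already been verified into the mempool. A hedged helper sketch (the function name is illustrative):

```python
def send_and_check_mempool(node, raw_tx_hex):
    # With the responder channels below, a successful send implies the
    # transaction already passed verification, so no polling loop is
    # strictly needed; the assertion documents the new contract.
    txid = node.sendrawtransaction(raw_tx_hex)
    assert txid in node.getrawmempool(), "verified tx should be in mempool"
    return txid
```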
@@ -758,16 +758,24 @@ impl Service<Request> for Mempool {
             Request::Queue(gossiped_txs) => {
                 trace!(req_count = ?gossiped_txs.len(), "got mempool Queue request");
 
-                let rsp: Vec<Result<(), BoxError>> = gossiped_txs
-                    .into_iter()
-                    .map(|gossiped_tx| -> Result<(), MempoolError> {
-                        storage.should_download_or_verify(gossiped_tx.id())?;
-                        tx_downloads.download_if_needed_and_verify(gossiped_tx)?;
-
-                        Ok(())
-                    })
-                    .map(|result| result.map_err(BoxError::from))
-                    .collect();
+                let rsp: Vec<Result<oneshot::Receiver<Result<(), BoxError>>, BoxError>> =
+                    gossiped_txs
+                        .into_iter()
+                        .map(
+                            |gossiped_tx| -> Result<
+                                oneshot::Receiver<Result<(), BoxError>>,
+                                MempoolError,
+                            > {
+                                let (rsp_tx, rsp_rx) = oneshot::channel();
+                                storage.should_download_or_verify(gossiped_tx.id())?;
+                                tx_downloads
+                                    .download_if_needed_and_verify(gossiped_tx, Some(rsp_tx))?;
+
+                                Ok(rsp_rx)
+                            },
+                        )
+                        .map(|result| result.map_err(BoxError::from))
+                        .collect();
 
                 // We've added transactions to the queue
                 self.update_metrics();
diff --git a/zebrad/src/components/mempool/crawler/tests/prop.rs b/zebrad/src/components/mempool/crawler/tests/prop.rs
index fa1e3ef5785..524d754cfdc 100644
--- a/zebrad/src/components/mempool/crawler/tests/prop.rs
+++ b/zebrad/src/components/mempool/crawler/tests/prop.rs
@@ -6,7 +6,7 @@ use proptest::{
     collection::{hash_set, vec},
     prelude::*,
 };
-use tokio::time;
+use tokio::{sync::oneshot, time};
 
 use zebra_chain::{
     chain_sync_status::ChainSyncStatus, parameters::Network, transaction::UnminedTxId,
@@ -317,9 +317,17 @@ async fn respond_to_queue_request(
     expected_transaction_ids: HashSet<UnminedTxId>,
     response: impl IntoIterator<Item = Result<(), MempoolError>>,
 ) -> Result<(), TestCaseError> {
-    let response = response
+    let response: Vec<Result<oneshot::Receiver<Result<(), BoxError>>, BoxError>> = response
         .into_iter()
-        .map(|result| result.map_err(BoxError::from))
+        .map(|result| {
+            result
+                .map(|_| {
+                    let (rsp_tx, rsp_rx) = oneshot::channel();
+                    let _ = rsp_tx.send(Ok(()));
+                    rsp_rx
+                })
+                .map_err(BoxError::from)
+        })
         .collect();
 
     mempool
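The `downloads.rs` changes below thread an optional single-use responder into each download task, resolved exactly once with `Ok(())` or a cloned error. As a rough Python analogue of that oneshot contract (illustrative only, not a Zebra API):

```python
import queue

class OneshotResponder:
    """Rough analogue of the tokio oneshot sender the mempool now hands out."""
    def __init__(self):
        self._slot = queue.Queue(maxsize=1)

    def send(self, result):
        # Resolved at most once: None for success or an error value,
        # mirroring Ok(()) / the cloned download-verify error below.
        self._slot.put_nowait(result)

    def recv(self, timeout=None):
        # The caller blocks until the download/verify task reports back.
        return self._slot.get(timeout=timeout)
```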
diff --git a/zebrad/src/components/mempool/downloads.rs b/zebrad/src/components/mempool/downloads.rs
index d3f62b4087b..eeda6bd9567 100644
--- a/zebrad/src/components/mempool/downloads.rs
+++ b/zebrad/src/components/mempool/downloads.rs
@@ -51,7 +51,7 @@ use zebra_chain::{
 use zebra_consensus::transaction as tx;
 use zebra_network as zn;
 use zebra_node_services::mempool::Gossip;
-use zebra_state as zs;
+use zebra_state::{self as zs, CloneError};
 
 use crate::components::sync::{BLOCK_DOWNLOAD_TIMEOUT, BLOCK_VERIFY_TIMEOUT};
 
@@ -105,17 +105,17 @@ pub const MAX_INBOUND_CONCURRENCY: usize = 25;
 
 struct CancelDownloadAndVerify;
 
 /// Errors that can occur while downloading and verifying a transaction.
-#[derive(Error, Debug)]
+#[derive(Error, Debug, Clone)]
 #[allow(dead_code)]
 pub enum TransactionDownloadVerifyError {
     #[error("transaction is already in state")]
     InState,
 
     #[error("error in state service")]
-    StateError(#[source] BoxError),
+    StateError(#[source] CloneError),
 
     #[error("error downloading transaction")]
-    DownloadFailed(#[source] BoxError),
+    DownloadFailed(#[source] CloneError),
 
     #[error("transaction download / verification was cancelled")]
     Cancelled,
 
@@ -240,9 +240,11 @@ where
     ///
     /// Returns the action taken in response to the queue request.
     #[instrument(skip(self, gossiped_tx), fields(txid = %gossiped_tx.id()))]
+    #[allow(clippy::unwrap_in_result)]
     pub fn download_if_needed_and_verify(
         &mut self,
         gossiped_tx: Gossip,
+        rsp_tx: Option<oneshot::Sender<Result<(), BoxError>>>,
     ) -> Result<(), MempoolError> {
         let txid = gossiped_tx.id();
 
@@ -295,7 +297,7 @@
                     Ok((Some(height), next_height))
                 }
                 Ok(_) => unreachable!("wrong response"),
-                Err(e) => Err(TransactionDownloadVerifyError::StateError(e)),
+                Err(e) => Err(TransactionDownloadVerifyError::StateError(e.into())),
             }?;
 
             trace!(?txid, ?next_height, "got next height");
@@ -307,11 +309,12 @@
             let tx = match network
                 .oneshot(req)
                 .await
+                .map_err(CloneError::from)
                 .map_err(TransactionDownloadVerifyError::DownloadFailed)?
             {
                 zn::Response::Transactions(mut txs) => txs.pop().ok_or_else(|| {
                     TransactionDownloadVerifyError::DownloadFailed(
-                        "no transactions returned".into(),
+                        BoxError::from("no transactions returned").into(),
                     )
                 })?,
                 _ => unreachable!("wrong response to transaction request"),
@@ -373,7 +376,7 @@
 
         let task = tokio::spawn(async move {
             // Prefer the cancel handle if both are ready.
-            tokio::select! {
+            let result = tokio::select! {
                 biased;
                 _ = &mut cancel_rx => {
                     trace!("task cancelled prior to completion");
@@ -381,7 +384,19 @@
                     Err((TransactionDownloadVerifyError::Cancelled, txid))
                 }
                 verification = fut => verification,
+            };
+
+            // Send the result to responder channel if one was provided.
+            if let Some(rsp_tx) = rsp_tx {
+                let _ = rsp_tx.send(
+                    result
+                        .as_ref()
+                        .map(|_| ())
+                        .map_err(|(err, _)| err.clone().into()),
+                );
             }
+
+            result
         });
 
         self.pending.push(task);
@@ -458,6 +473,7 @@ where
         match state
             .ready()
             .await
+            .map_err(CloneError::from)
             .map_err(TransactionDownloadVerifyError::StateError)?
             .call(zs::Request::Transaction(txid.mined_id()))
             .await
         {
             Ok(zs::Response::Transaction(None)) => Ok(()),
             Ok(zs::Response::Transaction(Some(_))) => Err(TransactionDownloadVerifyError::InState),
             Ok(_) => unreachable!("wrong response"),
-            Err(e) => Err(TransactionDownloadVerifyError::StateError(e)),
+            Err(e) => Err(TransactionDownloadVerifyError::StateError(e.into())),
         }?;
 
         Ok(())
diff --git a/zebrad/src/components/mempool/tests/vector.rs b/zebrad/src/components/mempool/tests/vector.rs
index 2868fef2e65..c285923fa7d 100644
--- a/zebrad/src/components/mempool/tests/vector.rs
+++ b/zebrad/src/components/mempool/tests/vector.rs
@@ -445,12 +445,17 @@ async fn mempool_cancel_mined() -> Result<(), Report> {
         .call(Request::Queue(vec![txid.into()]))
         .await
         .unwrap();
-    let queued_responses = match response {
+    let mut queued_responses = match response {
         Response::Queued(queue_responses) => queue_responses,
         _ => unreachable!("will never happen in this test"),
     };
     assert_eq!(queued_responses.len(), 1);
-    assert!(queued_responses[0].is_ok());
+
+    let queued_response = queued_responses
+        .pop()
+        .expect("already checked that there is exactly 1 item in Vec")
+        .expect("initial queue checks result should be Ok");
+
     assert_eq!(mempool.tx_downloads().in_flight(), 1);
 
     // Push block 2 to the state
@@ -489,6 +494,14 @@
     // Check if download was cancelled.
assert_eq!(mempool.tx_downloads().in_flight(), 0); + assert!( + queued_response + .await + .expect("channel should not be closed") + .is_err(), + "queued tx should fail to download and verify due to chain tip change" + ); + Ok(()) } diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index c21b0a0e3e3..cd3572ce3f2 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -29,9 +29,10 @@ //! - `FULL_SYNC_MAINNET_TIMEOUT_MINUTES` env variable: The total number of minutes we //! will allow this test to run or give up. Value for the Mainnet full sync tests. //! - `FULL_SYNC_TESTNET_TIMEOUT_MINUTES` env variable: The total number of minutes we -//! will allow this test to run or give up. Value for the Testnet ful sync tests. -//! - `/zebrad-cache` directory: For some sync tests, this needs to be created in -//! the file system, the created directory should have write permissions. +//! will allow this test to run or give up. Value for the Testnet full sync tests. +//! - `ZEBRA_CACHED_STATE_DIR` env variable: The path to a Zebra cached state directory. +//! If not set, it defaults to `/zebrad-cache`. For some sync tests, this directory needs to be +//! created in the file system with write permissions. //! //! Here are some examples on how to run each of the tests: //! @@ -40,13 +41,15 @@ //! //! $ cargo test sync_large_checkpoints_mempool_mainnet -- --ignored --nocapture //! -//! $ sudo mkdir /zebrad-cache -//! $ sudo chmod 777 /zebrad-cache +//! $ export ZEBRA_CACHED_STATE_DIR="/zebrad-cache" +//! $ sudo mkdir -p "$ZEBRA_CACHED_STATE_DIR" +//! $ sudo chmod 777 "$ZEBRA_CACHED_STATE_DIR" //! $ export FULL_SYNC_MAINNET_TIMEOUT_MINUTES=600 //! $ cargo test full_sync_mainnet -- --ignored --nocapture //! -//! $ sudo mkdir /zebrad-cache -//! $ sudo chmod 777 /zebrad-cache +//! $ export ZEBRA_CACHED_STATE_DIR="/zebrad-cache" +//! $ sudo mkdir -p "$ZEBRA_CACHED_STATE_DIR" +//! $ sudo chmod 777 "$ZEBRA_CACHED_STATE_DIR" //! $ export FULL_SYNC_TESTNET_TIMEOUT_MINUTES=600 //! $ cargo test full_sync_testnet -- --ignored --nocapture //! ``` @@ -67,9 +70,10 @@ //! at least the `ZEBRA_TEST_LIGHTWALLETD` environment variable is present: //! //! - `ZEBRA_TEST_LIGHTWALLETD` env variable: Needs to be present to run any of the lightwalletd tests. -//! - `ZEBRA_CACHED_STATE_DIR` env var: The path to a zebra blockchain database. -//! - `LIGHTWALLETD_DATA_DIR` env variable. The path to a lightwalletd database. -//! - `--features lightwalletd-grpc-tests` cargo flag. The flag given to cargo to build the source code of the running test. +//! - `ZEBRA_CACHED_STATE_DIR` env variable: The path to a Zebra cached state directory. +//! If not set, it defaults to `/zebrad-cache`. +//! - `LIGHTWALLETD_DATA_DIR` env variable: The path to a lightwalletd database. +//! - `--features lightwalletd-grpc-tests` cargo flag: The flag given to cargo to build the source code of the running test. //! //! Here are some examples of running each test: //! 
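The doc-comment updates above make `/zebrad-cache` only a default behind `ZEBRA_CACHED_STATE_DIR`. A small launcher sketch that prepares the environment the same way the documented examples do (the values are the documented examples, not requirements):

```python
import os
import subprocess

def run_full_sync_test(cache_dir="/zebrad-cache", timeout_minutes="600"):
    # Mirrors the shell steps in the acceptance test docs above.
    env = dict(os.environ)
    env["ZEBRA_CACHED_STATE_DIR"] = cache_dir
    env["FULL_SYNC_MAINNET_TIMEOUT_MINUTES"] = timeout_minutes
    os.makedirs(cache_dir, exist_ok=True)  # must be writable by the test
    subprocess.run(
        ["cargo", "test", "full_sync_mainnet", "--", "--ignored", "--nocapture"],
        check=True,
        env=env,
    )
```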
diff --git a/zebrad/tests/common/checkpoints.rs b/zebrad/tests/common/checkpoints.rs index 602525fd926..c1c0ae44716 100644 --- a/zebrad/tests/common/checkpoints.rs +++ b/zebrad/tests/common/checkpoints.rs @@ -136,7 +136,7 @@ pub async fn run(network: Network) -> Result<()> { ?zebra_rpc_address, "waiting for zebrad to open its RPC port...", ); - zebrad.expect_stdout_line_matches(&format!("Opened RPC endpoint at {zebra_rpc_address}"))?; + zebrad.expect_stdout_line_matches(format!("Opened RPC endpoint at {zebra_rpc_address}"))?; tracing::info!( ?network, diff --git a/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs b/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs index 4ca0bc797ad..dd30954948c 100644 --- a/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs +++ b/zebrad/tests/common/get_block_template_rpcs/get_peer_info.rs @@ -34,7 +34,7 @@ pub(crate) async fn run() -> Result<()> { let rpc_address = zebra_rpc_address.expect("getpeerinfo test must have RPC port"); // Wait until port is open. - zebrad.expect_stdout_line_matches(&format!("Opened RPC endpoint at {rpc_address}"))?; + zebrad.expect_stdout_line_matches(format!("Opened RPC endpoint at {rpc_address}"))?; tracing::info!(?rpc_address, "zebrad opened its RPC port",); diff --git a/zebrad/tests/common/get_block_template_rpcs/submit_block.rs b/zebrad/tests/common/get_block_template_rpcs/submit_block.rs index 28f48fb2c14..399efc8d99e 100644 --- a/zebrad/tests/common/get_block_template_rpcs/submit_block.rs +++ b/zebrad/tests/common/get_block_template_rpcs/submit_block.rs @@ -59,7 +59,7 @@ pub(crate) async fn run() -> Result<()> { ?rpc_address, "spawned isolated zebrad with shorter chain, waiting for zebrad to open its RPC port..." ); - zebrad.expect_stdout_line_matches(&format!("Opened RPC endpoint at {rpc_address}"))?; + zebrad.expect_stdout_line_matches(format!("Opened RPC endpoint at {rpc_address}"))?; tracing::info!(?rpc_address, "zebrad opened its RPC port",); diff --git a/zebrad/tests/common/lightwalletd/send_transaction_test.rs b/zebrad/tests/common/lightwalletd/send_transaction_test.rs index f9087771595..bee6cf78356 100644 --- a/zebrad/tests/common/lightwalletd/send_transaction_test.rs +++ b/zebrad/tests/common/lightwalletd/send_transaction_test.rs @@ -118,7 +118,7 @@ pub async fn run() -> Result<()> { ?zebra_rpc_address, "spawned isolated zebrad with shorter chain, waiting for zebrad to open its RPC port..." ); - zebrad.expect_stdout_line_matches(&format!("Opened RPC endpoint at {zebra_rpc_address}"))?; + zebrad.expect_stdout_line_matches(format!("Opened RPC endpoint at {zebra_rpc_address}"))?; tracing::info!( ?zebra_rpc_address, diff --git a/zebrad/tests/common/sync.rs b/zebrad/tests/common/sync.rs index ff5234c2b1e..bac394099f5 100644 --- a/zebrad/tests/common/sync.rs +++ b/zebrad/tests/common/sync.rs @@ -5,7 +5,7 @@ //! Test functions in this file will not be run. //! This file is only for test library code. -use std::{path::PathBuf, time::Duration}; +use std::{env, path::PathBuf, time::Duration}; use tempfile::TempDir; @@ -326,10 +326,20 @@ pub fn check_sync_logs_until( Ok(zebrad) } +/// Returns the cache directory for Zebra's state. +/// +/// It checks the `ZEBRA_CACHED_STATE_DIR` environment variable and returns its value if set. +/// Otherwise, it defaults to `"/zebrad-cache"`. 
+fn get_zebra_cached_state_dir() -> PathBuf { + env::var("ZEBRA_CACHED_STATE_DIR") + .unwrap_or_else(|_| "/zebrad-cache".to_string()) + .into() +} + /// Returns a test config for caching Zebra's state up to the mandatory checkpoint. pub fn cached_mandatory_checkpoint_test_config(network: &Network) -> Result { let mut config = persistent_test_config(network)?; - config.state.cache_dir = "/zebrad-cache".into(); + config.state.cache_dir = get_zebra_cached_state_dir(); // To get to the mandatory checkpoint, we need to sync lots of blocks. // (Most tests use a smaller limit to minimise redundant block downloads.) @@ -377,7 +387,7 @@ pub fn create_cached_database_height( config.state.debug_stop_at_height = Some(height.0); config.consensus.checkpoint_sync = checkpoint_sync; - let dir = PathBuf::from("/zebrad-cache"); + let dir = get_zebra_cached_state_dir(); let mut child = dir .with_exact_config(&config)? .spawn_child(args!["start"])?