diff --git a/.github/workflows/test-go.yml b/.github/workflows/test-go.yml index 2b498be4d011..62185d25b4d5 100644 --- a/.github/workflows/test-go.yml +++ b/.github/workflows/test-go.yml @@ -1,497 +1,4 @@ -on: - workflow_call: - inputs: - go-arch: - description: The execution architecture (arm, amd64, etc.) - required: true - type: string - enterprise: - description: A flag indicating if this workflow is executing for the enterprise repository. - required: true - type: string - total-runners: - description: Number of runners to use for executing non-binary tests. - required: true - type: string - binary-tests: - description: Whether to run the binary tests. - required: false - default: false - type: boolean - env-vars: - description: A map of environment variables as JSON. - required: false - type: string - default: '{}' - extra-flags: - description: A space-separated list of additional build flags. - required: false - type: string - default: '' - runs-on: - description: An expression indicating which kind of runners to use. - required: false - type: string - default: ubuntu-latest - go-tags: - description: A comma-separated list of additional build tags to consider satisfied during the build. - required: false - type: string - name: - description: A suffix to append to archived test results - required: false - default: '' - type: string - go-test-parallelism: - description: The parallelism parameter for Go tests - required: false - default: 20 - type: number - timeout-minutes: - description: The maximum number of minutes that this workflow should run - required: false - default: 60 - type: number - testonly: - description: Whether to run the tests tagged with testonly. - required: false - default: false - type: boolean - test-timing-cache-enabled: - description: Cache the gotestsum test timing data. - required: false - default: true - type: boolean - test-timing-cache-key: - description: The cache key to use for gotestsum test timing data. 
- required: false - default: go-test-reports - type: string - checkout-ref: - description: The ref to use for checkout. - required: false - default: ${{ github.ref }} - type: string - -env: ${{ fromJSON(inputs.env-vars) }} - -jobs: - test-matrix: - permissions: - id-token: write # Note: this permission is explicitly required for Vault auth - contents: read - runs-on: ${{ fromJSON(inputs.runs-on) }} - steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - with: - ref: ${{ inputs.checkout-ref }} - - uses: ./.github/actions/set-up-go - with: - github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - - name: Authenticate to Vault - id: vault-auth - if: github.repository == 'hashicorp/vault-enterprise' - run: vault-auth - - name: Fetch Secrets - id: secrets - if: github.repository == 'hashicorp/vault-enterprise' - uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e - with: - url: ${{ steps.vault-auth.outputs.addr }} - caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} - token: ${{ steps.vault-auth.outputs.token }} - secrets: | - kv/data/github/${{ github.repository }}/datadog-ci DATADOG_API_KEY; - kv/data/github/${{ github.repository }}/github-token username-and-token | github-token; - kv/data/github/${{ github.repository }}/license license_1 | VAULT_LICENSE_CI; - kv/data/github/${{ github.repository }}/license license_2 | VAULT_LICENSE_2; - kv/data/github/${{ github.repository }}/hcp-link HCP_API_ADDRESS; - kv/data/github/${{ github.repository }}/hcp-link HCP_AUTH_URL; - kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_ID; - kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_SECRET; - kv/data/github/${{ github.repository }}/hcp-link HCP_RESOURCE_ID; - - id: setup-git-private - name: Setup Git configuration (private) - if: github.repository == 'hashicorp/vault-enterprise' - run: | - git config --global url."https://${{ steps.secrets.outputs.github-token }}@github.com".insteadOf 
https://github.com - - id: setup-git-public - name: Setup Git configuration (public) - if: github.repository != 'hashicorp/vault-enterprise' - run: | - git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com - - uses: ./.github/actions/set-up-gotestsum - - run: mkdir -p test-results/go-test - - uses: actions/cache/restore@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - if: inputs.test-timing-cache-enabled - with: - path: test-results/go-test - key: ${{ inputs.test-timing-cache-key }}-${{ github.run_number }} - restore-keys: | - ${{ inputs.test-timing-cache-key }}- - go-test-reports- - - name: Sanitize timing files - id: sanitize-timing-files - run: | - # Prune invalid timing files - find test-results/go-test -mindepth 1 -type f -name "*.json" -exec sh -c ' - file="$1"; - jq . "$file" || rm "$file" - ' shell {} \; > /dev/null 2>&1 - - name: Build matrix excluding binary, integration, and testonly tests - id: build-non-binary - if: ${{ !inputs.testonly }} - env: - GOPRIVATE: github.com/hashicorp/* - run: | - # testonly tests need additional build tag though let's exclude them anyway for clarity - ( - go list ./... 
| grep -v "_binary" | grep -v "vault/integ" | grep -v "testonly" | gotestsum tool ci-matrix --debug \ - --partitions "${{ inputs.total-runners }}" \ - --timing-files 'test-results/go-test/*.json' > matrix.json - ) - - name: Build matrix for tests tagged with testonly - if: ${{ inputs.testonly }} - env: - GOPRIVATE: github.com/hashicorp/* - run: | - set -exo pipefail - # enable glob expansion - shopt -s nullglob - # testonly tagged tests need an additional tag to be included - # also running some extra tests for sanity checking with the testonly build tag - ( - go list -tags=testonly ./vault/external_tests/{kv,token,*replication-perf*,*testonly*} ./vault/ | gotestsum tool ci-matrix --debug \ - --partitions "${{ inputs.total-runners }}" \ - --timing-files 'test-results/go-test/*.json' > matrix.json - ) - # disable glob expansion - shopt -u nullglob - - name: Capture list of binary tests - if: inputs.binary-tests - id: list-binary-tests - run: | - LIST="$(go list ./... | grep "_binary" | xargs)" - echo "list=$LIST" >> "$GITHUB_OUTPUT" - - name: Build complete matrix - id: build - run: | - set -exo pipefail - matrix_file="matrix.json" - if [ "${{ inputs.binary-tests}}" == "true" ] && [ -n "${{ steps.list-binary-tests.outputs.list }}" ]; then - export BINARY_TESTS="${{ steps.list-binary-tests.outputs.list }}" - jq --arg BINARY "${BINARY_TESTS}" --arg BINARY_INDEX "${{ inputs.total-runners }}" \ - '.include += [{ - "id": $BINARY_INDEX, - "estimatedRuntime": "N/A", - "packages": $BINARY, - "description": "partition $BINARY_INDEX - binary test packages" - }]' matrix.json > new-matrix.json - matrix_file="new-matrix.json" - fi - # convert the json to a map keyed by id - ( - echo -n "matrix=" - jq -c \ - '.include | map( { (.id|tostring): . 
} ) | add' "$matrix_file" - ) >> "$GITHUB_OUTPUT" - # extract an array of ids from the json - ( - echo -n "matrix_ids=" - jq -c \ - '[ .include[].id | tostring ]' "$matrix_file" - ) >> "$GITHUB_OUTPUT" - outputs: - matrix: ${{ steps.build.outputs.matrix }} - matrix_ids: ${{ steps.build.outputs.matrix_ids }} - - test-go: - needs: test-matrix - permissions: - actions: read - contents: read - id-token: write # Note: this permission is explicitly required for Vault auth - runs-on: ${{ fromJSON(inputs.runs-on) }} - strategy: - fail-fast: false - matrix: - id: ${{ fromJSON(needs.test-matrix.outputs.matrix_ids) }} - env: - GOPRIVATE: github.com/hashicorp/* - TIMEOUT_IN_MINUTES: ${{ inputs.timeout-minutes }} - steps: - - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - with: - ref: ${{ inputs.checkout-ref }} - - uses: ./.github/actions/set-up-go - with: - github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - - name: Authenticate to Vault - id: vault-auth - if: github.repository == 'hashicorp/vault-enterprise' - run: vault-auth - - name: Fetch Secrets - id: secrets - if: github.repository == 'hashicorp/vault-enterprise' - uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e - with: - url: ${{ steps.vault-auth.outputs.addr }} - caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} - token: ${{ steps.vault-auth.outputs.token }} - secrets: | - kv/data/github/${{ github.repository }}/datadog-ci DATADOG_API_KEY; - kv/data/github/${{ github.repository }}/github-token username-and-token | github-token; - kv/data/github/${{ github.repository }}/license license_1 | VAULT_LICENSE_CI; - kv/data/github/${{ github.repository }}/license license_2 | VAULT_LICENSE_2; - kv/data/github/${{ github.repository }}/hcp-link HCP_API_ADDRESS; - kv/data/github/${{ github.repository }}/hcp-link HCP_AUTH_URL; - kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_ID; - kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_SECRET; - 
kv/data/github/${{ github.repository }}/hcp-link HCP_RESOURCE_ID; - - id: setup-git-private - name: Setup Git configuration (private) - if: github.repository == 'hashicorp/vault-enterprise' - run: | - git config --global url."https://${{ steps.secrets.outputs.github-token }}@github.com".insteadOf https://github.com - - id: setup-git-public - name: Setup Git configuration (public) - if: github.repository != 'hashicorp/vault-enterprise' - run: | - git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com - - id: build - if: inputs.binary-tests && matrix.id == inputs.total-runners - env: - GOPRIVATE: github.com/hashicorp/* - run: time make ci-bootstrap dev - - uses: ./.github/actions/set-up-gotestsum - - name: Install gVisor - # Enterprise repo runners do not allow sudo, so can't install gVisor there yet. - if: ${{ !inputs.enterprise }} - run: | - ( - set -e - ARCH="$(uname -m)" - URL="https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}" - wget --quiet "${URL}/runsc" "${URL}/runsc.sha512" \ - "${URL}/containerd-shim-runsc-v1" "${URL}/containerd-shim-runsc-v1.sha512" - sha512sum -c runsc.sha512 \ - -c containerd-shim-runsc-v1.sha512 - rm -f -- *.sha512 - chmod a+rx runsc containerd-shim-runsc-v1 - sudo mv runsc containerd-shim-runsc-v1 /usr/local/bin - ) - sudo tee /etc/docker/daemon.json < job.name.startsWith(prefixToSearchFor) - ); - const url = jobData[0].html_url; - const envVarName = "GH_JOB_URL"; - const envVar = envVarName + "=" + url; - const envFile = process.env.GITHUB_ENV; - - fs.appendFile(envFile, envVar, (err) => { - if (err) throw err; - console.log("Successfully set " + envVarName + " to: " + url); - }); - } catch (error) { - console.log("Error: " + error); - return - } - - name: Prepare failure summary - if: success() || failure() - continue-on-error: true - run: | - # This jq query filters out successful tests, leaving only the failures. 
- # Then, it formats the results into rows of a Markdown table. - # An example row will resemble this: - # | github.com/hashicorp/vault/package | TestName | fips | 0 | 2 | [view results](github.com/link-to-logs) | - jq -r -n 'inputs - | select(.Action == "fail") - | "| ${{inputs.name}} | \(.Package) | \(.Test // "-") | \(.Elapsed) | ${{ matrix.id }} | [view test results :scroll:](${{ env.GH_JOB_URL }}) |"' \ - failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{inputs.name}}.json \ - >> failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{inputs.name}}.md - - name: Upload failure summary - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2 - if: success() || failure() - with: - name: failure-summary - path: failure-summary-${{ matrix.id }}${{ inputs.name != '' && '-' || '' }}${{inputs.name}}.md - - test-collect-reports: - if: ${{ ! cancelled() && needs.test-go.result == 'success' && inputs.test-timing-cache-enabled }} - needs: test-go - runs-on: ${{ fromJSON(inputs.runs-on) }} - steps: - - uses: actions/cache@88522ab9f39a2ea568f7027eddc7d8d8bc9d59c8 # v3.3.1 - with: - path: test-results/go-test - key: ${{ inputs.test-timing-cache-key }}-${{ github.run_number }} - restore-keys: | - ${{ inputs.test-timing-cache-key }}- - go-test-reports- - - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 - with: - name: test-results - path: test-results/go-test - - run: | - rm -rf test-results/go-test/logs - ls -lhR test-results/go-test - find test-results/go-test -mindepth 1 -mtime +3 -delete - - # Prune invalid timing files - find test-results/go-test -mindepth 1 -type f -name "*.json" -exec sh -c ' - file="$1"; - jq . 
"$file" || rm "$file" - ' shell {} \; > /dev/null 2>&1 - - ls -lhR test-results/go-test +```release-note:feature +**Reload seal configuration on SIGHUP**: Seal configuration is reloaded on SIGHUP so that seal configuration can +be changed without shutting down vault +``` diff --git a/.hooks/pre-commit b/.hooks/pre-commit index 0b4d6e0c94fa..aa72bc977912 100755 --- a/.hooks/pre-commit +++ b/.hooks/pre-commit @@ -1,151 +1,3 @@ -#!/usr/bin/env bash - -# READ THIS BEFORE MAKING CHANGES: -# -# If you want to add a new pre-commit check, here are the rules: -# -# 1. Create a bash function for your check (see e.g. ui_lint below). -# NOTE: Each function will be called in a sub-shell so you can freely -# change directory without worrying about interference. -# 2. Add the name of the function to the CHECKS variable. -# 3. If no changes relevant to your new check are staged, then -# do not output anything at all - this would be annoying noise. -# In this case, call 'return 0' from your check function to return -# early without blocking the commit. -# 4. If any non-trivial check-specific thing has to be invoked, -# then output '==> [check description]' as the first line of -# output. Each sub-check should output '--> [subcheck description]' -# after it has run, indicating success or failure. -# 5. Call 'block [reason]' to block the commit. This ensures the last -# line of output calls out that the commit was blocked - which may not -# be obvious from random error messages generated in 4. -# -# At the moment, there are no automated tests for this hook, so please run it -# locally to check you have not broken anything - breaking this will interfere -# with other peoples' workflows significantly, so be sure, check everything twice. - -set -euo pipefail - -# Call block to block the commit with a message. -block() { - echo "$@" - echo "Commit blocked - see errors above." - exit 1 -} - -# Add all check functions to this space separated list. 
-# They are executed in this order (see end of file). -CHECKS="ui_lint ui_copywrite backend_lint" - -# Run ui linter if changes in that dir detected. -ui_lint() { - local DIR=ui LINTER=node_modules/.bin/lint-staged - - # Silently succeed if no changes staged for $DIR - if git diff --name-only --cached --exit-code -- $DIR/; then - return 0 - fi - - # Silently succeed if the linter has not been installed. - # We assume that if you're doing UI dev, you will have installed the linter - # by running yarn. - if [ ! -x $DIR/$LINTER ]; then - return 0 - fi - - echo "==> Changes detected in $DIR/: Running linter..." - - # Run the linter from the UI dir. - cd $DIR - $LINTER || block "UI lint failed" -} - -backend_lint() { - # Silently succeed if no changes staged for Go code files. - staged=$(git diff --name-only --cached --exit-code -- '*.go') - ret=$? - if [ $ret -eq 0 ]; then - return 0 - fi - - # Only run fmtcheck on staged files - ./scripts/gofmtcheck.sh "${staged}" || block "Backend linting failed; run 'make fmt' to fix." -} - -ui_copywrite() { - DIR=ui - BINARY_DIR=$DIR/.copywrite - DOWNLOAD_ERR="==> Copywrite tool not found and failed to downloaded. Please download manually and extract to ui/.copywrite directory to utilize in pre-commit hook." - - # silently succeed if no changes staged for $DIR - if git diff --name-only --cached --exit-code -- $DIR/; then - return 0 - fi - - echo "==> Changes detected in $DIR/: Checking copyright headers..." - - # download latest version of hashicorp/copywrite if necessary - if [ ! -x $BINARY_DIR/copywrite ]; then - local REPO_URL=https://github.com/hashicorp/copywrite - # get the latest version tag - local LATEST_RELEASE_JSON=$(curl -L -s -H 'Accept: application/json' $REPO_URL/releases/latest); - local LATEST_TAG=$(echo $LATEST_RELEASE_JSON | sed -e 's/.*"tag_name":"\([^"]*\)".*/\1/') - - if [ ! $LATEST_TAG ]; then - echo $DOWNLOAD_ERR - return 0; - fi - - # get the OS/Architecture specifics to build the filename - # eg. 
copywrite_0.16.6_darwin_x86_64.tar.gz - case "$OSTYPE" in - linux*) OS='linux' ;; - darwin*) OS='darwin' ;; - msys*) OS='windows';; - esac - local ARCH=$([ $(uname -m) == arm* ] && echo 'arm64' || echo 'x86_64') - local EXT=$([ $OSTYPE == "msys" ] && echo '.zip' || echo '.tar.gz') - local FILENAME=copywrite_"${LATEST_TAG:1}"_"$OS"_"$ARCH""$EXT" - - mkdir -p $BINARY_DIR - echo "==> Copywrite tool not found, downloading version $LATEST_TAG from $REPO_URL..." - curl -L -s $REPO_URL/releases/download/$LATEST_TAG/$FILENAME | tar -xz - -C $BINARY_DIR || { echo $DOWNLOAD_ERR; return 0; }; - fi - - # run the copywrite tool - # if a --path option is added we could apply the headers to only the staged files much easier - # as of the latest version 0.16.6 there is only support for --dirPath - STAGED_FILES=($(git diff --name-only --cached)) - - rm -rf $BINARY_DIR/.staged - mkdir $BINARY_DIR/.staged - - # copy staged files to .staged directory - echo $STAGED_FILES; - for FILE_PATH in "${STAGED_FILES[@]}"; do - cp $FILE_PATH $BINARY_DIR/.staged - done - - COPYWRITE_LOG_LEVEL=info - COPY_CMD="$BINARY_DIR/copywrite headers -d $BINARY_DIR/.staged --config $DIR/.copywrite.hcl" - - # if staged files are missing header run the tool on .staged directory - VALIDATE=$(eval $COPY_CMD --plan) # assigning to var so output is suppressed since it is repeated during second run - if [ $(echo $?) == 1 ]; then - eval $COPY_CMD || { echo "==> Copyright check failed. Please review and add headers manually."; return 0; }; - - # copy files back to original locations and stage changes - local TMP_FILES=$(ls $BINARY_DIR/.staged) - i=0 - for FILE in $TMP_FILES; do - cp $BINARY_DIR/.staged/$FILE "${STAGED_FILES[$i]}" - git add "${STAGED_FILES[$i]}" - i=$(( i + 1 )) - done - fi -} - -for CHECK in $CHECKS; do - # Force each check into a subshell to avoid crosstalk. - ( $CHECK ) || exit $? 
-done +```release-note:improvement +storage/raft: Upgrade to bbolt 1.3.8, along with an extra patch to reduce time scanning large freelist maps. +``` diff --git a/CHANGELOG.md b/CHANGELOG.md index cc17febcfa35..bc33a184f988 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3292 +1,3 @@ -## Previous versions -- [v1.0.0 - v1.9.10](CHANGELOG-pre-v1.10.md) -- [v0.11.6 and earlier](CHANGELOG-v0.md) - -## 1.15.2 -### November 09, 2023 - -SECURITY: -* core: inbound client requests triggering a policy check can lead to an unbounded consumption of memory. A large number of these requests may lead to denial-of-service. This vulnerability, CVE-2023-5954, was introduced in Vault 1.15.0, 1.14.3, and 1.13.7, and is fixed in Vault 1.15.2, 1.14.6, and 1.13.10. [[HSEC-2023-33](https://discuss.hashicorp.com/t/hcsec-2023-33-vault-requests-triggering-policy-checks-may-lead-to-unbounded-memory-consumption/59926)] - -CHANGES: - -* auth/approle: Normalized error response messages when invalid credentials are provided [[GH-23786](https://github.com/hashicorp/vault/pull/23786)] -* secrets/mongodbatlas: Update plugin to v0.10.2 [[GH-23849](https://github.com/hashicorp/vault/pull/23849)] - -FEATURES: - -* cli/snapshot: Add CLI tool to inspect Vault snapshots [[GH-23457](https://github.com/hashicorp/vault/pull/23457)] - -IMPROVEMENTS: - -* api (enterprise): Enable the sys/license/features from any namespace -* storage/etcd: etcd should only return keys when calling List() [[GH-23872](https://github.com/hashicorp/vault/pull/23872)] -* ui: Update flat, shell-quote and swagger-ui-dist packages. Remove swagger-ui styling overrides. [[GH-23700](https://github.com/hashicorp/vault/pull/23700)] -* ui: Update sidebar Secrets engine to title case. [[GH-23964](https://github.com/hashicorp/vault/pull/23964)] - -BUG FIXES: - -* api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured -on the request. 
[[GH-23861](https://github.com/hashicorp/vault/pull/23861)] -* core (enterprise): Do not return an internal error when token policy type lookup fails, log it instead and continue. -* core/activity: Fixes segments fragment loss due to exceeding entry record size limit [[GH-23781](https://github.com/hashicorp/vault/pull/23781)] -* core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" when filter paths are enforced returns 500 error code from the secondary [[GH-23802](https://github.com/hashicorp/vault/pull/23802)] -* core: Revert PR causing memory consumption bug [[GH-23986](https://github.com/hashicorp/vault/pull/23986)] -* core: Skip unnecessary deriving of policies during Login MFA Check. [[GH-23894](https://github.com/hashicorp/vault/pull/23894)] -* core: fix bug where deadlock detection was always on for expiration and quotas. -These can now be configured individually with `detect_deadlocks`. [[GH-23902](https://github.com/hashicorp/vault/pull/23902)] -* core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash [[GH-23874](https://github.com/hashicorp/vault/pull/23874)] -* expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. [[GH-24027](https://github.com/hashicorp/vault/pull/24027)] -* ui: fix broken GUI when accessing from listener with chroot_namespace defined [[GH-23942](https://github.com/hashicorp/vault/pull/23942)] - -## 1.15.1 -### October 25, 2023 - -CHANGES: - -* core: Bump Go version to 1.21.3. - -IMPROVEMENTS: - -* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)] -* auto-auth/azure: Support setting the `authenticate_from_environment` variable to "true" and "false" string literals, too. [[GH-22996](https://github.com/hashicorp/vault/pull/22996)] -* secrets-sync (enterprise): Added telemetry on number of destinations and associations per type. 
-* ui: Adds a warning when whitespace is detected in a key of a KV secret [[GH-23702](https://github.com/hashicorp/vault/pull/23702)] -* ui: Adds toggle to KV secrets engine value download modal to optionally stringify value in downloaded file [[GH-23747](https://github.com/hashicorp/vault/pull/23747)] -* ui: Surface warning banner if UI has stopped auto-refreshing token [[GH-23143](https://github.com/hashicorp/vault/pull/23143)] -* ui: show banner when resultant-acl check fails due to permissions or wrong namespace. [[GH-23503](https://github.com/hashicorp/vault/pull/23503)] - -BUG FIXES: - -* Seal HA (enterprise/beta): Fix rejection of a seal configuration change -from two to one auto seal due to persistence of the previous seal type being -"multiseal". [[GH-23573](https://github.com/hashicorp/vault/pull/23573)] -* audit: Fix bug reopening 'file' audit devices on SIGHUP. [[GH-23598](https://github.com/hashicorp/vault/pull/23598)] -* auth/aws: Fixes a panic that can occur in IAM-based login when a [client config](https://developer.hashicorp.com/vault/api-docs/auth/aws#configure-client) does not exist. [[GH-23555](https://github.com/hashicorp/vault/pull/23555)] -* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)] -* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)] -* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)] -* kmip (enterprise): Improve handling of failures due to storage replication issues. -* kmip (enterprise): Return a structure in the response for query function Query Server Information. 
-* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)] -* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster. -* replication (enterprise): Fix a missing unlock when changing replication state -* secrets-sync (enterprise): Fixed issue where we could sync a deleted secret -* secrets/aws: update credential rotation deadline when static role rotation period is updated [[GH-23528](https://github.com/hashicorp/vault/pull/23528)] -* secrets/consul: Fix revocations when Vault has an access token using specific namespace and admin partition policies [[GH-23010](https://github.com/hashicorp/vault/pull/23010)] -* secrets/pki: Stop processing in-flight ACME verifications when an active node steps down [[GH-23278](https://github.com/hashicorp/vault/pull/23278)] -* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about it not containing a private key -* secrets/transit (enterprise): Address panic when using GCP,AWS,Azure managed keys for encryption operations. At this time all encryption operations for the cloud providers have been disabled, only signing operations are supported. -* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations -* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)] -* storage/consul: fix a bug where an active node in a specific sort of network -partition could continue to write data to Consul after a new leader is elected -potentially causing data loss or corruption for keys with many concurrent -writers. For Enterprise clusters this could cause corruption of the merkle trees -leading to failure to complete merkle sync without a full re-index. 
[[GH-23013](https://github.com/hashicorp/vault/pull/23013)] -* ui: Assumes version 1 for kv engines when options are null because no version is specified [[GH-23585](https://github.com/hashicorp/vault/pull/23585)] -* ui: Decode the connection url for display on the connection details page [[GH-23695](https://github.com/hashicorp/vault/pull/23695)] -* ui: Fix AWS secret engine to allow empty policy_document field. [[GH-23470](https://github.com/hashicorp/vault/pull/23470)] -* ui: Fix bug where auth items were not listed when within a namespace. [[GH-23446](https://github.com/hashicorp/vault/pull/23446)] -* ui: Fix regression that broke the oktaNumberChallenge on the ui. [[GH-23565](https://github.com/hashicorp/vault/pull/23565)] -* ui: Fix the copy token button in the sidebar navigation window when in a collapsed state. [[GH-23331](https://github.com/hashicorp/vault/pull/23331)] -* ui: Fixes issue where you could not share the list view URL from the KV v2 secrets engine. [[GH-23620](https://github.com/hashicorp/vault/pull/23620)] -* ui: Fixes issue with sidebar navigation links disappearing when navigating to policies when a user is not authorized [[GH-23516](https://github.com/hashicorp/vault/pull/23516)] -* ui: Fixes issues displaying accurate TLS state in dashboard configuration details [[GH-23726](https://github.com/hashicorp/vault/pull/23726)] - -## 1.15.0 -### September 27, 2023 - -SECURITY: - -* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)] -* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. 
This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8.[[HSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)] - -CHANGES: - -* auth/alicloud: Update plugin to v0.16.0 [[GH-22646](https://github.com/hashicorp/vault/pull/22646)] -* auth/azure: Update plugin to v0.16.0 [[GH-22277](https://github.com/hashicorp/vault/pull/22277)] -* auth/azure: Update plugin to v0.16.1 [[GH-22795](https://github.com/hashicorp/vault/pull/22795)] -* auth/azure: Update plugin to v0.16.2 [[GH-23060](https://github.com/hashicorp/vault/pull/23060)] -* auth/cf: Update plugin to v0.15.1 [[GH-22758](https://github.com/hashicorp/vault/pull/22758)] -* auth/gcp: Update plugin to v0.16.1 [[GH-22612](https://github.com/hashicorp/vault/pull/22612)] -* auth/jwt: Update plugin to v0.17.0 [[GH-22678](https://github.com/hashicorp/vault/pull/22678)] -* auth/kerberos: Update plugin to v0.10.1 [[GH-22797](https://github.com/hashicorp/vault/pull/22797)] -* auth/kubernetes: Update plugin to v0.17.0 [[GH-22709](https://github.com/hashicorp/vault/pull/22709)] -* auth/kubernetes: Update plugin to v0.17.1 [[GH-22879](https://github.com/hashicorp/vault/pull/22879)] -* auth/ldap: Normalize HTTP response codes when invalid credentials are provided [[GH-21282](https://github.com/hashicorp/vault/pull/21282)] -* auth/oci: Update plugin to v0.14.2 [[GH-22805](https://github.com/hashicorp/vault/pull/22805)] -* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy -* core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace), -which will have access to some system backend paths that were previously only accessible in the root namespace. [[GH-21215](https://github.com/hashicorp/vault/pull/21215)] -* core: Bump Go version to 1.21.1. 
-* database/couchbase: Update plugin to v0.9.3 [[GH-22854](https://github.com/hashicorp/vault/pull/22854)] -* database/couchbase: Update plugin to v0.9.4 [[GH-22871](https://github.com/hashicorp/vault/pull/22871)] -* database/elasticsearch: Update plugin to v0.13.3 [[GH-22696](https://github.com/hashicorp/vault/pull/22696)] -* database/mongodbatlas: Update plugin to v0.10.1 [[GH-22655](https://github.com/hashicorp/vault/pull/22655)] -* database/redis-elasticache: Update plugin to v0.2.2 [[GH-22584](https://github.com/hashicorp/vault/pull/22584)] -* database/redis-elasticache: Update plugin to v0.2.3 [[GH-22598](https://github.com/hashicorp/vault/pull/22598)] -* database/redis: Update plugin to v0.2.2 [[GH-22654](https://github.com/hashicorp/vault/pull/22654)] -* database/snowflake: Update plugin to v0.9.0 [[GH-22516](https://github.com/hashicorp/vault/pull/22516)] -* events: Log level for processing an event dropped from info to debug. [[GH-22997](https://github.com/hashicorp/vault/pull/22997)] -* events: `data_path` will include full data path of secret, including name. [[GH-22487](https://github.com/hashicorp/vault/pull/22487)] -* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host -* sdk/logical/events: `EventSender` interface method is now `SendEvent` instead of `Send`. 
[[GH-22487](https://github.com/hashicorp/vault/pull/22487)] -* secrets/ad: Update plugin to v0.16.1 [[GH-22856](https://github.com/hashicorp/vault/pull/22856)] -* secrets/alicloud: Update plugin to v0.15.1 [[GH-22533](https://github.com/hashicorp/vault/pull/22533)] -* secrets/azure: Update plugin to v0.16.2 [[GH-22799](https://github.com/hashicorp/vault/pull/22799)] -* secrets/azure: Update plugin to v0.16.3 [[GH-22824](https://github.com/hashicorp/vault/pull/22824)] -* secrets/gcp: Update plugin to v0.17.0 [[GH-22746](https://github.com/hashicorp/vault/pull/22746)] -* secrets/gcpkms: Update plugin to v0.15.1 [[GH-22757](https://github.com/hashicorp/vault/pull/22757)] -* secrets/keymgmt: Update plugin to v0.9.3 -* secrets/kubernetes: Update plugin to v0.6.0 [[GH-22823](https://github.com/hashicorp/vault/pull/22823)] -* secrets/kv: Update plugin to v0.16.1 [[GH-22716](https://github.com/hashicorp/vault/pull/22716)] -* secrets/mongodbatlas: Update plugin to v0.10.1 [[GH-22748](https://github.com/hashicorp/vault/pull/22748)] -* secrets/openldap: Update plugin to v0.11.2 [[GH-22734](https://github.com/hashicorp/vault/pull/22734)] -* secrets/terraform: Update plugin to v0.7.3 [[GH-22907](https://github.com/hashicorp/vault/pull/22907)] -* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests, a warning will be returned if max_ttl was applied. -* storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. [[GH-20825](https://github.com/hashicorp/vault/pull/20825)] -* telemetry: Replace `vault.rollback.attempt.{MOUNT_POINT}` and `vault.route.rollback.{MOUNT_POINT}` metrics with `vault.rollback.attempt` and `vault.route.rollback metrics` by default. Added a telemetry configuration `add_mount_point_rollback_metrics` which, when set to true, causes vault to emit the metrics with mount points in their names. 
[[GH-22400](https://github.com/hashicorp/vault/pull/22400)] - -FEATURES: - -* **Certificate Issuance External Policy Service (CIEPS) (enterprise)**: Allow highly-customizable operator control of certificate validation and generation through the PKI Secrets Engine. -* **Copyable KV v2 paths in UI**: KV v2 secret paths are copyable for use in CLI commands or API calls [[GH-22551](https://github.com/hashicorp/vault/pull/22551)] -* **Dashboard UI**: Dashboard is now available in the UI as the new landing page. [[GH-21057](https://github.com/hashicorp/vault/pull/21057)] -* **Database Static Role Advanced TTL Management**: Adds the ability to rotate -static roles on a defined schedule. [[GH-22484](https://github.com/hashicorp/vault/pull/22484)] -* **Event System**: Add subscribe capability and subscribe_event_types to policies for events. [[GH-22474](https://github.com/hashicorp/vault/pull/22474)] -* **GCP IAM Support**: Adds support for IAM-based authentication to MySQL and PostgreSQL backends using Google Cloud SQL. [[GH-22445](https://github.com/hashicorp/vault/pull/22445)] -* **Improved KV V2 UI**: Updated and restructured secret engine for KV (version 2 only) [[GH-22559](https://github.com/hashicorp/vault/pull/22559)] -* **Merkle Tree Corruption Detection (enterprise)**: Add a new endpoint to check merkle tree corruption. -* **Plugin Containers**: Vault supports registering, managing, and running plugins inside a container on Linux. [[GH-22712](https://github.com/hashicorp/vault/pull/22712)] -* **SAML Auth Method (enterprise)**: Enable users to authenticate with Vault using their identity in a SAML Identity Provider. -* **Seal High Availability Beta (enterprise)**: operators can try out configuring more than one automatic seal for resilience against seal provider outages. Not for production use at this time. -* **Secrets Sync (enterprise)**: Add the ability to synchronize KVv2 secret with external secrets manager solutions. 
-* **UI LDAP secrets engine**: Add LDAP secrets engine to the UI. [[GH-20790](https://github.com/hashicorp/vault/pull/20790)] - -IMPROVEMENTS: - -* Bump github.com/hashicorp/go-plugin version v1.4.9 -> v1.4.10 [[GH-20966](https://github.com/hashicorp/vault/pull/20966)] -* api: add support for cloning a Client's tls.Config. [[GH-21424](https://github.com/hashicorp/vault/pull/21424)] -* api: adding a new api sys method for replication status [[GH-20995](https://github.com/hashicorp/vault/pull/20995)] -* audit: add core audit events experiment [[GH-21628](https://github.com/hashicorp/vault/pull/21628)] -* auth/aws: Added support for signed GET requests for authenticating to vault using the aws iam method. [[GH-10961](https://github.com/hashicorp/vault/pull/10961)] -* auth/azure: Add support for azure workload identity authentication (see issue -#18257). Update go-kms-wrapping dependency to include [PR -#155](https://github.com/hashicorp/go-kms-wrapping/pull/155) [[GH-22994](https://github.com/hashicorp/vault/pull/22994)] -* auth/azure: Added Azure API configurable retry options [[GH-23059](https://github.com/hashicorp/vault/pull/23059)] -* auth/cert: Adds support for requiring hexadecimal-encoded non-string certificate extension values [[GH-21830](https://github.com/hashicorp/vault/pull/21830)] -* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)] -* auto-auth/azure: Added Azure Workload Identity Federation support to auto-auth (for Vault Agent and Vault Proxy). [[GH-22264](https://github.com/hashicorp/vault/pull/22264)] -* auto-auth: added support for LDAP auto-auth [[GH-21641](https://github.com/hashicorp/vault/pull/21641)] -* aws/auth: Adds a new config field `use_sts_region_from_client` which allows for using dynamic regional sts endpoints based on Authorization header when using IAM-based authentication. 
[[GH-21960](https://github.com/hashicorp/vault/pull/21960)] -* command/server: add `-dev-tls-san` flag to configure subject alternative names for the certificate generated when using `-dev-tls`. [[GH-22657](https://github.com/hashicorp/vault/pull/22657)] -* core (ent) : Add field that allows lease-count namespace quotas to be inherited by child namespaces. -* core : Add field that allows rate-limit namespace quotas to be inherited by child namespaces. [[GH-22452](https://github.com/hashicorp/vault/pull/22452)] -* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise. -* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)] -* core: Add a new periodic metric to track the number of available policies, `vault.policy.configured.count`. [[GH-21010](https://github.com/hashicorp/vault/pull/21010)] -* core: Fix OpenAPI representation and `-output-policy` recognition of some non-standard sudo paths [[GH-21772](https://github.com/hashicorp/vault/pull/21772)] -* core: Fix regexes for `sys/raw/` and `sys/leases/lookup/` to match prevailing conventions [[GH-21760](https://github.com/hashicorp/vault/pull/21760)] -* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)] -* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. 
[[GH-22567](https://github.com/hashicorp/vault/pull/22567)] -* core: add a listener configuration "chroot_namespace" that forces requests to use a namespace hierarchy [[GH-22304](https://github.com/hashicorp/vault/pull/22304)] -* core: add a listener configuration "chroot_namespace" that forces requests to use a namespace hierarchy -* core: remove unnecessary *BarrierView field from backendEntry struct [[GH-20933](https://github.com/hashicorp/vault/pull/20933)] -* core: use Go stdlib functionalities instead of explicit byte/string conversions [[GH-21854](https://github.com/hashicorp/vault/pull/21854)] -* eventbus: updated go-eventlogger library to allow removal of nodes referenced by pipelines (used for subscriptions) [[GH-21623](https://github.com/hashicorp/vault/pull/21623)] -* events: Allow subscriptions to multiple namespaces [[GH-22540](https://github.com/hashicorp/vault/pull/22540)] -* events: Enabled by default [[GH-22815](https://github.com/hashicorp/vault/pull/22815)] -* events: WebSocket subscriptions add support for boolean filter expressions [[GH-22835](https://github.com/hashicorp/vault/pull/22835)] -* framework: Make it an error for `CreateOperation` to be defined without an `ExistenceCheck`, thereby fixing misleading `x-vault-createSupported` in OpenAPI [[GH-18492](https://github.com/hashicorp/vault/pull/18492)] -* kmip (enterprise): Add namespace lock and unlock support [[GH-21925](https://github.com/hashicorp/vault/pull/21925)] -* openapi: Better mount points for kv-v1 and kv-v2 in openapi.json [[GH-21563](https://github.com/hashicorp/vault/pull/21563)] -* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)] -* openapi: Fix generation of correct fields in some rarer cases [[GH-21942](https://github.com/hashicorp/vault/pull/21942)] -* openapi: Fix response definitions for list operations [[GH-21934](https://github.com/hashicorp/vault/pull/21934)] -* openapi: List operations are now given 
first-class representation in the OpenAPI document, rather than sometimes being overlaid with a read operation at the same path [[GH-21723](https://github.com/hashicorp/vault/pull/21723)] -* plugins: Containerized plugins can be configured to still work when running with systemd's PrivateTmp=true setting. [[GH-23215](https://github.com/hashicorp/vault/pull/23215)] -* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary -* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. -* sdk/framework: Adds replication state helper for backends to check for read-only storage [[GH-21743](https://github.com/hashicorp/vault/pull/21743)] -* secrets/database: Improves error logging for static role rotations by including the database and role names. [[GH-22253](https://github.com/hashicorp/vault/pull/22253)] -* secrets/db: Remove the `service_account_json` parameter when reading DB connection details [[GH-23256](https://github.com/hashicorp/vault/pull/23256)] -* secrets/pki: Add a parameter to allow ExtKeyUsage field usage from a role within ACME. [[GH-21702](https://github.com/hashicorp/vault/pull/21702)] -* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling -* secrets/transit: Add support to create CSRs from keys in transit engine and import/export x509 certificates [[GH-21081](https://github.com/hashicorp/vault/pull/21081)] -* storage/dynamodb: Added three permit pool metrics for the DynamoDB backend, `pending_permits`, `active_permits`, and `pool_size`. [[GH-21742](https://github.com/hashicorp/vault/pull/21742)] -* storage/etcd: Make etcd parameter MaxCallSendMsgSize configurable [[GH-12666](https://github.com/hashicorp/vault/pull/12666)] -* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. 
[[GH-22040](https://github.com/hashicorp/vault/pull/22040)] -* sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. [[GH-21681](https://github.com/hashicorp/vault/pull/21681)] -* ui: Add API Explorer link to Sidebar, under Tools. [[GH-21578](https://github.com/hashicorp/vault/pull/21578)] -* ui: Add pagination to PKI roles, keys, issuers, and certificates list pages [[GH-23193](https://github.com/hashicorp/vault/pull/23193)] -* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)] -* ui: Adds mount configuration details to Kubernetes secrets engine configuration view [[GH-22926](https://github.com/hashicorp/vault/pull/22926)] -* ui: Adds tidy_revoked_certs to PKI tidy status page [[GH-23232](https://github.com/hashicorp/vault/pull/23232)] -* ui: Adds warning before downloading KV v2 secret values [[GH-23260](https://github.com/hashicorp/vault/pull/23260)] -* ui: Display minus icon for empty MaskedInput value. 
Show MaskedInput for KV secrets without values [[GH-22039](https://github.com/hashicorp/vault/pull/22039)] -* ui: JSON diff view available in "Create New Version" form for KV v2 [[GH-22593](https://github.com/hashicorp/vault/pull/22593)] -* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)] -* ui: Move access to KV V2 version diff view to toolbar in Version History [[GH-23200](https://github.com/hashicorp/vault/pull/23200)] -* ui: Update pki mount configuration details to match the new mount configuration details pattern [[GH-23166](https://github.com/hashicorp/vault/pull/23166)] -* ui: add example modal to policy form [[GH-21583](https://github.com/hashicorp/vault/pull/21583)] -* ui: adds allowed_user_ids field to create role form and user_ids to generate certificates form in pki [[GH-22191](https://github.com/hashicorp/vault/pull/22191)] -* ui: display CertificateCard instead of MaskedInput for certificates in PKI [[GH-22160](https://github.com/hashicorp/vault/pull/22160)] -* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] -* ui: implement hashicorp design system [alert](https://helios.hashicorp.design/components/alert) component [[GH-21375](https://github.com/hashicorp/vault/pull/21375)] -* ui: update detail views that render ttl durations to display full unit instead of letter (i.e. 
'days' instead of 'd') [[GH-20697](https://github.com/hashicorp/vault/pull/20697)] -* ui: update unseal and DR operation token flow components [[GH-21871](https://github.com/hashicorp/vault/pull/21871)] -* ui: upgrade Ember to 4.12 [[GH-22122](https://github.com/hashicorp/vault/pull/22122)] - -DEPRECATIONS: - -* auth/centrify: Centrify plugin is deprecated as of 1.15, slated for removal in 1.17 [[GH-23050](https://github.com/hashicorp/vault/pull/23050)] - -BUG FIXES: - -* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)] -* agent: Environment variable VAULT_CACERT_BYTES now works for Vault Agent templates. [[GH-22322](https://github.com/hashicorp/vault/pull/22322)] -* agent: Fix "generate-config" command documentation URL [[GH-21466](https://github.com/hashicorp/vault/pull/21466)] -* api/client: Fix deadlock in client.CloneWithHeaders when used alongside other client methods. [[GH-22410](https://github.com/hashicorp/vault/pull/22410)] -* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)] -* audit: Prevent panic due to nil pointer receiver for audit header formatting. [[GH-22694](https://github.com/hashicorp/vault/pull/22694)] -* auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. 
[[GH-21800](https://github.com/hashicorp/vault/pull/21800)] -* auth/token, sys: Fix path-help being unavailable for some list-only endpoints [[GH-18571](https://github.com/hashicorp/vault/pull/18571)] -* auth/token: Fix parsing of `auth/token/create` fields to avoid incorrect warnings about ignored parameters [[GH-18556](https://github.com/hashicorp/vault/pull/18556)] -* awsutil: Update awsutil to v0.2.3 to fix a regression where Vault no longer -respects `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and `AWS_ROLE_SESSION_NAME`. [[GH-21951](https://github.com/hashicorp/vault/pull/21951)] -* cli: Avoid printing "Success" message when `-field` flag is provided during a `vault write`. [[GH-21546](https://github.com/hashicorp/vault/pull/21546)] -* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)] -* core (enterprise): Fix sentinel policy check logic so that sentinel -policies are not used when Sentinel feature isn't licensed. -* core (enterprise): Remove MFA Configuration for namespace when deleting namespace -* core/managed-keys (enterprise): Allow certain symmetric PKCS#11 managed key mechanisms (AES CBC with and without padding) to operate without an HMAC. -* core/metrics: vault.raft_storage.bolt.write.time should be a counter not a summary [[GH-22468](https://github.com/hashicorp/vault/pull/22468)] -* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. -Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] -* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)] -* core/quotas: Reduce overhead for role calculation when using cloud auth methods. 
[[GH-22583](https://github.com/hashicorp/vault/pull/22583)] -* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] -* core: All subloggers now reflect configured log level on reload. [[GH-22038](https://github.com/hashicorp/vault/pull/22038)] -* core: Fix bug where background thread to update locked user entries runs on DR secondaries. [[GH-22355](https://github.com/hashicorp/vault/pull/22355)] -* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] -* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] -* core: Fixed issue with some durations not being properly parsed to include days. [[GH-21357](https://github.com/hashicorp/vault/pull/21357)] -* core: Fixes list password policy to include those with names containing / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)] -* core: fix race when updating a mount's route entry tainted status and incoming requests [[GH-21640](https://github.com/hashicorp/vault/pull/21640)] -* events: Ensure subscription resources are cleaned up on close. [[GH-23042](https://github.com/hashicorp/vault/pull/23042)] -* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] -* identity/mfa: Fixes to OpenAPI representation and returned error codes for `identity/mfa/method/*` APIs [[GH-20879](https://github.com/hashicorp/vault/pull/20879)] -* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. 
[[GH-20965](https://github.com/hashicorp/vault/pull/20965)] -* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node is observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)] -* openapi: Fix response schema for PKI Issue requests [[GH-21449](https://github.com/hashicorp/vault/pull/21449)] -* openapi: Fix schema definitions for PKI EAB APIs [[GH-21458](https://github.com/hashicorp/vault/pull/21458)] -* plugins: Containerized plugins can be run with mlock enabled. [[GH-23215](https://github.com/hashicorp/vault/pull/23215)] -* plugins: Fix instance where Vault could fail to kill broken/unresponsive plugins. [[GH-22914](https://github.com/hashicorp/vault/pull/22914)] -* plugins: Fix instance where broken/unresponsive plugins could cause Vault to hang. [[GH-22914](https://github.com/hashicorp/vault/pull/22914)] -* plugins: Runtime catalog returns 404 instead of 500 when reading a runtime that does not exist [[GH-23171](https://github.com/hashicorp/vault/pull/23171)] -* plugins: `vault plugin runtime list` can successfully list plugin runtimes with GET [[GH-23171](https://github.com/hashicorp/vault/pull/23171)] -* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)] -* replication (enterprise): Fix bug sync invalidate CoreReplicatedClusterInfoPath -* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable -* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs -* replication (enterprise): Fixing a bug by which the atomicity of a merkle diff result could be affected. This means it could be a source of a merkle-diff & sync process failing to switch into stream-wal mode afterwards. 
-* replication (enterprise): Sort cluster addresses returned by echo requests, so that primary-addrs only gets persisted when the -set of addrs changes. -* replication (enterprise): update primary cluster address after DR failover -* sdk/ldaputil: Properly escape user filters when using UPN domains -* sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)] -* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21631](https://github.com/hashicorp/vault/pull/21631)] -* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. [[GH-22330](https://github.com/hashicorp/vault/pull/22330)] -* secrets/pki: Fix bug with ACME tidy, 'unable to determine acme base folder path'. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)] -* secrets/pki: Fix preserving acme_account_safety_buffer on config/auto-tidy. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)] -* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. [[GH-23007](https://github.com/hashicorp/vault/pull/23007)] -* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this resulted in the error: `no managed key found with uuid`. 
[[GH-21316](https://github.com/hashicorp/vault/pull/21316)] -* secrets/pki: allowed_domains are now compared in a case-insensitive manner if they use glob patterns [[GH-22126](https://github.com/hashicorp/vault/pull/22126)] -* secrets/transform (enterprise): Batch items with repeated tokens in the tokenization decode api will now contain the decoded_value element -* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present -* secrets/transform (enterprise): Fix nil panic when encoding a tokenization transformation on a non-active node -* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required -* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute -* secrets/transit: fix panic when providing non-PEM formatted public key for import [[GH-22753](https://github.com/hashicorp/vault/pull/22753)] -* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)] -* storage/consul: Consul service registration tags are now case-sensitive. [[GH-6483](https://github.com/hashicorp/vault/pull/6483)] -* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. 
[[GH-20986](https://github.com/hashicorp/vault/pull/20986)] -* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)] -* ui: Adds missing values to details view after generating PKI certificate [[GH-21635](https://github.com/hashicorp/vault/pull/21635)] -* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] -* ui: Fix display for "Last Vault Rotation" timestamp for static database roles which was not rendering or copyable [[GH-22519](https://github.com/hashicorp/vault/pull/22519)] -* ui: Fix styling for username input when editing a user [[GH-21771](https://github.com/hashicorp/vault/pull/21771)] -* ui: Fix styling for viewing certificate in kubernetes configuration [[GH-21968](https://github.com/hashicorp/vault/pull/21968)] -* ui: Fix the issue where confirm delete dropdown is being cut off [[GH-23066](https://github.com/hashicorp/vault/pull/23066)] -* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)] -* ui: Fixed secrets, leases, and policies filter dropping focus after a single character [[GH-21767](https://github.com/hashicorp/vault/pull/21767)] -* ui: Fixes filter and search bug in secrets engines [[GH-23123](https://github.com/hashicorp/vault/pull/23123)] -* ui: Fixes form field label tooltip alignment [[GH-22832](https://github.com/hashicorp/vault/pull/22832)] -* ui: Fixes issue with certain navigational links incorrectly displaying in child namespaces [[GH-21562](https://github.com/hashicorp/vault/pull/21562)] -* ui: Fixes login screen display issue with Safari browser [[GH-21582](https://github.com/hashicorp/vault/pull/21582)] -* ui: Fixes problem displaying certificates issued with unsupported signature algorithms (i.e. 
ed25519) [[GH-21926](https://github.com/hashicorp/vault/pull/21926)] -* ui: Fixes styling of private key input when configuring an SSH key [[GH-21531](https://github.com/hashicorp/vault/pull/21531)] -* ui: Surface DOMException error when browser settings prevent localStorage. [[GH-21503](https://github.com/hashicorp/vault/pull/21503)] -* ui: correct doctype for index.html [[GH-22153](https://github.com/hashicorp/vault/pull/22153)] -* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)] -* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] -* ui: fixes long namespace names overflow in the sidebar -* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)] -* ui: fixes text readability issue in revoke token confirmation dialog [[GH-22390](https://github.com/hashicorp/vault/pull/22390)] - -## 1.14.6 -### November 09, 2023 - -SECURITY: -* core: inbound client requests triggering a policy check can lead to an unbounded consumption of memory. A large number of these requests may lead to denial-of-service. This vulnerability, CVE-2023-5954, was introduced in Vault 1.15.0, 1.14.3, and 1.13.7, and is fixed in Vault 1.15.2, 1.14.6, and 1.13.10. 
[[HSEC-2023-33](https://discuss.hashicorp.com/t/hcsec-2023-33-vault-requests-triggering-policy-checks-may-lead-to-unbounded-memory-consumption/59926)] - -CHANGES: - -* auth/approle: Normalized error response messages when invalid credentials are provided [[GH-23786](https://github.com/hashicorp/vault/pull/23786)] -* secrets/mongodbatlas: Update plugin to v0.10.2 [[GH-23849](https://github.com/hashicorp/vault/pull/23849)] - -FEATURES: - -* cli/snapshot: Add CLI tool to inspect Vault snapshots [[GH-23457](https://github.com/hashicorp/vault/pull/23457)] - -IMPROVEMENTS: - -* storage/etcd: etcd should only return keys when calling List() [[GH-23872](https://github.com/hashicorp/vault/pull/23872)] - -BUG FIXES: - -* api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured -on the request. [[GH-23861](https://github.com/hashicorp/vault/pull/23861)] -* core (enterprise): Do not return an internal error when token policy type lookup fails, log it instead and continue. -* core/activity: Fixes segments fragment loss due to exceeding entry record size limit [[GH-23781](https://github.com/hashicorp/vault/pull/23781)] -* core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" when filter paths are enforced returns 500 error code from the secondary [[GH-23802](https://github.com/hashicorp/vault/pull/23802)] -* core: Revert PR causing memory consumption bug [[GH-23986](https://github.com/hashicorp/vault/pull/23986)] -* core: Skip unnecessary deriving of policies during Login MFA Check. [[GH-23894](https://github.com/hashicorp/vault/pull/23894)] -* core: fix bug where deadlock detection was always on for expiration and quotas. -These can now be configured individually with `detect_deadlocks`. 
[[GH-23902](https://github.com/hashicorp/vault/pull/23902)] -* core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash [[GH-23874](https://github.com/hashicorp/vault/pull/23874)] -* expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. [[GH-24027](https://github.com/hashicorp/vault/pull/24027)] - -## 1.14.5 -### October 25, 2023 - -CHANGES: - -* core: Bump Go version to 1.20.10. -* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host - -IMPROVEMENTS: - -* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)] -* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. [[GH-22567](https://github.com/hashicorp/vault/pull/22567)] -* ui: Adds toggle to KV secrets engine value download modal to optionally stringify value in downloaded file [[GH-23747](https://github.com/hashicorp/vault/pull/23747)] - -BUG FIXES: - -* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)] -* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)] -* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)] -* kmip (enterprise): Improve handling of failures due to storage replication issues. -* kmip (enterprise): Return a structure in the response for query function Query Server Information. -* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)] -* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster. 
-* replication (enterprise): Fix a missing unlock when changing replication state -* secrets/consul: Fix revocations when Vault has an access token using specific namespace and admin partition policies [[GH-23010](https://github.com/hashicorp/vault/pull/23010)] -* secrets/pki: Stop processing in-flight ACME verifications when an active node steps down [[GH-23278](https://github.com/hashicorp/vault/pull/23278)] -* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about it not containing a private key -* secrets/transit (enterprise): Address panic when using GCP,AWS,Azure managed keys for encryption operations. At this time all encryption operations for the cloud providers have been disabled, only signing operations are supported. -* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations -* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)] -* storage/consul: fix a bug where an active node in a specific sort of network -partition could continue to write data to Consul after a new leader is elected -potentially causing data loss or corruption for keys with many concurrent -writers. For Enterprise clusters this could cause corruption of the merkle trees -leading to failure to complete merkle sync without a full re-index. [[GH-23013](https://github.com/hashicorp/vault/pull/23013)] -* ui: Decode the connection url for display on the connection details page [[GH-23695](https://github.com/hashicorp/vault/pull/23695)] -* ui: Fix AWS secret engine to allow empty policy_document field. [[GH-23470](https://github.com/hashicorp/vault/pull/23470)] -* ui: Fix the copy token button in the sidebar navigation window when in a collapsed state. 
[[GH-23331](https://github.com/hashicorp/vault/pull/23331)] -* ui: Fixes issue with sidebar navigation links disappearing when navigating to policies when a user is not authorized [[GH-23516](https://github.com/hashicorp/vault/pull/23516)] - -## 1.14.4 -### September 27, 2023 - -SECURITY: - -* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8. [[HSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)] - -CHANGES: - -* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy - -IMPROVEMENTS: - -* ui: Add pagination to PKI roles, keys, issuers, and certificates list pages [[GH-23193](https://github.com/hashicorp/vault/pull/23193)] -* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)] -* ui: Adds tidy_revoked_certs to PKI tidy status page [[GH-23232](https://github.com/hashicorp/vault/pull/23232)] -* ui: Adds warning before downloading KV v2 secret values [[GH-23260](https://github.com/hashicorp/vault/pull/23260)] - -BUG FIXES: - -* core: Fixes list password policy to include those with names containing / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)] -* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. 
[[GH-23007](https://github.com/hashicorp/vault/pull/23007)] -* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)] -* ui: Fix the issue where confirm delete dropdown is being cut off [[GH-23066](https://github.com/hashicorp/vault/pull/23066)] -* ui: Fixes filter and search bug in secrets engines [[GH-23123](https://github.com/hashicorp/vault/pull/23123)] -* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)] - -## 1.14.3 -### September 13, 2023 - -SECURITY: - -* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)] - -CHANGES: - -* core: Bump Go version to 1.20.8. - -FEATURES: - -* **Merkle Tree Corruption Detection (enterprise)**: Add a new endpoint to check merkle tree corruption. - -IMPROVEMENTS: - -* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)] -* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)] -* kmip (enterprise): reduce latency of KMIP operation handling - -BUG FIXES: - -* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)] -* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)] -* core/quotas: Reduce overhead for role calculation when using cloud auth methods. 
[[GH-22583](https://github.com/hashicorp/vault/pull/22583)] -* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. [[GH-22760](https://github.com/hashicorp/vault/pull/22760)] -* core: All subloggers now reflect configured log level on reload. [[GH-22038](https://github.com/hashicorp/vault/pull/22038)] -* kmip (enterprise): fix date handling error with some re-key operations -* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)] -* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable -* secrets/transit: fix panic when providing non-PEM formatted public key for import [[GH-22753](https://github.com/hashicorp/vault/pull/22753)] -* ui: fixes long namespace names overflow in the sidebar - -## 1.14.2 -### August 30, 2023 - -CHANGES: - -* auth/azure: Update plugin to v0.16.0 [[GH-22277](https://github.com/hashicorp/vault/pull/22277)] -* core: Bump Go version to 1.20.7. -* database/snowflake: Update plugin to v0.9.0 [[GH-22516](https://github.com/hashicorp/vault/pull/22516)] - -IMPROVEMENTS: - -* auto-auth/azure: Added Azure Workload Identity Federation support to auto-auth (for Vault Agent and Vault Proxy). [[GH-22264](https://github.com/hashicorp/vault/pull/22264)] -* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)] -* kmip (enterprise): Add namespace lock and unlock support [[GH-21925](https://github.com/hashicorp/vault/pull/21925)] -* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. -* secrets/database: Improves error logging for static role rotations by including the database and role names. 
[[GH-22253](https://github.com/hashicorp/vault/pull/22253)] -* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)] -* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)] -* ui: adds allowed_user_ids field to create role form and user_ids to generate certificates form in pki [[GH-22191](https://github.com/hashicorp/vault/pull/22191)] -* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] -* website/docs: Fix link formatting in Vault lambda extension docs [[GH-22396](https://github.com/hashicorp/vault/pull/22396)] - -BUG FIXES: - -* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)] -* agent: Environment variable VAULT_CACERT_BYTES now works for Vault Agent templates. [[GH-22322](https://github.com/hashicorp/vault/pull/22322)] -* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)] -* core (enterprise): Remove MFA Configuration for namespace when deleting namespace -* core/metrics: vault.raft_storage.bolt.write.time should be a counter not a summary [[GH-22468](https://github.com/hashicorp/vault/pull/22468)] -* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. -Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] -* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] -* core: Fix bug where background thread to update locked user entries runs on DR secondaries. 
[[GH-22355](https://github.com/hashicorp/vault/pull/22355)] -* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] -* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] -* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] -* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node is observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)] -* replication (enterprise): Fix bug sync invalidate CoreReplicatedClusterInfoPath -* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs -* replication (enterprise): Fixing a bug by which the atomicity of a merkle diff result could be affected. This means it could be a source of a merkle-diff & sync process failing to switch into stream-wal mode afterwards. -* sdk/ldaputil: Properly escape user filters when using UPN domains -sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)] -* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. 
[[GH-22330](https://github.com/hashicorp/vault/pull/22330)] -* secrets/transform (enterprise): Batch items with repeated tokens in the tokenization decode api will now contain the decoded_value element -* secrets/transform (enterprise): Fix nil panic when encoding a tokenization transformation on a non-active node -* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute -* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)] -* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] -* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] -* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)] -* ui: fixes text readability issue in revoke token confirmation dialog [[GH-22390](https://github.com/hashicorp/vault/pull/22390)] - -## 1.14.1 -### July 25, 2023 - -SECURITY: - -* auth/ldap: Normalize HTTP response codes when invalid credentials are provided to prevent user enumeration. This vulnerability, CVE-2023-3462, is fixed in Vault 1.14.1 and 1.13.5. [[GH-21282](https://github.com/hashicorp/vault/pull/21282), [HSEC-2023-24](https://discuss.hashicorp.com/t/hcsec-2023-24-vaults-ldap-auth-method-allows-for-user-enumeration/56714)] -* core/namespace (enterprise): An unhandled error in Vault Enterprise’s namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. 
[[HSEC-2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)] - -CHANGES: - -* core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace), -which will have access to some system backend paths that were previously only accessible in the root namespace. [[GH-21215](https://github.com/hashicorp/vault/pull/21215)] -* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests, a warning will be returned if max_ttl was applied. -* storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. [[GH-20825](https://github.com/hashicorp/vault/pull/20825)] - -IMPROVEMENTS: - -* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise. -* eventbus: updated go-eventlogger library to allow removal of nodes referenced by pipelines (used for subscriptions) [[GH-21623](https://github.com/hashicorp/vault/pull/21623)] -* openapi: Better mount points for kv-v1 and kv-v2 in openapi.json [[GH-21563](https://github.com/hashicorp/vault/pull/21563)] -* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary -* secrets/pki: Add a parameter to allow ExtKeyUsage field usage from a role within ACME. [[GH-21702](https://github.com/hashicorp/vault/pull/21702)] -* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling -* sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. [[GH-21681](https://github.com/hashicorp/vault/pull/21681)] - -BUG FIXES: - -* agent: Fix "generate-config" command documentation URL [[GH-21466](https://github.com/hashicorp/vault/pull/21466)] -* auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. 
[[GH-21800](https://github.com/hashicorp/vault/pull/21800)] -* auth/token, sys: Fix path-help being unavailable for some list-only endpoints [[GH-18571](https://github.com/hashicorp/vault/pull/18571)] -* auth/token: Fix parsing of `auth/token/create` fields to avoid incorrect warnings about ignored parameters [[GH-18556](https://github.com/hashicorp/vault/pull/18556)] -* awsutil: Update awsutil to v0.2.3 to fix a regression where Vault no longer -respects `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and `AWS_ROLE_SESSION_NAME`. [[GH-21951](https://github.com/hashicorp/vault/pull/21951)] -* core/managed-keys (enterprise): Allow certain symmetric PKCS#11 managed key mechanisms (AES CBC with and without padding) to operate without an HMAC. -* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-24170](https://github.com/hashicorp/vault/pull/24170)] -* core: Fixed issue with some durations not being properly parsed to include days. [[GH-21357](https://github.com/hashicorp/vault/pull/21357)] -* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. [[GH-20965](https://github.com/hashicorp/vault/pull/20965)] -* openapi: Fix response schema for PKI Issue requests [[GH-21449](https://github.com/hashicorp/vault/pull/21449)] -* openapi: Fix schema definitions for PKI EAB APIs [[GH-21458](https://github.com/hashicorp/vault/pull/21458)] -* replication (enterprise): update primary cluster address after DR failover -* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21631](https://github.com/hashicorp/vault/pull/21631)] -* secrets/pki: Fix bug with ACME tidy, 'unable to determine acme base folder path'. 
[[GH-21870](https://github.com/hashicorp/vault/pull/21870)] -* secrets/pki: Fix preserving acme_account_safety_buffer on config/auto-tidy. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)] -* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`. [[GH-21316](https://github.com/hashicorp/vault/pull/21316)] -* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present -* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required -* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)] -* ui: Adds missing values to details view after generating PKI certificate [[GH-21635](https://github.com/hashicorp/vault/pull/21635)] -* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)] -* ui: Fixed secrets, leases, and policies filter dropping focus after a single character [[GH-21767](https://github.com/hashicorp/vault/pull/21767)] -* ui: Fixes issue with certain navigational links incorrectly displaying in child namespaces [[GH-21562](https://github.com/hashicorp/vault/pull/21562)] -* ui: Fixes login screen display issue with Safari browser [[GH-21582](https://github.com/hashicorp/vault/pull/21582)] -* ui: Fixes problem displaying certificates issued with unsupported signature algorithms (i.e. 
ed25519) [[GH-21926](https://github.com/hashicorp/vault/pull/21926)] -* ui: Fixes styling of private key input when configuring an SSH key [[GH-21531](https://github.com/hashicorp/vault/pull/21531)] -* ui: Surface DOMException error when browser settings prevent localStorage. [[GH-21503](https://github.com/hashicorp/vault/pull/21503)] - -## 1.14.0 -### June 21, 2023 - -SECURITY: - -* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. [[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)] - -BREAKING CHANGES: - -* secrets/pki: Maintaining running count of certificates will be turned off by default. -To re-enable keeping these metrics available on the tidy status endpoint, enable -maintain_stored_certificate_counts on tidy-config, to also publish them to the -metrics consumer, enable publish_stored_certificate_count_metrics . [[GH-18186](https://github.com/hashicorp/vault/pull/18186)] - -CHANGES: - -* auth/alicloud: Updated plugin from v0.14.0 to v0.15.0 [[GH-20758](https://github.com/hashicorp/vault/pull/20758)] -* auth/azure: Updated plugin from v0.13.0 to v0.15.0 [[GH-20816](https://github.com/hashicorp/vault/pull/20816)] -* auth/centrify: Updated plugin from v0.14.0 to v0.15.1 [[GH-20745](https://github.com/hashicorp/vault/pull/20745)] -* auth/gcp: Updated plugin from v0.15.0 to v0.16.0 [[GH-20725](https://github.com/hashicorp/vault/pull/20725)] -* auth/jwt: Updated plugin from v0.15.0 to v0.16.0 [[GH-20799](https://github.com/hashicorp/vault/pull/20799)] -* auth/kubernetes: Update plugin to v0.16.0 [[GH-20802](https://github.com/hashicorp/vault/pull/20802)] -* core: Bump Go version to 1.20.5. -* core: Remove feature toggle for SSCTs, i.e. the env var VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS. 
[[GH-20834](https://github.com/hashicorp/vault/pull/20834)] -* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] -* database/couchbase: Updated plugin from v0.9.0 to v0.9.2 [[GH-20764](https://github.com/hashicorp/vault/pull/20764)] -* database/redis-elasticache: Updated plugin from v0.2.0 to v0.2.1 [[GH-20751](https://github.com/hashicorp/vault/pull/20751)] -* replication (enterprise): Add a new parameter for the update-primary API call -that allows for setting of the primary cluster addresses directly, instead of -via a token. -* secrets/ad: Updated plugin from v0.10.1-0.20230329210417-0b2cdb26cf5d to v0.16.0 [[GH-20750](https://github.com/hashicorp/vault/pull/20750)] -* secrets/alicloud: Updated plugin from v0.5.4-beta1.0.20230330124709-3fcfc5914a22 to v0.15.0 [[GH-20787](https://github.com/hashicorp/vault/pull/20787)] -* secrets/azure: Updated plugin from v0.15.0 to v0.16.0 [[GH-20777](https://github.com/hashicorp/vault/pull/20777)] -* secrets/database/mongodbatlas: Updated plugin from v0.9.0 to v0.10.0 [[GH-20882](https://github.com/hashicorp/vault/pull/20882)] -* secrets/database/snowflake: Updated plugin from v0.7.0 to v0.8.0 [[GH-20807](https://github.com/hashicorp/vault/pull/20807)] -* secrets/gcp: Updated plugin from v0.15.0 to v0.16.0 [[GH-20818](https://github.com/hashicorp/vault/pull/20818)] -* secrets/keymgmt: Updated plugin to v0.9.1 -* secrets/kubernetes: Update plugin to v0.5.0 [[GH-20802](https://github.com/hashicorp/vault/pull/20802)] -* secrets/mongodbatlas: Updated plugin from v0.9.1 to v0.10.0 [[GH-20742](https://github.com/hashicorp/vault/pull/20742)] -* secrets/pki: Allow issuance of root CAs without AIA, when templated AIA information includes issuer_id. [[GH-21209](https://github.com/hashicorp/vault/pull/21209)] -* secrets/pki: Warning when issuing leaf certificates from CSRs with basic constraints. 
In the future, issuance of non-CA leaf certs from CSRs with asserted IsCA Basic Constraints will be prohibited. [[GH-20654](https://github.com/hashicorp/vault/pull/20654)] - -FEATURES: - -* **AWS Static Roles**: The AWS Secrets Engine can manage static roles configured by users. [[GH-20536](https://github.com/hashicorp/vault/pull/20536)] -* **Automated License Utilization Reporting**: Added automated license -utilization reporting, which sends minimal product-license [metering -data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) -to HashiCorp without requiring you to manually collect and report them. -* **Environment Variables through Vault Agent**: Introducing a new process-supervisor mode for Vault Agent which allows injecting secrets as environment variables into a child process using a new `env_template` configuration stanza. The process-supervisor configuration can be generated with a new `vault agent generate-config` helper tool. [[GH-20530](https://github.com/hashicorp/vault/pull/20530)] -* **MongoDB Atlas Database Secrets**: Adds support for client certificate credentials [[GH-20425](https://github.com/hashicorp/vault/pull/20425)] -* **MongoDB Atlas Database Secrets**: Adds support for generating X.509 certificates on dynamic roles for user authentication [[GH-20882](https://github.com/hashicorp/vault/pull/20882)] -* **NEW PKI Workflow in UI**: Completes generally available rollout of new PKI UI that provides smoother mount configuration and a more guided user experience [[GH-pki-ui-improvements](https://github.com/hashicorp/vault/pull/pki-ui-improvements)] -* **Secrets/Auth Plugin Multiplexing**: The plugin will be multiplexed when run -as an external plugin by vault versions that support secrets/auth plugin -multiplexing (> 1.12) [[GH-19215](https://github.com/hashicorp/vault/pull/19215)] -* **Sidebar Navigation in UI**: A new sidebar navigation panel has been added in the UI to replace the top navigation bar. 
[[GH-19296](https://github.com/hashicorp/vault/pull/19296)] -* **Vault PKI ACME Server**: Support for the ACME certificate lifecycle management protocol has been added to the Vault PKI Plugin. This allows standard ACME clients, such as the EFF's certbot and the CNCF's k8s cert-manager, to request certificates from a Vault server with no knowledge of Vault APIs or authentication mechanisms. For public-facing Vault instances, we recommend requiring External Account Bindings (EAB) to limit the ability to request certificates to only authenticated clients. [[GH-20752](https://github.com/hashicorp/vault/pull/20752)] -* **Vault Proxy**: Introduced Vault Proxy, a new subcommand of the Vault binary that can be invoked using `vault proxy -config=config.hcl`. It currently has the same feature set as Vault Agent's API proxy, but the two may diverge in the future. We plan to deprecate the API proxy functionality of Vault Agent in a future release. [[GH-20548](https://github.com/hashicorp/vault/pull/20548)] -* **OCI Auto-Auth**: Add OCI (Oracle Cloud Infrastructure) auto-auth method [[GH-19260](https://github.com/hashicorp/vault/pull/19260)] - -IMPROVEMENTS: - -* api: Add Config.TLSConfig method to fetch the TLS configuration from a client config. [[GH-20265](https://github.com/hashicorp/vault/pull/20265)] -* physical/etcd: Upgrade etcd3 client to v3.5.7 [[GH-20261](https://github.com/hashicorp/vault/pull/20261)] -* activitylog: EntityRecord protobufs now contain a ClientType field for -distinguishing client sources. [[GH-20626](https://github.com/hashicorp/vault/pull/20626)] -* agent: Add integration tests for agent running in process supervisor mode [[GH-20741](https://github.com/hashicorp/vault/pull/20741)] -* agent: Add logic to validate env_template entries in configuration [[GH-20569](https://github.com/hashicorp/vault/pull/20569)] -* agent: Added `reload` option to cert auth configuration in case of external renewals of local x509 key-pairs. 
[[GH-19002](https://github.com/hashicorp/vault/pull/19002)] -* agent: JWT auto-auth has a new config option, `remove_jwt_follows_symlinks` (default: false), that, if set to true will now remove the JWT, instead of the symlink to the JWT, if a symlink to a JWT has been provided in the `path` option, and the `remove_jwt_after_reading` config option is set to true (default). [[GH-18863](https://github.com/hashicorp/vault/pull/18863)] -* agent: Vault Agent now reports its name and version as part of the User-Agent header in all requests issued. [[GH-19776](https://github.com/hashicorp/vault/pull/19776)] -* agent: initial implementation of a process runner for injecting secrets via environment variables via vault agent [[GH-20628](https://github.com/hashicorp/vault/pull/20628)] -* api: GET ... /sys/internal/counters/activity?current_billing_period=true now -results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] -* api: `/sys/internal/counters/config` endpoint now contains read-only -`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] -* api: `/sys/internal/counters/config` endpoint now contains read-only -`reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)] -* api: property based testing for LifetimeWatcher sleep duration calculation [[GH-17919](https://github.com/hashicorp/vault/pull/17919)] -* audit: add plugin metadata, including plugin name, type, version, sha256, and whether plugin is external, to audit logging [[GH-19814](https://github.com/hashicorp/vault/pull/19814)] -* audit: forwarded requests can now contain host metadata on the node it was sent 'from' or a flag to indicate that it was forwarded. -* auth/cert: Better return OCSP validation errors during login to the caller. 
[[GH-20234](https://github.com/hashicorp/vault/pull/20234)] -* auth/kerberos: Enable plugin multiplexing -auth/kerberos: Upgrade plugin dependencies [[GH-20771](https://github.com/hashicorp/vault/pull/20771)] -* auth/ldap: allow configuration of alias dereferencing in LDAP search [[GH-18230](https://github.com/hashicorp/vault/pull/18230)] -* auth/ldap: allow providing the LDAP password via an env var when authenticating via the CLI [[GH-18225](https://github.com/hashicorp/vault/pull/18225)] -* auth/oidc: Adds support for group membership parsing when using IBM ISAM as an OIDC provider. [[GH-19247](https://github.com/hashicorp/vault/pull/19247)] -* build: Prefer GOBIN when set over GOPATH/bin when building the binary [[GH-19862](https://github.com/hashicorp/vault/pull/19862)] -* cli: Add walkSecretsTree helper function, which recursively walks secrets rooted at the given path [[GH-20464](https://github.com/hashicorp/vault/pull/20464)] -* cli: Improve addPrefixToKVPath helper [[GH-20488](https://github.com/hashicorp/vault/pull/20488)] -* command/server (enterprise): -dev-three-node now creates perf standbys instead of regular standbys. [[GH-20629](https://github.com/hashicorp/vault/pull/20629)] -* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when -`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. [[GH-20609](https://github.com/hashicorp/vault/pull/20609)] -* command/server: New -dev-cluster-json writes a file describing the dev cluster in -dev and -dev-three-node modes, plus -dev-three-node now enables unauthenticated metrics and pprof requests. 
[[GH-20224](https://github.com/hashicorp/vault/pull/20224)] -* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] -* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] -* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] -* core (enterprise): vault server command now allows for opt-out of automated -reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. [[GH-3939](https://github.com/hashicorp/vault/pull/3939)] -* core, secrets/pki, audit: Update dependency go-jose to v3 due to v2 deprecation. [[GH-20559](https://github.com/hashicorp/vault/pull/20559)] -* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] -* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] -* core: Add possibility to decode a generated encoded root token via the rest API [[GH-20595](https://github.com/hashicorp/vault/pull/20595)] -* core: include namespace path in granting_policies block of audit log -* core: include reason for ErrReadOnly on PBPWF writing failures -* core: report intermediate error messages during request forwarding [[GH-20643](https://github.com/hashicorp/vault/pull/20643)] -* core: provide more descriptive error message when calling enterprise feature paths in open-source [[GH-18870](https://github.com/hashicorp/vault/pull/18870)] -* database/elasticsearch: Upgrade plugin dependencies [[GH-20767](https://github.com/hashicorp/vault/pull/20767)] -* database/mongodb: upgrade mongo driver to 1.11 [[GH-19954](https://github.com/hashicorp/vault/pull/19954)] -* database/redis: Upgrade plugin dependencies 
[[GH-20763](https://github.com/hashicorp/vault/pull/20763)] -* http: Support responding to HEAD operation from plugins [[GH-19520](https://github.com/hashicorp/vault/pull/19520)] -* openapi: Add openapi response definitions to /sys defined endpoints. [[GH-18633](https://github.com/hashicorp/vault/pull/18633)] -* openapi: Add openapi response definitions to pki/config_*.go [[GH-18376](https://github.com/hashicorp/vault/pull/18376)] -* openapi: Add openapi response definitions to vault/logical_system_paths.go defined endpoints. [[GH-18515](https://github.com/hashicorp/vault/pull/18515)] -* openapi: Consistently stop Vault server on exit in gen_openapi.sh [[GH-19252](https://github.com/hashicorp/vault/pull/19252)] -* openapi: Improve operationId/request/response naming strategy [[GH-19319](https://github.com/hashicorp/vault/pull/19319)] -* openapi: add openapi response definitions to /sys/internal endpoints [[GH-18542](https://github.com/hashicorp/vault/pull/18542)] -* openapi: add openapi response definitions to /sys/rotate endpoints [[GH-18624](https://github.com/hashicorp/vault/pull/18624)] -* openapi: add openapi response definitions to /sys/seal endpoints [[GH-18625](https://github.com/hashicorp/vault/pull/18625)] -* openapi: add openapi response definitions to /sys/tool endpoints [[GH-18626](https://github.com/hashicorp/vault/pull/18626)] -* openapi: add openapi response definitions to /sys/version-history, /sys/leader, /sys/ha-status, /sys/host-info, /sys/in-flight-req [[GH-18628](https://github.com/hashicorp/vault/pull/18628)] -* openapi: add openapi response definitions to /sys/wrapping endpoints [[GH-18627](https://github.com/hashicorp/vault/pull/18627)] -* openapi: add openapi response definitions to /sys/auth endpoints [[GH-18465](https://github.com/hashicorp/vault/pull/18465)] -* openapi: add openapi response definitions to /sys/capabilities endpoints [[GH-18468](https://github.com/hashicorp/vault/pull/18468)] -* openapi: add openapi response definitions to 
/sys/config and /sys/generate-root endpoints [[GH-18472](https://github.com/hashicorp/vault/pull/18472)] -* openapi: added ability to validate response structures against openapi schema for test clusters [[GH-19043](https://github.com/hashicorp/vault/pull/19043)] -* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)] -* sdk: Add new docker-based cluster testing framework to the sdk. [[GH-20247](https://github.com/hashicorp/vault/pull/20247)] -* secrets/ad: upgrades dependencies [[GH-19829](https://github.com/hashicorp/vault/pull/19829)] -* secrets/alicloud: upgrades dependencies [[GH-19846](https://github.com/hashicorp/vault/pull/19846)] -* secrets/consul: Improve error message when ACL bootstrapping fails. [[GH-20891](https://github.com/hashicorp/vault/pull/20891)] -* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] -* secrets/gcpkms: Enable plugin multiplexing -secrets/gcpkms: Upgrade plugin dependencies [[GH-20784](https://github.com/hashicorp/vault/pull/20784)] -* secrets/mongodbatlas: upgrades dependencies [[GH-19861](https://github.com/hashicorp/vault/pull/19861)] -* secrets/openldap: upgrades dependencies [[GH-19993](https://github.com/hashicorp/vault/pull/19993)] -* secrets/pki: Add missing fields to tidy-status, include new last_auto_tidy_finished field. [[GH-20442](https://github.com/hashicorp/vault/pull/20442)] -* secrets/pki: Add warning when issuer lacks KeyUsage during CRL rebuilds; expose in logs and on rotation. [[GH-20253](https://github.com/hashicorp/vault/pull/20253)] -* secrets/pki: Allow determining existing issuers and keys on import. [[GH-20441](https://github.com/hashicorp/vault/pull/20441)] -* secrets/pki: Include CA serial number, key UUID on issuers list endpoint. 
[[GH-20276](https://github.com/hashicorp/vault/pull/20276)] -* secrets/pki: Limit ACME issued certificates NotAfter TTL to a maximum of 90 days [[GH-20981](https://github.com/hashicorp/vault/pull/20981)] -* secrets/pki: Support TLS-ALPN-01 challenge type in ACME for DNS certificate identifiers. [[GH-20943](https://github.com/hashicorp/vault/pull/20943)] -* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] -* secrets/postgresql: Add configuration to scram-sha-256 encrypt passwords on Vault before sending them to PostgreSQL [[GH-19616](https://github.com/hashicorp/vault/pull/19616)] -* secrets/terraform: upgrades dependencies [[GH-19798](https://github.com/hashicorp/vault/pull/19798)] -* secrets/transit: Add support to import public keys in transit engine and allow encryption and verification of signed data [[GH-17934](https://github.com/hashicorp/vault/pull/17934)] -* secrets/transit: Allow importing RSA-PSS OID (1.2.840.113549.1.1.10) private keys via BYOK. [[GH-19519](https://github.com/hashicorp/vault/pull/19519)] -* secrets/transit: Respond to writes with updated key policy, cache configuration. [[GH-20652](https://github.com/hashicorp/vault/pull/20652)] -* secrets/transit: Support BYOK-encrypted export of keys to securely allow synchronizing specific keys and version across clusters. [[GH-20736](https://github.com/hashicorp/vault/pull/20736)] -* ui: Add download button for each secret value in KV v2 [[GH-20431](https://github.com/hashicorp/vault/pull/20431)] -* ui: Add filtering by auth type and auth name to the Authentication Method list view. [[GH-20747](https://github.com/hashicorp/vault/pull/20747)] -* ui: Add filtering by engine type and engine name to the Secret Engine list view. 
[[GH-20481](https://github.com/hashicorp/vault/pull/20481)] -* ui: Adds whitespace warning to secrets engine and auth method path inputs [[GH-19913](https://github.com/hashicorp/vault/pull/19913)] -* ui: Remove the Bulma CSS framework. [[GH-19878](https://github.com/hashicorp/vault/pull/19878)] -* ui: Update Web CLI with examples and a new `kv-get` command for reading kv v2 data and metadata [[GH-20590](https://github.com/hashicorp/vault/pull/20590)] -* ui: Updates UI javascript dependencies [[GH-19901](https://github.com/hashicorp/vault/pull/19901)] -* ui: add allowed_managed_keys field to secret engine mount options [[GH-19791](https://github.com/hashicorp/vault/pull/19791)] -* ui: adds warning for commas in stringArray inputs and updates tooltip help text to remove references to comma separation [[GH-20163](https://github.com/hashicorp/vault/pull/20163)] -* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] -* website/docs: Add rotate root documentation for azure secrets engine [[GH-19187](https://github.com/hashicorp/vault/pull/19187)] -* website/docs: fix database static-user sample payload [[GH-19170](https://github.com/hashicorp/vault/pull/19170)] - -BUG FIXES: - -* agent: Fix agent generate-config to accept -namespace, VAULT_NAMESPACE, and other client-modifying flags. [[GH-21297](https://github.com/hashicorp/vault/pull/21297)] -* agent: Fix bug with 'cache' stanza validation [[GH-20934](https://github.com/hashicorp/vault/pull/20934)] -* api: Addressed a couple of issues that arose as edge cases for the -output-policy flag. Specifically around properly handling list commands, distinguishing kv V1/V2, and correctly recognizing protected paths. 
[[GH-19160](https://github.com/hashicorp/vault/pull/19160)] -* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] -* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] -* auth/token: Fix cubbyhole and revocation for legacy service tokens [[GH-19416](https://github.com/hashicorp/vault/pull/19416)] -* cli/kv: add -mount flag to kv list [[GH-19378](https://github.com/hashicorp/vault/pull/19378)] -* core (enterprise): Don't delete backend stored data that appears to be filterable -on this secondary if we don't have a corresponding mount entry. -* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. -* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. -* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. -* core (enterprise): Fix panic when using invalid accessor for control-group request -* core (enterprise): Fix perf standby WAL streaming silent failures when replication setup happens at a bad time. -* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur -* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace -* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` -resulting in 412 errors. 
-* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will -have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. [[GH-21260](https://github.com/hashicorp/vault/pull/21260)] -* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] -* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] -* core: Fix Forwarded Writer construction to correctly find active nodes, allowing PKI cross-cluster functionality to succeed on existing mounts. -* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] -* core: Fix writes to readonly storage on performance standbys when user lockout feature is enabled. [[GH-20783](https://github.com/hashicorp/vault/pull/20783)] -* identity: Fixes duplicate groups creation with the same name but unique IDs. [[GH-20964](https://github.com/hashicorp/vault/pull/20964)] -* license (enterprise): Fix bug where license would update even if the license didn't change. -* openapi: Small fixes for OpenAPI display attributes. Changed "log-in" to "login" [[GH-20285](https://github.com/hashicorp/vault/pull/20285)] -* plugin/reload: Fix a possible data race with rollback manager and plugin reload [[GH-19468](https://github.com/hashicorp/vault/pull/19468)] -* replication (enterprise): Fix a caching issue when replicating filtered data to -a performance secondary. This resulted in the data being set to nil in the cache -and an "invalid value" error being returned from the API. -* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. 
-* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover -* replication (enterprise): Fix bug where reloading external plugin on a secondary would -break replication. -* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs -* replication (enterprise): Fix regression causing token creation against a role -with a new entity alias to be incorrectly forwarded from perf standbys. [[GH-21100](https://github.com/hashicorp/vault/pull/21100)] -* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil -* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. -* sdk/backend: prevent panic when computing the zero value for a `TypeInt64` schema field. [[GH-18729](https://github.com/hashicorp/vault/pull/18729)] -* secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. [[GH-20664](https://github.com/hashicorp/vault/pull/20664)] -* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens -* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation -* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions -* secrets/transform: Added importing of keys and key versions into the Transform secrets engine using the command 'vault transform import' and 'vault transform import-version'. [[GH-20668](https://github.com/hashicorp/vault/pull/20668)] -* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. 
For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. -* secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. -* sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. [[GH-20864](https://github.com/hashicorp/vault/pull/20864)] -* shamir: change mul and div implementations to be constant-time [[GH-19495](https://github.com/hashicorp/vault/pull/19495)] -* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] -* ui: Fix secret render when path includes %. Resolves #11616. [[GH-20430](https://github.com/hashicorp/vault/pull/20430)] -* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)] -* ui: fixes auto_rotate_period ttl input for transit keys [[GH-20731](https://github.com/hashicorp/vault/pull/20731)] -* ui: fixes bug in kmip role form that caused `operation_all` to persist after deselecting all operation checkboxes [[GH-19139](https://github.com/hashicorp/vault/pull/19139)] -* ui: fixes key_bits and signature_bits reverting to default values when editing a pki role [[GH-20907](https://github.com/hashicorp/vault/pull/20907)] -* ui: wait for wanted message event during OIDC callback instead of using the first message event [[GH-18521](https://github.com/hashicorp/vault/pull/18521)] - -## 1.13.10 -### November 09, 2023 - -SECURITY: -* core: inbound client requests triggering a policy check can lead to an unbounded consumption of memory. A large number of these requests may lead to denial-of-service. This vulnerability, CVE-2023-5954, was introduced in Vault 1.15.0, 1.14.3, and 1.13.7, and is fixed in Vault 1.15.2, 1.14.6, and 1.13.10. 
[[HSEC-2023-33](https://discuss.hashicorp.com/t/hcsec-2023-33-vault-requests-triggering-policy-checks-may-lead-to-unbounded-memory-consumption/59926)] - -CHANGES: - -* auth/approle: Normalized error response messages when invalid credentials are provided [[GH-23786](https://github.com/hashicorp/vault/pull/23786)] -* secrets/mongodbatlas: Update plugin to v0.9.2 [[GH-23849](https://github.com/hashicorp/vault/pull/23849)] - -FEATURES: - -* cli/snapshot: Add CLI tool to inspect Vault snapshots [[GH-23457](https://github.com/hashicorp/vault/pull/23457)] - -IMPROVEMENTS: - -* storage/etcd: etcd should only return keys when calling List() [[GH-23872](https://github.com/hashicorp/vault/pull/23872)] - -BUG FIXES: - -* api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured -on the request. [[GH-23861](https://github.com/hashicorp/vault/pull/23861)] -* core (enterprise): Do not return an internal error when token policy type lookup fails, log it instead and continue. -* core/activity: Fixes segments fragment loss due to exceeding entry record size limit [[GH-23781](https://github.com/hashicorp/vault/pull/23781)] -* core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" when filter paths are enforced returns 500 error code from the secondary [[GH-23802](https://github.com/hashicorp/vault/pull/23802)] -* core: Revert PR causing memory consumption bug [[GH-23986](https://github.com/hashicorp/vault/pull/23986)] -* core: Skip unnecessary deriving of policies during Login MFA Check. [[GH-23894](https://github.com/hashicorp/vault/pull/23894)] -* core: fix bug where deadlock detection was always on for expiration and quotas. -These can now be configured individually with `detect_deadlocks`. 
[[GH-23902](https://github.com/hashicorp/vault/pull/23902)] -* core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash [[GH-23874](https://github.com/hashicorp/vault/pull/23874)] -* expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. [[GH-24027](https://github.com/hashicorp/vault/pull/24027)] - -## 1.13.9 -### October 25, 2023 - -CHANGES: - -* core: Bump Go version to 1.20.10. -* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host - -IMPROVEMENTS: - -* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)] -* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. [[GH-22567](https://github.com/hashicorp/vault/pull/22567)] - -BUG FIXES: - -* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)] -* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)] -* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)] -* kmip (enterprise): Improve handling of failures due to storage replication issues. -* kmip (enterprise): Return a structure in the response for query function Query Server Information. -* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)] -* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster. 
-* replication (enterprise): Fix a missing unlock when changing replication state -* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about it not containing a private key -* secrets/transit (enterprise): Address panic when using GCP,AWS,Azure managed keys for encryption operations. At this time all encryption operations for the cloud providers have been disabled, only signing operations are supported. -* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations -* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)] - -## 1.13.6 -### August 30, 2023 - -CHANGES: - -* core: Bump Go version to 1.20.7. - -IMPROVEMENTS: - -* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)] -* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. -* secrets/database: Improves error logging for static role rotations by including the database and role names. [[GH-22253](https://github.com/hashicorp/vault/pull/22253)] -* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)] -* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)] -* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] - -BUG FIXES: - -* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)] -* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. 
[[GH-22523](https://github.com/hashicorp/vault/pull/22523)] -* core (enterprise): Remove MFA Configuration for namespace when deleting namespace -* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. -Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] -* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] -* core: Fix bug where background thread to update locked user entries runs on DR secondaries. [[GH-22355](https://github.com/hashicorp/vault/pull/22355)] -* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] -* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] -* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] -* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node is observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)] -* replication (enterprise): Fix bug sync invalidate CoreReplicatedClusterInfoPath -* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs -* replication (enterprise): Fixing a bug by which the atomicity of a merkle diff result could be affected. This means it could be a source of a merkle-diff & sync process failing to switch into stream-wal mode afterwards. 
-* sdk/ldaputil: Properly escape user filters when using UPN domains -sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)] -* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. [[GH-22331](https://github.com/hashicorp/vault/pull/22331)] -* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute -* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] -* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] -* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)] - -## 1.13.8 -### September 27, 2023 - -SECURITY: - -* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8. [[HSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)] - -CHANGES: - -* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy - -IMPROVEMENTS: - -* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)] - -BUG FIXES: - -* core: Fixes list password policy to include those with names containing / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)] -* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. 
[[GH-23007](https://github.com/hashicorp/vault/pull/23007)] -* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)] -* ui: Fixes old pki's filter and search roles page bug [[GH-22810](https://github.com/hashicorp/vault/pull/22810)] -* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)] - -## 1.13.7 -### September 13, 2023 - -SECURITY: - -* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)] - -CHANGES: - -* core: Bump Go version to 1.20.8. -* database/snowflake: Update plugin to v0.7.3 [[GH-22591](https://github.com/hashicorp/vault/pull/22591)] - -FEATURES: - -* ** Merkle Tree Corruption Detection (enterprise) **: Add a new endpoint to check merkle tree corruption. - -IMPROVEMENTS: - -* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)] -* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)] -* kmip (enterprise): reduce latency of KMIP operation handling - -BUG FIXES: - -* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)] -* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)] -* core/quotas: Reduce overhead for role calculation when using cloud auth methods. 
[[GH-22583](https://github.com/hashicorp/vault/pull/22583)] -* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. [[GH-22760](https://github.com/hashicorp/vault/pull/22760)] -* core: All subloggers now reflect configured log level on reload. [[GH-22038](https://github.com/hashicorp/vault/pull/22038)] -* kmip (enterprise): fix date handling error with some re-key operations -* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)] -* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable - -## 1.13.6 -### August 30, 2023 - -CHANGES: - -* core: Bump Go version to 1.20.7. - -IMPROVEMENTS: - -* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)] -* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. -* secrets/database: Improves error logging for static role rotations by including the database and role names. [[GH-22253](https://github.com/hashicorp/vault/pull/22253)] -* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)] -* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)] -* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] - -BUG FIXES: - -* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)] -* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. 
[[GH-22523](https://github.com/hashicorp/vault/pull/22523)] -* core (enterprise): Remove MFA Configuration for namespace when deleting namespace -* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. -Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] -* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] -* core: Fix bug where background thread to update locked user entries runs on DR secondaries. [[GH-22355](https://github.com/hashicorp/vault/pull/22355)] -* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] -* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] -* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] -* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node is observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)] -* replication (enterprise): Fix bug sync invalidate CoreReplicatedClusterInfoPath -* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs -* replication (enterprise): Fixing a bug by which the atomicity of a merkle diff result could be affected. This means it could be a source of a merkle-diff & sync process failing to switch into stream-wal mode afterwards. 
-* sdk/ldaputil: Properly escape user filters when using UPN domains -sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)] -* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. [[GH-22331](https://github.com/hashicorp/vault/pull/22331)] -* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute -* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] -* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] -* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)] - -## 1.13.5 -### July 25, 2023 - -SECURITY: - -* auth/ldap: Normalize HTTP response codes when invalid credentials are provided to prevent user enumeration. This vulnerability, CVE-2023-3462, is fixed in Vault 1.14.1 and 1.13.5. [[GH-21282](https://github.com/hashicorp/vault/pull/21282), [HSEC-2023-24](https://discuss.hashicorp.com/t/hcsec-2023-24-vaults-ldap-auth-method-allows-for-user-enumeration/56714)] -* core/namespace (enterprise): An unhandled error in Vault Enterprise’s namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. [[HSEC_2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)] - -CHANGES: - -* core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace), -which will have access to some system backend paths that were previously only accessible in the root namespace. 
[[GH-21215](https://github.com/hashicorp/vault/pull/21215)] -* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests, a warning will be returned if max_ttl was applied. - -IMPROVEMENTS: - -* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise. -* core: Add a new periodic metric to track the number of available policies, `vault.policy.configured.count`. [[GH-21010](https://github.com/hashicorp/vault/pull/21010)] -* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary -* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling -* sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. [[GH-21681](https://github.com/hashicorp/vault/pull/21681)] - -BUG FIXES: - -* auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21799](https://github.com/hashicorp/vault/pull/21799)] -* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-24170](https://github.com/hashicorp/vault/pull/24170)] -* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. [[GH-20965](https://github.com/hashicorp/vault/pull/20965)] -* replication (enterprise): update primary cluster address after DR failover -* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. 
[[GH-21632](https://github.com/hashicorp/vault/pull/21632)] -* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this would trigger the error: `no managed key found with uuid`. [[GH-21316](https://github.com/hashicorp/vault/pull/21316)] -* secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. [[GH-20664](https://github.com/hashicorp/vault/pull/20664)] -* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present -* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required -* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)] -* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)] -* ui: Surface DOMException error when browser settings prevent localStorage. [[GH-21503](https://github.com/hashicorp/vault/pull/21503)] - -## 1.13.4 -### June 21, 2023 -BREAKING CHANGES: - -* secrets/pki: Maintaining running count of certificates will be turned off by default. -To re-enable keeping these metrics available on the tidy status endpoint, enable -maintain_stored_certificate_counts on tidy-config, to also publish them to the -metrics consumer, enable publish_stored_certificate_count_metrics . [[GH-18186](https://github.com/hashicorp/vault/pull/18186)] - -CHANGES: - -* core: Bump Go version to 1.20.5. 
- -FEATURES: - -* **Automated License Utilization Reporting**: Added automated license -utilization reporting, which sends minimal product-license [metering -data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) -to HashiCorp without requiring you to manually collect and report them. -* core (enterprise): Add background worker for automatic reporting of billing -information. [[GH-19625](https://github.com/hashicorp/vault/pull/19625)] - -IMPROVEMENTS: - -* api: GET ... /sys/internal/counters/activity?current_billing_period=true now -results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] -* api: `/sys/internal/counters/config` endpoint now contains read-only -`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] -* api: `/sys/internal/counters/config` endpoint now contains read-only -`reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)] -* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] -* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] -* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] -* core (enterprise): vault server command now allows for opt-out of automated -reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. 
[[GH-3939](https://github.com/hashicorp/vault/pull/3939)] -* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] -* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] -* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] - -BUG FIXES: - -* agent: Fix bug with 'cache' stanza validation [[GH-20934](https://github.com/hashicorp/vault/pull/20934)] -* core (enterprise): Don't delete backend stored data that appears to be filterable -on this secondary if we don't have a corresponding mount entry. -* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will -have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. [[GH-21260](https://github.com/hashicorp/vault/pull/21260)] -* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] -* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] -* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] -* identity: Fixes duplicate groups creation with the same name but unique IDs. 
[[GH-20964](https://github.com/hashicorp/vault/pull/20964)] -* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover -* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs -* replication (enterprise): Fix regression causing token creation against a role -with a new entity alias to be incorrectly forwarded from perf standbys. [[GH-21100](https://github.com/hashicorp/vault/pull/21100)] -* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)] - -## 1.13.3 -### June 08, 2023 - -CHANGES: - -* core: Bump Go version to 1.20.4. -* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] -* replication (enterprise): Add a new parameter for the update-primary API call -that allows for setting of the primary cluster addresses directly, instead of -via a token. -* storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. [[GH-20825](https://github.com/hashicorp/vault/pull/20825)] - -IMPROVEMENTS: - -* Add debug symbols back to builds to fix Dynatrace support [[GH-20519](https://github.com/hashicorp/vault/pull/20519)] -* audit: add a `mount_point` field to audit requests and response entries [[GH-20411](https://github.com/hashicorp/vault/pull/20411)] -* autopilot: Update version to v0.2.0 to add better support for respecting min quorum [[GH-19472](https://github.com/hashicorp/vault/pull/19472)] -* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when -`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. 
[[GH-20609](https://github.com/hashicorp/vault/pull/20609)] -* core: Add possibility to decode a generated encoded root token via the rest API [[GH-20595](https://github.com/hashicorp/vault/pull/20595)] -* core: include namespace path in granting_policies block of audit log -* core: report intermediate error messages during request forwarding [[GH-20643](https://github.com/hashicorp/vault/pull/20643)] -* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)] -* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)] -* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] - -BUG FIXES: - -* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] -* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] -* cli: CLI should take days as a unit of time for ttl like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)] -* cli: disable printing flags warnings messages for the ssh command [[GH-20502](https://github.com/hashicorp/vault/pull/20502)] -* command/server: fixes panic in Vault server command when running in recovery mode [[GH-20418](https://github.com/hashicorp/vault/pull/20418)] -* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. -* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. -* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace -* core/identity: Allow updates of only the custom-metadata for entity alias. 
[[GH-20368](https://github.com/hashicorp/vault/pull/20368)] -* core: Fix Forwarded Writer construction to correctly find active nodes, allowing PKI cross-cluster functionality to succeed on existing mounts. -* core: Fix writes to readonly storage on performance standbys when user lockout feature is enabled. [[GH-20783](https://github.com/hashicorp/vault/pull/20783)] -* core: prevent panic on login after namespace is deleted that had mfa enforcement [[GH-20375](https://github.com/hashicorp/vault/pull/20375)] -* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. -* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. -* secrets/pki: Include per-issuer enable_aia_url_templating in issuer read endpoint. [[GH-20354](https://github.com/hashicorp/vault/pull/20354)] -* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation -* secrets/transform: Added importing of keys and key versions into the Transform secrets engine using the command 'vault transform import' and 'vault transform import-version'. [[GH-20668](https://github.com/hashicorp/vault/pull/20668)] -* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. -secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. -sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. 
[[GH-20864](https://github.com/hashicorp/vault/pull/20864)] -* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)] -* ui: fixes issue creating mfa login enforcement from method enforcements tab [[GH-20603](https://github.com/hashicorp/vault/pull/20603)] -* ui: fixes key_bits and signature_bits reverting to default values when editing a pki role [[GH-20907](https://github.com/hashicorp/vault/pull/20907)] - -## 1.13.2 -### April 26, 2023 - -CHANGES: - -* core: Bump Go version to 1.20.3. - -SECURITY: - -* core/seal: Fix handling of HMACing of seal-wrapped storage entries from HSMs using CKM_AES_CBC or CKM_AES_CBC_PAD which may have allowed an attacker to conduct a padding oracle attack. This vulnerability, CVE-2023-2197, affects Vault from 1.13.0 up to 1.13.1 and was fixed in 1.13.2. [[HCSEC-2023-14](https://discuss.hashicorp.com/t/hcsec-2023-14-vault-enterprise-vulnerable-to-padding-oracle-attacks-when-using-a-cbc-based-encryption-mechanism-with-a-hsm/53322)] - -IMPROVEMENTS: - -* Add debug symbols back to builds to fix Dynatrace support [[GH-20294](https://github.com/hashicorp/vault/pull/20294)] -* cli/namespace: Add detailed flag to output additional namespace information -such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] -* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] -* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the -`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] -* core: include reason for ErrReadOnly on PBPWF writing failures -* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration -for all LDAP plugins. 
[[GH-20144](https://github.com/hashicorp/vault/pull/20144)] -* secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. [[GH-20201](https://github.com/hashicorp/vault/pull/20201)] -* sys/wrapping: Add example how to unwrap without authentication in Vault [[GH-20109](https://github.com/hashicorp/vault/pull/20109)] -* ui: Allows license-banners to be dismissed. Saves preferences in localStorage. [[GH-19116](https://github.com/hashicorp/vault/pull/19116)] - -BUG FIXES: - -* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] -* command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows [[GH-20257](https://github.com/hashicorp/vault/pull/20257)] -* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. -* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur -* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` -resulting in 412 errors. -* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)] -* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] -* kmip (enterprise): Fix a problem decrypting with keys that have no Process Start Date attribute. 
-* pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it [[GH-20220](https://github.com/hashicorp/vault/pull/20220)] -* replication (enterprise): Fix a caching issue when replicating filtered data to -a performance secondary. This resulted in the data being set to nil in the cache -and a "invalid value" error being returned from the API. -* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil -* sdk/helper/ocsp: Workaround bug in Go's ocsp.ParseResponse(...), causing validation to fail with embedded CA certificates. -auth/cert: Fix OCSP validation against Vault's PKI engine. [[GH-20181](https://github.com/hashicorp/vault/pull/20181)] -* secrets/aws: Revert changes that removed the lease on STS credentials, while leaving the new ttl field in place. [[GH-20034](https://github.com/hashicorp/vault/pull/20034)] -* secrets/pki: Ensure cross-cluster delta WAL write failure only logs to avoid unattended forwarding. [[GH-20057](https://github.com/hashicorp/vault/pull/20057)] -* secrets/pki: Fix building of unified delta CRLs and recovery during unified delta WAL write failures. [[GH-20058](https://github.com/hashicorp/vault/pull/20058)] -* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. 
[[GH-20341](https://github.com/hashicorp/vault/pull/20341)] -* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens -* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] -* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] -* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] -* ui: fixes remaining doc links to include /vault in path [[GH-20070](https://github.com/hashicorp/vault/pull/20070)] -* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] -* website/docs: Fix Kubernetes Auth Code Example to use the correct whitespace in import. [[GH-20216](https://github.com/hashicorp/vault/pull/20216)] - -## 1.13.1 -### March 29, 2023 - -SECURITY: - -* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] -* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. 
[[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] -* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] - -IMPROVEMENTS: - -* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id -website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)] -* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch -option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)] -* core: validate name identifiers in mssql physical storage backend prior use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)] -* database/elasticsearch: Update error messages resulting from Elasticsearch API errors [[GH-19545](https://github.com/hashicorp/vault/pull/19545)] -* events: Suppress log warnings triggered when events are sent but the events system is not enabled. [[GH-19593](https://github.com/hashicorp/vault/pull/19593)] - -BUG FIXES: - -* agent: Fix panic when SIGHUP is issued to Agent while it has a non-TLS listener. 
[[GH-19483](https://github.com/hashicorp/vault/pull/19483)] -* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error. -* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. [[GH-19585](https://github.com/hashicorp/vault/pull/19585)] -* kmip (enterprise): Do not require attribute Cryptographic Usage Mask when registering Secret Data managed objects. -* kmip (enterprise): Fix a problem forwarding some requests to the active node. -* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)] -* secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. [[GH-19640](https://github.com/hashicorp/vault/pull/19640)] -* secrets/pki: Fix PKI revocation request forwarding from standby nodes due to an error wrapping bug [[GH-19624](https://github.com/hashicorp/vault/pull/19624)] -* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions -* ui: Fixes crypto.randomUUID error in unsecure contexts from third party ember-data library [[GH-19428](https://github.com/hashicorp/vault/pull/19428)] -* ui: fixes SSH engine config deletion [[GH-19448](https://github.com/hashicorp/vault/pull/19448)] -* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] -* ui: fixes oidc tabs in auth form submitting with the root's default_role value after a namespace has been inputted [[GH-19541](https://github.com/hashicorp/vault/pull/19541)] -* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] -* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. 
ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] - -## 1.13.0 -### March 01, 2023 - -SECURITY: - -* secrets/ssh: removal of the deprecated dynamic keys mode. **When any remaining dynamic key leases expire**, an error stating `secret is unsupported by this backend` will be thrown by the lease manager. [[GH-18874](https://github.com/hashicorp/vault/pull/18874)] -* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999 has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] - -CHANGES: - -* auth/alicloud: require the `role` field on login [[GH-19005](https://github.com/hashicorp/vault/pull/19005)] -* auth/approle: Add maximum length of 4096 for approle role_names, as this value results in HMAC calculation [[GH-17768](https://github.com/hashicorp/vault/pull/17768)] -* auth: Returns invalid credentials for ldap, userpass and approle when wrong credentials are provided for existent users. -This will only be used internally for implementing user lockout. [[GH-17104](https://github.com/hashicorp/vault/pull/17104)] -* core: Bump Go version to 1.20.1. -* core: Vault version has been moved out of sdk and into main vault module. -Plugins using sdk/useragent.String must instead use sdk/useragent.PluginString. 
[[GH-14229](https://github.com/hashicorp/vault/pull/14229)] -* logging: Removed legacy environment variable for log format ('LOGXI_FORMAT'), should use 'VAULT_LOG_FORMAT' instead [[GH-17822](https://github.com/hashicorp/vault/pull/17822)] -* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] -* plugins: `GET /database/config/:name` endpoint now returns an additional `plugin_version` field in the response data. [[GH-16982](https://github.com/hashicorp/vault/pull/16982)] -* plugins: `GET /sys/auth/:path/tune` and `GET /sys/mounts/:path/tune` endpoints may now return an additional `plugin_version` field in the response data if set. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)] -* plugins: `GET` for `/sys/auth`, `/sys/auth/:path`, `/sys/mounts`, and `/sys/mounts/:path` paths now return additional `plugin_version`, `running_plugin_version` and `running_sha256` fields in the response data for each mount. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)] -* sdk: Remove version package, make useragent.String versionless. [[GH-19068](https://github.com/hashicorp/vault/pull/19068)] -* secrets/aws: do not create leases for non-renewable/non-revocable STS credentials to reduce storage calls [[GH-15869](https://github.com/hashicorp/vault/pull/15869)] -* secrets/gcpkms: Updated plugin from v0.13.0 to v0.14.0 [[GH-19063](https://github.com/hashicorp/vault/pull/19063)] -* sys/internal/inspect: Turns off this endpoint by default. A SIGHUP can now be used to reload the configs and turn this endpoint on. 
-* ui: Upgrade Ember to version 4.4.0 [[GH-17086](https://github.com/hashicorp/vault/pull/17086)] - -FEATURES: - -* **User lockout**: Ignore repeated bad credentials from the same user for a configured period of time. Enabled by default. -* **Azure Auth Managed Identities**: Allow any Azure resource that supports managed identities to authenticate with Vault [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] -* **Azure Auth Rotate Root**: Add support for rotate root in Azure Auth engine [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] -* **Event System (Alpha)**: Vault has a new opt-in experimental event system. Not yet suitable for production use. Events are currently only generated on writes to the KV secrets engine, but external plugins can also be updated to start generating events. [[GH-19194](https://github.com/hashicorp/vault/pull/19194)] -* **GCP Secrets Impersonated Account Support**: Add support for GCP service account impersonation, allowing callers to generate a GCP access token without requiring Vault to store or retrieve a GCP service account key for each role. [[GH-19018](https://github.com/hashicorp/vault/pull/19018)] -* **Kubernetes Secrets Engine UI**: Kubernetes is now available in the UI as a supported secrets engine. [[GH-17893](https://github.com/hashicorp/vault/pull/17893)] -* **New PKI UI**: Add beta support for new and improved PKI UI [[GH-18842](https://github.com/hashicorp/vault/pull/18842)] -* **PKI Cross-Cluster Revocations**: Revocation information can now be -synchronized across primary and performance replica clusters offering -a unified CRL/OCSP view of revocations across cluster boundaries. 
[[GH-19196](https://github.com/hashicorp/vault/pull/19196)] -* **Server UDS Listener**: Adding listener to Vault server to serve http request via unix domain socket [[GH-18227](https://github.com/hashicorp/vault/pull/18227)] -* **Transit managed keys**: The transit secrets engine now supports configuring and using managed keys -* **User Lockout**: Adds support to configure the user-lockout behaviour for failed logins to prevent -brute force attacks for userpass, approle and ldap auth methods. [[GH-19230](https://github.com/hashicorp/vault/pull/19230)] -* **VMSS Flex Authentication**: Adds support for Virtual Machine Scale Set Flex Authentication [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] -* **Namespaces (enterprise)**: Added the ability to allow access to secrets and more to be shared across namespaces that do not share a namespace hierarchy. Using the new `sys/config/group-policy-application` API, policies can be configured to apply outside of namespace hierarchy, allowing this kind of cross-namespace sharing. -* **OpenAPI-based Go & .NET Client Libraries (Beta)**: We have now made available two new [[OpenAPI-based Go](https://github.com/hashicorp/vault-client-go/)] & [[OpenAPI-based .NET](https://github.com/hashicorp/vault-client-dotnet/)] Client libraries (beta). You can use them to perform various secret management operations easily from your applications. - -IMPROVEMENTS: - -* **Redis ElastiCache DB Engine**: Renamed configuration parameters for disambiguation; old parameters still supported for compatibility. [[GH-18752](https://github.com/hashicorp/vault/pull/18752)] -* Bump github.com/hashicorp/go-plugin version from 1.4.5 to 1.4.8 [[GH-19100](https://github.com/hashicorp/vault/pull/19100)] -* Reduced binary size [[GH-17678](https://github.com/hashicorp/vault/pull/17678)] -* agent/config: Allow config directories to be specified with -config, and allow multiple -configs to be supplied. 
[[GH-18403](https://github.com/hashicorp/vault/pull/18403)] -* agent: Add note in logs when starting Vault Agent indicating if the version differs to the Vault Server. [[GH-18684](https://github.com/hashicorp/vault/pull/18684)] -* agent: Added `token_file` auto-auth configuration to allow using a pre-existing token for Vault Agent. [[GH-18740](https://github.com/hashicorp/vault/pull/18740)] -* agent: Agent listeners can now be set to the `metrics_only` role, serving only metrics, as part of the listener's new top level `role` option. [[GH-18101](https://github.com/hashicorp/vault/pull/18101)] -* agent: Configured Vault Agent listeners now listen without the need for caching to be configured. [[GH-18137](https://github.com/hashicorp/vault/pull/18137)] -* agent: allows some parts of config to be reloaded without requiring a restart. [[GH-18638](https://github.com/hashicorp/vault/pull/18638)] -* agent: fix incorrectly used loop variables in parallel tests and when finalizing seals [[GH-16872](https://github.com/hashicorp/vault/pull/16872)] -* api: Remove dependency on sdk module. [[GH-18962](https://github.com/hashicorp/vault/pull/18962)] -* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)] -* audit: Add `elide_list_responses` option, providing a countermeasure for a common source of oversized audit log entries [[GH-18128](https://github.com/hashicorp/vault/pull/18128)] -* audit: Include stack trace when audit logging recovers from a panic. [[GH-18121](https://github.com/hashicorp/vault/pull/18121)] -* auth/alicloud: upgrades dependencies [[GH-18021](https://github.com/hashicorp/vault/pull/18021)] -* auth/azure: Adds support for authentication with Managed Service Identity (MSI) from a -Virtual Machine Scale Set (VMSS) in flexible orchestration mode. 
[[GH-17540](https://github.com/hashicorp/vault/pull/17540)] -* auth/azure: upgrades dependencies [[GH-17857](https://github.com/hashicorp/vault/pull/17857)] -* auth/cert: Add configurable support for validating client certs with OCSP. [[GH-17093](https://github.com/hashicorp/vault/pull/17093)] -* auth/cert: Support listing provisioned CRLs within the mount. [[GH-18043](https://github.com/hashicorp/vault/pull/18043)] -* auth/cf: Remove incorrect usage of CreateOperation from path_config [[GH-19098](https://github.com/hashicorp/vault/pull/19098)] -* auth/gcp: Upgrades dependencies [[GH-17858](https://github.com/hashicorp/vault/pull/17858)] -* auth/oidc: Adds `abort_on_error` parameter to CLI login command to help in non-interactive contexts [[GH-19076](https://github.com/hashicorp/vault/pull/19076)] -* auth/oidc: Adds ability to set Google Workspace domain for groups search [[GH-19076](https://github.com/hashicorp/vault/pull/19076)] -* auth/token (enterprise): Allow batch token creation in perfStandby nodes -* auth: Allow naming login MFA methods and using those names instead of IDs in satisfying MFA requirement for requests. -Make passcode arguments consistent across login MFA method types. [[GH-18610](https://github.com/hashicorp/vault/pull/18610)] -* auth: Provide an IP address of the requests from Vault to a Duo challenge after successful authentication. [[GH-18811](https://github.com/hashicorp/vault/pull/18811)] -* autopilot: Update version to v.0.2.0 to add better support for respecting min quorum -* cli/kv: improve kv CLI to remove data or custom metadata using kv patch [[GH-18067](https://github.com/hashicorp/vault/pull/18067)] -* cli/pki: Add List-Intermediates functionality to pki client. [[GH-18463](https://github.com/hashicorp/vault/pull/18463)] -* cli/pki: Add health-check subcommand to evaluate the health of a PKI instance. 
[[GH-17750](https://github.com/hashicorp/vault/pull/17750)] -* cli/pki: Add pki issue command, which creates a CSR, has a vault mount sign it, then reimports it. [[GH-18467](https://github.com/hashicorp/vault/pull/18467)] -* cli/pki: Added "Reissue" command which allows extracting fields from an existing certificate to create a new certificate. [[GH-18499](https://github.com/hashicorp/vault/pull/18499)] -* cli/pki: Change the pki health-check --list default config output to JSON so it's a usable configuration file [[GH-19269](https://github.com/hashicorp/vault/pull/19269)] -* cli: Add support for creating requests to existing non-KVv2 PATCH-capable endpoints. [[GH-17650](https://github.com/hashicorp/vault/pull/17650)] -* cli: Add transit import key helper commands for BYOK to Transit/Transform. [[GH-18887](https://github.com/hashicorp/vault/pull/18887)] -* cli: Support the -format=raw option, to read non-JSON Vault endpoints and original response bodies. [[GH-14945](https://github.com/hashicorp/vault/pull/14945)] -* cli: updated `vault operator rekey` prompts to describe recovery keys when `-target=recovery` [[GH-18892](https://github.com/hashicorp/vault/pull/18892)] -* client/pki: Add a new command verify-sign which checks the relationship between two certificates. [[GH-18437](https://github.com/hashicorp/vault/pull/18437)] -* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] -* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. -* core/identity: Add machine-readable output to body of response upon alias clash during entity merge [[GH-17459](https://github.com/hashicorp/vault/pull/17459)] -* core/server: Added an environment variable to write goroutine stacktraces to a -temporary file for SIGUSR2 signals. 
[[GH-17929](https://github.com/hashicorp/vault/pull/17929)] -* core: Add RPCs to read and update userFailedLoginInfo map -* core: Add experiments system and `events.alpha1` experiment. [[GH-18682](https://github.com/hashicorp/vault/pull/18682)] -* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)] -* core: Add user lockout field to config and configuring this for auth mount using auth tune to prevent brute forcing in auth methods [[GH-17338](https://github.com/hashicorp/vault/pull/17338)] -* core: Add vault.core.locked_users telemetry metric to emit information about total number of locked users. [[GH-18718](https://github.com/hashicorp/vault/pull/18718)] -* core: Added sys/locked-users endpoint to list locked users. Changed api endpoint from -sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] to sys/locked-users/[mount_accessor]/unlock/[alias_identifier]. [[GH-18675](https://github.com/hashicorp/vault/pull/18675)] -* core: Added sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] endpoint to unlock an user -with given mount_accessor and alias_identifier if locked [[GH-18279](https://github.com/hashicorp/vault/pull/18279)] -* core: Added warning to /sys/seal-status and vault status command if potentially dangerous behaviour overrides are being used. [[GH-17855](https://github.com/hashicorp/vault/pull/17855)] -* core: Implemented background thread to update locked user entries every 15 minutes to prevent brute forcing in auth methods. [[GH-18673](https://github.com/hashicorp/vault/pull/18673)] -* core: License location is no longer cache exempt, meaning sys/health will not contribute as greatly to storage load when using consul as a storage backend. 
[[GH-17265](https://github.com/hashicorp/vault/pull/17265)] -* core: Update protoc from 3.21.5 to 3.21.7 [[GH-17499](https://github.com/hashicorp/vault/pull/17499)] -* core: add `detect_deadlocks` config to optionally detect core state deadlocks [[GH-18604](https://github.com/hashicorp/vault/pull/18604)] -* core: added changes for user lockout workflow. [[GH-17951](https://github.com/hashicorp/vault/pull/17951)] -* core: parallelize backend initialization to improve startup time for large numbers of mounts. [[GH-18244](https://github.com/hashicorp/vault/pull/18244)] -* database/postgres: Support multiline strings for revocation statements. [[GH-18632](https://github.com/hashicorp/vault/pull/18632)] -* database/redis-elasticache: changed config argument names for disambiguation [[GH-19044](https://github.com/hashicorp/vault/pull/19044)] -* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)] -* hcp/connectivity: Add foundational OSS support for opt-in secure communication between self-managed Vault nodes and [HashiCorp Cloud Platform](https://cloud.hashicorp.com) [[GH-18228](https://github.com/hashicorp/vault/pull/18228)] -* hcp/connectivity: Include HCP organization, project, and resource ID in server startup logs [[GH-18315](https://github.com/hashicorp/vault/pull/18315)] -* hcp/connectivity: Only update SCADA session metadata if status changes [[GH-18585](https://github.com/hashicorp/vault/pull/18585)] -* hcp/status: Add cluster-level status information [[GH-18351](https://github.com/hashicorp/vault/pull/18351)] -* hcp/status: Expand node-level status information [[GH-18302](https://github.com/hashicorp/vault/pull/18302)] -* logging: Vault Agent supports logging to a specified file path via environment variable, CLI or config [[GH-17841](https://github.com/hashicorp/vault/pull/17841)] -* logging: Vault agent and server commands support log file and log rotation. 
[[GH-18031](https://github.com/hashicorp/vault/pull/18031)] -* migration: allow parallelization of key migration for `vault operator migrate` in order to speed up a migration. [[GH-18817](https://github.com/hashicorp/vault/pull/18817)] -* namespaces (enterprise): Add new API, `sys/config/group-policy-application`, to allow group policies to be configurable -to apply to a group in `any` namespace. The default, `within_namespace_hierarchy`, is the current behaviour. -* openapi: Add default values to thing_mount_path parameters [[GH-18935](https://github.com/hashicorp/vault/pull/18935)] -* openapi: Add logic to generate openapi response structures [[GH-18192](https://github.com/hashicorp/vault/pull/18192)] -* openapi: Add openapi response definitions to approle/path_login.go & approle/path_tidy_user_id.go [[GH-18772](https://github.com/hashicorp/vault/pull/18772)] -* openapi: Add openapi response definitions to approle/path_role.go [[GH-18198](https://github.com/hashicorp/vault/pull/18198)] -* openapi: Change gen_openapi.sh to generate schema with generic mount paths [[GH-18934](https://github.com/hashicorp/vault/pull/18934)] -* openapi: Mark request body objects as required [[GH-17909](https://github.com/hashicorp/vault/pull/17909)] -* openapi: add openapi response definitions to /sys/audit endpoints [[GH-18456](https://github.com/hashicorp/vault/pull/18456)] -* openapi: generic_mount_paths: Move implementation fully into server, rather than partially in plugin framework; recognize all 4 singleton mounts (auth/token, cubbyhole, identity, system) rather than just 2; change parameter from `{mountPath}` to `{_mount_path}` [[GH-18663](https://github.com/hashicorp/vault/pull/18663)] -* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)] -* plugins: Allow selecting builtin plugins by their reported semantic version of the form `vX.Y.Z+builtin` or `vX.Y.Z+builtin.vault`. 
[[GH-17289](https://github.com/hashicorp/vault/pull/17289)] -* plugins: Let Vault unseal and mount deprecated builtin plugins in a -deactivated state if this is not the first unseal after an upgrade. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] -* plugins: Mark app-id auth method Removed and remove the plugin code. [[GH-18039](https://github.com/hashicorp/vault/pull/18039)] -* plugins: Mark logical database plugins Removed and remove the plugin code. [[GH-18039](https://github.com/hashicorp/vault/pull/18039)] -* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] -* sdk: Add response schema validation method framework/FieldData.ValidateStrict and two test helpers (ValidateResponse, ValidateResponseData) [[GH-18635](https://github.com/hashicorp/vault/pull/18635)] -* sdk: Adding FindResponseSchema test helper to assist with response schema validation in tests [[GH-18636](https://github.com/hashicorp/vault/pull/18636)] -* secrets/aws: Update dependencies [[PR-17747](https://github.com/hashicorp/vault/pull/17747)] [[GH-17747](https://github.com/hashicorp/vault/pull/17747)] -* secrets/azure: Adds ability to persist an application for the lifetime of a role. 
[[GH-19096](https://github.com/hashicorp/vault/pull/19096)] -* secrets/azure: upgrades dependencies [[GH-17964](https://github.com/hashicorp/vault/pull/17964)] -* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] -* secrets/gcp: Upgrades dependencies [[GH-17871](https://github.com/hashicorp/vault/pull/17871)] -* secrets/kubernetes: Add /check endpoint to determine if environment variables are set [[GH-18](https://github.com/hashicorp/vault-plugin-secrets-kubernetes/pull/18)] [[GH-18587](https://github.com/hashicorp/vault/pull/18587)] -* secrets/kubernetes: add /check endpoint to determine if environment variables are set [[GH-19084](https://github.com/hashicorp/vault/pull/19084)] -* secrets/kv: Emit events on write if events system enabled [[GH-19145](https://github.com/hashicorp/vault/pull/19145)] -* secrets/kv: make upgrade synchronous when no keys to upgrade [[GH-19056](https://github.com/hashicorp/vault/pull/19056)] -* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)] -* secrets/pki: Add a new API that returns the serial numbers of revoked certificates on the local cluster [[GH-17779](https://github.com/hashicorp/vault/pull/17779)] -* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)] -* secrets/pki: Added a new API that allows external actors to craft a CRL through JSON parameters [[GH-18040](https://github.com/hashicorp/vault/pull/18040)] -* secrets/pki: Allow UserID Field (https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1) to be set on Certificates when -allowed by role [[GH-18397](https://github.com/hashicorp/vault/pull/18397)] -* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. 
[[GH-17824](https://github.com/hashicorp/vault/pull/17824)] -* secrets/pki: Allow templating performance replication cluster- and issuer-specific AIA URLs. [[GH-18199](https://github.com/hashicorp/vault/pull/18199)] -* secrets/pki: Allow tidying of expired issuer certificates. [[GH-17823](https://github.com/hashicorp/vault/pull/17823)] -* secrets/pki: Allow tidying of the legacy ca_bundle, improving startup on post-migrated, seal-wrapped PKI mounts. [[GH-18645](https://github.com/hashicorp/vault/pull/18645)] -* secrets/pki: Respond with written data to `config/auto-tidy`, `config/crl`, and `roles/:role`. [[GH-18222](https://github.com/hashicorp/vault/pull/18222)] -* secrets/pki: Return issuer_id and issuer_name on /issuer/:issuer_ref/json endpoint. [[GH-18482](https://github.com/hashicorp/vault/pull/18482)] -* secrets/pki: Return new fields revocation_time_rfc3339 and issuer_id to existing certificate serial lookup api if it is revoked [[GH-17774](https://github.com/hashicorp/vault/pull/17774)] -* secrets/ssh: Allow removing SSH host keys from the dynamic keys feature. [[GH-18939](https://github.com/hashicorp/vault/pull/18939)] -* secrets/ssh: Evaluate ssh validprincipals user template before splitting [[GH-16622](https://github.com/hashicorp/vault/pull/16622)] -* secrets/transit: Add an optional reference field to batch operation items -which is repeated on batch responses to help more easily correlate inputs with outputs. [[GH-18243](https://github.com/hashicorp/vault/pull/18243)] -* secrets/transit: Add associated_data parameter for additional authenticated data in AEAD ciphers [[GH-17638](https://github.com/hashicorp/vault/pull/17638)] -* secrets/transit: Add support for PKCSv1_5_NoOID RSA signatures [[GH-17636](https://github.com/hashicorp/vault/pull/17636)] -* secrets/transit: Allow configuring whether upsert of keys is allowed. [[GH-18272](https://github.com/hashicorp/vault/pull/18272)] -* storage/raft: Add `retry_join_as_non_voter` config option. 
[[GH-18030](https://github.com/hashicorp/vault/pull/18030)] -* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] -* sys/internal/inspect: Creates an endpoint to look to inspect internal subsystems. [[GH-17789](https://github.com/hashicorp/vault/pull/17789)] -* sys/internal/inspect: Creates an endpoint to look to inspect internal subsystems. -* ui: Add algorithm-signer as a SSH Secrets Engine UI field [[GH-10299](https://github.com/hashicorp/vault/pull/10299)] -* ui: Add inline policy creation when creating an identity entity or group [[GH-17749](https://github.com/hashicorp/vault/pull/17749)] -* ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. [[GH-18787](https://github.com/hashicorp/vault/pull/18787)] -* ui: Enable typescript for future development [[GH-17927](https://github.com/hashicorp/vault/pull/17927)] -* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] -* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] -* ui: adds allowed_response_headers as param for secret engine mount config [[GH-19216](https://github.com/hashicorp/vault/pull/19216)] -* ui: consolidate all tag usage [[GH-17866](https://github.com/hashicorp/vault/pull/17866)] -* ui: mfa: use proper request id generation [[GH-17835](https://github.com/hashicorp/vault/pull/17835)] -* ui: remove wizard [[GH-19220](https://github.com/hashicorp/vault/pull/19220)] -* ui: update DocLink component to use new host url: developer.hashicorp.com [[GH-18374](https://github.com/hashicorp/vault/pull/18374)] -* ui: update TTL picker for consistency 
[[GH-18114](https://github.com/hashicorp/vault/pull/18114)] -* ui: use the combined activity log (partial + historic) API for client count dashboard and remove use of monthly endpoint [[GH-17575](https://github.com/hashicorp/vault/pull/17575)] -* vault/diagnose: Upgrade `go.opentelemetry.io/otel`, `go.opentelemetry.io/otel/sdk`, `go.opentelemetry.io/otel/trace` to v1.11.2 [[GH-18589](https://github.com/hashicorp/vault/pull/18589)] - -DEPRECATIONS: - -* secrets/ad: Marks the Active Directory (AD) secrets engine as deprecated. [[GH-19334](https://github.com/hashicorp/vault/pull/19334)] - -BUG FIXES: - -* api: Remove timeout logic from ReadRaw functions and add ReadRawWithContext [[GH-18708](https://github.com/hashicorp/vault/pull/18708)] -* auth/alicloud: fix regression in vault login command that caused login to fail [[GH-19005](https://github.com/hashicorp/vault/pull/19005)] -* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] -* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] -* auth/cert: Address a race condition accessing the loaded crls without a lock [[GH-18945](https://github.com/hashicorp/vault/pull/18945)] -* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] [[GH-18716](https://github.com/hashicorp/vault/pull/18716)] -* auth/kubernetes: fixes and dep updates for the auth-kubernetes plugin (see plugin changelog for details) [[GH-19094](https://github.com/hashicorp/vault/pull/19094)] -* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] -* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] -* cli/kv: 
skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] -* cli/pki: Decode integer values properly in health-check configuration file [[GH-19265](https://github.com/hashicorp/vault/pull/19265)] -* cli/pki: Fix path for role health-check warning messages [[GH-19274](https://github.com/hashicorp/vault/pull/19274)] -* cli/pki: Properly report permission issues within health-check mount tune checks [[GH-19276](https://github.com/hashicorp/vault/pull/19276)] -* cli/transit: Fix import, import-version command invocation [[GH-19373](https://github.com/hashicorp/vault/pull/19373)] -* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] -* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)] -* cli: Remove empty table heading for `vault secrets list -detailed` output. [[GH-17577](https://github.com/hashicorp/vault/pull/17577)] -* command/namespace: Fix vault cli namespace patch examples in help text. [[GH-18143](https://github.com/hashicorp/vault/pull/18143)] -* core (enterprise): Fix missing quotation mark in error message -* core (enterprise): Fix panic that could occur with SSCT alongside invoking external plugins for revocation. -* core (enterprise): Fix panic when using invalid accessor for control-group request -* core (enterprise): Fix perf standby WAL streaming silently failures when replication setup happens at a bad time. -* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use storage backend other than `raft` or `consul`. 
-* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. [[GH-18766](https://github.com/hashicorp/vault/pull/18766)] -* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)] -* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)] -* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)] -* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)] -* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)] -* core/auth: Return a 403 instead of a 500 for wrapping requests when token is not provided [[GH-18859](https://github.com/hashicorp/vault/pull/18859)] -* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace -* core/managed-keys (enterprise): Return better error messages when encountering key creation failures -* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility -* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. -* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. 
-* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] -* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)] -* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)] -* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: Fix spurious `permission denied` for all HelpOperations on sudo-protected paths [[GH-18568](https://github.com/hashicorp/vault/pull/18568)] -* core: Fix vault operator init command to show the right curl string with -output-curl-string and right policy hcl with -output-policy [[GH-17514](https://github.com/hashicorp/vault/pull/17514)] -* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)] -* core: Linux packages now have vendor label and set the default label to HashiCorp. -This fix is implemented for any future releases, but will not be updated for historical releases. -* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] -* core: Refactor lock grabbing code to simplify stateLock deadlock investigations [[GH-17187](https://github.com/hashicorp/vault/pull/17187)] -* core: fix GPG encryption to support subkeys. [[GH-16224](https://github.com/hashicorp/vault/pull/16224)] -* core: fix a start up race condition where performance standbys could go into a -mount loop if default policies are not yet synced from the active node. 
[[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. -* core: fix race when using SystemView.ReplicationState outside of a request context [[GH-17186](https://github.com/hashicorp/vault/pull/17186)] -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] -* core: trying to unseal with the wrong key now returns HTTP 400 [[GH-17836](https://github.com/hashicorp/vault/pull/17836)] -* credential/cert: adds error message if no tls connection is found during the AliasLookahead operation [[GH-17904](https://github.com/hashicorp/vault/pull/17904)] -* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] -* expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. [[GH-18401](https://github.com/hashicorp/vault/pull/18401)] -* kmip (enterprise): Fix a problem with some multi-part MAC Verify operations. -* kmip (enterprise): Only require data to be full blocks on encrypt/decrypt operations using CBC and ECB block cipher modes. -* license (enterprise): Fix bug where license would update even if the license didn't change. 
-* licensing (enterprise): update autoloaded license cache after reload -* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] -* openapi: fix gen_openapi.sh script to correctly load vault plugins [[GH-17752](https://github.com/hashicorp/vault/pull/17752)] -* plugins/kv: KV v2 returns 404 instead of 500 for request paths that incorrectly include a trailing slash. [[GH-17339](https://github.com/hashicorp/vault/pull/17339)] -* plugins: Allow running external plugins which override deprecated builtins. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] -* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)] -* plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error. [[GH-18173](https://github.com/hashicorp/vault/pull/18173)] -* plugins: Only report deprecation status for builtin plugins. [[GH-17816](https://github.com/hashicorp/vault/pull/17816)] -* plugins: Skip loading but still mount data associated with missing plugins on unseal. [[GH-18189](https://github.com/hashicorp/vault/pull/18189)] -* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] -* replication (enterprise): Fix bug where reloading external plugin on a secondary would -break replication. -* sdk: Don't panic if system view or storage methods called during plugin setup. 
[[GH-18210](https://github.com/hashicorp/vault/pull/18210)] -* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] -* secrets/ad: Fix bug where updates to config would fail if password isn't provided [[GH-19061](https://github.com/hashicorp/vault/pull/19061)] -* secrets/gcp: fix issue where IAM bindings were not preserved during policy update [[GH-19018](https://github.com/hashicorp/vault/pull/19018)] -* secrets/mongodb-atlas: Fix a bug that did not allow WAL rollback to handle partial failures when creating API keys [[GH-19111](https://github.com/hashicorp/vault/pull/19111)] -* secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler [[GH-18184](https://github.com/hashicorp/vault/pull/18184)] -* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] -* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)] -* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. [[GH-17693](https://github.com/hashicorp/vault/pull/17693)] -* secrets/pki: Fixes duplicate otherName in certificates created by the sign-verbatim endpoint. [[GH-16700](https://github.com/hashicorp/vault/pull/16700)] -* secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. [[GH-18938](https://github.com/hashicorp/vault/pull/18938)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. 
[[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) [[GH-19037](https://github.com/hashicorp/vault/pull/19037)] -* secrets/pki: consistently use UTC for CA's notAfter exceeded error message [[GH-18984](https://github.com/hashicorp/vault/pull/18984)] -* secrets/pki: fix race between tidy's cert counting and tidy status reporting. [[GH-18899](https://github.com/hashicorp/vault/pull/18899)] -* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] -* secrets/transit: Honor `partial_success_response_code` on decryption failures. [[GH-18310](https://github.com/hashicorp/vault/pull/18310)] -* server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled [[GH-19311](https://github.com/hashicorp/vault/pull/19311)] -* storage/raft (enterprise): An already joined node can rejoin by wiping storage -and re-issuing a join request, but in doing so could transiently become a -non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)] -* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] -* storage/raft: Fix race with follower heartbeat tracker during teardown. [[GH-18704](https://github.com/hashicorp/vault/pull/18704)] -* ui/keymgmt: Sets the defaultValue for type when creating a key. 
[[GH-17407](https://github.com/hashicorp/vault/pull/17407)] -* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] -* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] -* ui: Remove `default` and add `default-service` and `default-batch` to UI token_type for auth mount and tuning. [[GH-19290](https://github.com/hashicorp/vault/pull/19290)] -* ui: Remove default value of 30 to TtlPicker2 if no value is passed in. [[GH-17376](https://github.com/hashicorp/vault/pull/17376)] -* ui: allow selection of "default" for ssh algorithm_signer in web interface [[GH-17894](https://github.com/hashicorp/vault/pull/17894)] -* ui: cleanup unsaved auth method ember data record when navigating away from mount backend form [[GH-18651](https://github.com/hashicorp/vault/pull/18651)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] -* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] -* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19403](https://github.com/hashicorp/vault/pull/19403)] -* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] - -## 1.12.11 -### September 13, 2023 - -SECURITY: - -* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. 
[[GH-22852](https://github.com/hashicorp/vault/pull/22852)] - -IMPROVEMENTS: - -* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)] -* kmip (enterprise): reduce latency of KMIP operation handling - -BUG FIXES: - -* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)] -* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)] -* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. [[GH-22760](https://github.com/hashicorp/vault/pull/22760)] -* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)] -* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable - -## 1.12.10 -### August 30, 2023 - -CHANGES: - -* core: Bump Go version to 1.19.12. - -IMPROVEMENTS: - -* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)] -* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. -* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)] -* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] - -BUG FIXES: - -* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. 
[[GH-22523](https://github.com/hashicorp/vault/pull/22523)] -* core (enterprise): Remove MFA Configuration for namespace when deleting namespace -* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. -Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] -* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] -* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] -* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] -* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] -* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node is observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)] -* replication (enterprise): Fix bug sync invalidate CoreReplicatedClusterInfoPath -* replication (enterprise): Fixing a bug by which the atomicity of a merkle diff result could be affected. This means it could be a source of a merkle-diff & sync process failing to switch into stream-wal mode afterwards. -* sdk/ldaputil: Properly escape user filters when using UPN domains -sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)] -* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. 
[[GH-22332](https://github.com/hashicorp/vault/pull/22332)] -* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute -* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] -* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] - -## 1.12.9 -### July 25, 2023 - -SECURITY: - -* core/namespace (enterprise): An unhandled error in Vault Enterprise’s namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. [[HSEC_2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)] - -CHANGES: - -* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests, a warning will be returned if max_ttl was applied. - -IMPROVEMENTS: - -* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise. -* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary -* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling - -BUG FIXES: - -* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-24170](https://github.com/hashicorp/vault/pull/24170)] -* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. 
[[GH-20965](https://github.com/hashicorp/vault/pull/20965)] -* replication (enterprise): update primary cluster address after DR failover -* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21633](https://github.com/hashicorp/vault/pull/21633)] -* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`. [[GH-21316](https://github.com/hashicorp/vault/pull/21316)] -* secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. [[GH-20664](https://github.com/hashicorp/vault/pull/20664)] -* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present -* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required -* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)] -* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)] - -## 1.12.8 -### June 21, 2023 -BREAKING CHANGES: - -* secrets/pki: Maintaining running count of certificates will be turned off by default. -To re-enable keeping these metrics available on the tidy status endpoint, enable -maintain_stored_certificate_counts on tidy-config, to also publish them to the -metrics consumer, enable publish_stored_certificate_count_metrics . 
[[GH-18186](https://github.com/hashicorp/vault/pull/18186)] - -CHANGES: - -* core: Bump Go version to 1.19.10. - -FEATURES: - -* **Automated License Utilization Reporting**: Added automated license -utilization reporting, which sends minimal product-license [metering -data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) -to HashiCorp without requiring you to manually collect and report them. -* core (enterprise): Add background worker for automatic reporting of billing -information. [[GH-19625](https://github.com/hashicorp/vault/pull/19625)] - -IMPROVEMENTS: - -* api: GET ... /sys/internal/counters/activity?current_billing_period=true now -results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] -* api: `/sys/internal/counters/config` endpoint now contains read-only -`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] -* api: `/sys/internal/counters/config` endpoint now contains read-only -`reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)] -* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] -* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] -* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] -* core (enterprise): vault server command now allows for opt-out of automated -reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. 
[[GH-3939](https://github.com/hashicorp/vault/pull/3939)] -* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] -* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] -* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] - -BUG FIXES: - -* core (enterprise): Don't delete backend stored data that appears to be filterable -on this secondary if we don't have a corresponding mount entry. -* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. [[GH-18766](https://github.com/hashicorp/vault/pull/18766)] -* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)] -* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)] -* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)] -* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)] -* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)] -* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will -have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. 
[[GH-21260](https://github.com/hashicorp/vault/pull/21260)] -* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] -* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] -* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] -* identity: Fixes duplicate groups creation with the same name but unique IDs. [[GH-20964](https://github.com/hashicorp/vault/pull/20964)] -* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover -* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs -* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)] - -## 1.12.7 -### June 08, 2023 - -SECURITY: - -* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. [[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)] - -CHANGES: - -* core: Bump Go version to 1.19.9. -* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] - -IMPROVEMENTS: - -* audit: add a `mount_point` field to audit requests and response entries [[GH-20411](https://github.com/hashicorp/vault/pull/20411)] -* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when -`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. 
[[GH-20609](https://github.com/hashicorp/vault/pull/20609)] -* core: include namespace path in granting_policies block of audit log -* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)] -* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)] -* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] -* ui: update TTL picker for consistency [[GH-18114](https://github.com/hashicorp/vault/pull/18114)] - -BUG FIXES: - -* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] -* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] -* cli: CLI should take days as a unit of time for ttl like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)] -* cli: disable printing flags warnings messages for the ssh command [[GH-20502](https://github.com/hashicorp/vault/pull/20502)] -* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. -* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. -* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace -* core: prevent panic on login after namespace is deleted that had mfa enforcement [[GH-20375](https://github.com/hashicorp/vault/pull/20375)] -* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. -* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. 
-* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation -* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. -secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. -sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. [[GH-20864](https://github.com/hashicorp/vault/pull/20864)] -* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)] - -## 1.12.6 -### April 26, 2023 - -CHANGES: - -* core: Bump Go version to 1.19.8. - -IMPROVEMENTS: - -* cli/namespace: Add detailed flag to output additional namespace information -such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] -* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] -* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the -`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] -* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration -for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)] -* secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. 
[[GH-20201](https://github.com/hashicorp/vault/pull/20201)] - -BUG FIXES: - -* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] -* command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows [[GH-20257](https://github.com/hashicorp/vault/pull/20257)] -* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. -* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur -* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` -resulting in 412 errors. -* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)] -* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] -* kmip (enterprise): Fix a problem decrypting with keys that have no Process Start Date attribute. -* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] -* pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it [[GH-20220](https://github.com/hashicorp/vault/pull/20220)] -* replication (enterprise): Fix a caching issue when replicating filtered data to -a performance secondary. This resulted in the data being set to nil in the cache -and a "invalid value" error being returned from the API. 
-* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil -* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. [[GH-20341](https://github.com/hashicorp/vault/pull/20341)] -* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens -* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] -* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] -* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] -* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] - -## 1.12.5 -### March 29, 2023 - -SECURITY: - -* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] -* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. 
[[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] -* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] - -IMPROVEMENTS: - -* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id -website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)] -* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch -option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)] -* core: validate name identifiers in mssql physical storage backend prior use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)] - -BUG FIXES: - -* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)] -* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error. -* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. 
[[GH-19585](https://github.com/hashicorp/vault/pull/19585)] -* kmip (enterprise): Do not require attribute Cryptographic Usage Mask when registering Secret Data managed objects. -* kmip (enterprise): Fix a problem forwarding some requests to the active node. -* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)] -* secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. [[GH-19641](https://github.com/hashicorp/vault/pull/19641)] -* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions -* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] -* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] -* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] - -## 1.12.4 -### March 01, 2023 - -SECURITY: -* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999 has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HCSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] - -CHANGES: - -* core: Bump Go version to 1.19.6. - -IMPROVEMENTS: - -* secrets/database: Adds error message requiring password on root credential rotation. 
[[GH-19103](https://github.com/hashicorp/vault/pull/19103)] -* ui: remove wizard [[GH-19220](https://github.com/hashicorp/vault/pull/19220)] - -BUG FIXES: - -* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] -* core (enterprise): Fix panic when using invalid accessor for control-group request -* core (enterprise): Fix perf standby WAL streaming silently failures when replication setup happens at a bad time. -* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] -* license (enterprise): Fix bug where license would update even if the license didn't change. -* replication (enterprise): Fix bug where reloading external plugin on a secondary would -break replication. -* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18207](https://github.com/hashicorp/vault/pull/18207)] -* secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) [[GH-19037](https://github.com/hashicorp/vault/pull/19037)] -* server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled [[GH-19311](https://github.com/hashicorp/vault/pull/19311)] -* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] -* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] -* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19410](https://github.com/hashicorp/vault/pull/19410)] -* ui: show Get credentials button for static roles detail page when a user has the proper permissions. 
[[GH-19190](https://github.com/hashicorp/vault/pull/19190)] - -## 1.12.3 -### February 6, 2023 - -CHANGES: - -* core: Bump Go version to 1.19.4. - -IMPROVEMENTS: - -* audit: Include stack trace when audit logging recovers from a panic. [[GH-18121](https://github.com/hashicorp/vault/pull/18121)] -* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] -* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. -* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)] -* plugins: Let Vault unseal and mount deprecated builtin plugins in a -deactivated state if this is not the first unseal after an upgrade. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] -* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] -* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)] -* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] -* ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. 
[[GH-18787](https://github.com/hashicorp/vault/pull/18787)] -* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] -* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] - -BUG FIXES: - -* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] -* auth/cert: Address a race condition accessing the loaded crls without a lock [[GH-18945](https://github.com/hashicorp/vault/pull/18945)] -* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] [[GH-18716](https://github.com/hashicorp/vault/pull/18716)] -* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] -* command/namespace: Fix vault cli namespace patch examples in help text. [[GH-18143](https://github.com/hashicorp/vault/pull/18143)] -* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. -* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace -* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. -* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] -* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)] -* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. 
-* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] -* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] -* expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. [[GH-18401](https://github.com/hashicorp/vault/pull/18401)] -* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. -* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. -* kmip (enterprise): Fix Query operation response that omitted streaming capability and supported profiles. -* licensing (enterprise): update autoloaded license cache after reload -* plugins: Allow running external plugins which override deprecated builtins. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] -* plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error. [[GH-18173](https://github.com/hashicorp/vault/pull/18173)] -* plugins: Skip loading but still mount data associated with missing plugins on unseal. [[GH-18189](https://github.com/hashicorp/vault/pull/18189)] -* sdk: Don't panic if system view or storage methods called during plugin setup. [[GH-18210](https://github.com/hashicorp/vault/pull/18210)] -* secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler [[GH-18184](https://github.com/hashicorp/vault/pull/18184)] -* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] -* secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. [[GH-18938](https://github.com/hashicorp/vault/pull/18938)] -* secrets/pki: fix race between tidy's cert counting and tidy status reporting. 
[[GH-18899](https://github.com/hashicorp/vault/pull/18899)] -* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] -* secrets/transit: Honor `partial_success_response_code` on decryption failures. [[GH-18310](https://github.com/hashicorp/vault/pull/18310)] -* storage/raft (enterprise): An already joined node can rejoin by wiping storage -and re-issuing a join request, but in doing so could transiently become a -non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)] -* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] -* ui: cleanup unsaved auth method ember data record when navigating away from mount backend form [[GH-18651](https://github.com/hashicorp/vault/pull/18651)] -* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] -## 1.12.2 -### November 30, 2022 - -CHANGES: - -* core: Bump Go version to 1.19.3. -* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] - -IMPROVEMENTS: - -* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] -* storage/raft: Add `retry_join_as_non_voter` config option. 
[[GH-18030](https://github.com/hashicorp/vault/pull/18030)] - -BUG FIXES: - -* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] -* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] -* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] -* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use storage backend other than `raft` or `consul`. -* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. -* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: fix a start up race condition where performance standbys could go into a - mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* plugins: Only report deprecation status for builtin plugins. [[GH-17816](https://github.com/hashicorp/vault/pull/17816)] -* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. 
[[GH-18051](https://github.com/hashicorp/vault/pull/18051)] -* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] -* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18086](https://github.com/hashicorp/vault/pull/18086)] -* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18111](https://github.com/hashicorp/vault/pull/18111)] -* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. [[GH-17693](https://github.com/hashicorp/vault/pull/17693)] -* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] - -## 1.12.1 -### November 2, 2022 - -IMPROVEMENTS: - -* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)] -* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)] -* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)] -* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] - -BUG FIXES: - -* cli: Remove empty table heading for `vault secrets list -detailed` output. 
[[GH-17577](https://github.com/hashicorp/vault/pull/17577)] -* core/managed-keys (enterprise): Return better error messages when encountering key creation failures -* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility -* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)] -* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)] -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail. -* kmip (enterprise): Fix selection of Cryptographic Parameters for Encrypt/Decrypt operations. -* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] - -## 1.12.0 -### October 13, 2022 - -SECURITY: - -* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. 
[[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] - -CHANGES: - -* api: Exclusively use `GET /sys/plugins/catalog` endpoint for listing plugins, and add `details` field to list responses. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)] -* auth: `GET /sys/auth/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* auth: `GET /sys/auth` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* auth: `POST /sys/auth/:type` endpoint response contains a warning for `Deprecated` auth methods. [[GH-17058](https://github.com/hashicorp/vault/pull/17058)] -* auth: `auth enable` returns an error and `POST /sys/auth/:type` endpoint reports an error for `Pending Removal` auth methods. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)] -* core/entities: Fixed stranding of aliases upon entity merge, and require explicit selection of which aliases should be kept when some must be deleted [[GH-16539](https://github.com/hashicorp/vault/pull/16539)] -* core: Bump Go version to 1.19.2. -* core: Validate input parameters for vault operator init command. Vault 1.12 CLI version is needed to run operator init now. [[GH-16379](https://github.com/hashicorp/vault/pull/16379)] -* identity: a request to `/identity/group` that includes `member_group_ids` that contains a cycle will now be responded to with a 400 rather than 500 [[GH-15912](https://github.com/hashicorp/vault/pull/15912)] -* licensing (enterprise): Terminated licenses will no longer result in shutdown. Instead, upgrades will not be allowed if the license expiration time is before the build date of the binary. 
-* plugins: Add plugin version to auth register, list, and mount table [[GH-16856](https://github.com/hashicorp/vault/pull/16856)] -* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint contains deprecation status for builtin plugins. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] -* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint now returns an additional `version` field in the response data. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] -* plugins: `GET /sys/plugins/catalog/` endpoint contains deprecation status in `detailed` list. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] -* plugins: `GET /sys/plugins/catalog` endpoint now returns an additional `detailed` field in the response data with a list of additional plugin metadata. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] -* plugins: `plugin info` displays deprecation status for builtin plugins. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] -* plugins: `plugin list` now accepts a `-detailed` flag, which display deprecation status and version info. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] -* secrets/azure: Removed deprecated AAD graph API support from the secrets engine. [[GH-17180](https://github.com/hashicorp/vault/pull/17180)] -* secrets: All database-specific (standalone DB) secrets engines are now marked `Pending Removal`. [[GH-17038](https://github.com/hashicorp/vault/pull/17038)] -* secrets: `GET /sys/mounts/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* secrets: `GET /sys/mounts` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* secrets: `POST /sys/mounts/:type` endpoint response contains a warning for `Deprecated` secrets engines. 
[[GH-17058](https://github.com/hashicorp/vault/pull/17058)] -* secrets: `secrets enable` returns an error and `POST /sys/mount/:type` endpoint reports an error for `Pending Removal` secrets engines. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)] - -FEATURES: - -* **GCP Cloud KMS support for managed keys**: Managed keys now support using GCP Cloud KMS keys -* **LDAP Secrets Engine**: Adds the `ldap` secrets engine with service account check-out functionality for all supported schemas. [[GH-17152](https://github.com/hashicorp/vault/pull/17152)] -* **OCSP Responder**: PKI mounts now have an OCSP responder that implements a subset of RFC6960, answering single serial number OCSP requests for a specific cluster's revoked certificates in a mount. [[GH-16723](https://github.com/hashicorp/vault/pull/16723)] -* **Redis DB Engine**: Adding the new Redis database engine that supports the generation of static and dynamic user roles and root credential rotation on a stand alone Redis server. [[GH-17070](https://github.com/hashicorp/vault/pull/17070)] -* **Redis ElastiCache DB Plugin**: Added Redis ElastiCache as a built-in plugin. [[GH-17075](https://github.com/hashicorp/vault/pull/17075)] -* **Secrets/auth plugin multiplexing**: manage multiple plugin configurations with a single plugin process [[GH-14946](https://github.com/hashicorp/vault/pull/14946)] -* **Transform Key Import (BYOK)**: The transform secrets engine now supports importing keys for tokenization and FPE transformations -* HCP (enterprise): Adding foundational support for self-managed vault nodes to securely communicate with [HashiCorp Cloud Platform](https://cloud.hashicorp.com) as an opt-in feature -* ui: UI support for Okta Number Challenge. [[GH-15998](https://github.com/hashicorp/vault/pull/15998)] -* **Plugin Versioning**: Vault supports registering, managing, and running plugins with semantic versions specified. 
- -IMPROVEMENTS: - -* core/managed-keys (enterprise): Allow operators to specify PSS signatures and/or hash algorithm for the test/sign api -* activity (enterprise): Added new clients unit tests to test accuracy of estimates -* agent/auto-auth: Add `exit_on_err` which when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)] -* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. [[GH-15986](https://github.com/hashicorp/vault/pull/15986)] -* agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)] -* agent: JWT auto auth now supports a `remove_jwt_after_reading` config option which defaults to true. [[GH-11969](https://github.com/hashicorp/vault/pull/11969)] -* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)] -* api/mfa: Add namespace path to the MFA read/list endpoint [[GH-16911](https://github.com/hashicorp/vault/pull/16911)] -* api: Add a sentinel error for missing KV secrets [[GH-16699](https://github.com/hashicorp/vault/pull/16699)] -* auth/alicloud: Enables AliCloud roles to be compatible with Vault's role based quotas. [[GH-17251](https://github.com/hashicorp/vault/pull/17251)] -* auth/approle: SecretIDs can now be generated with an per-request specified TTL and num_uses. -When either the ttl and num_uses fields are not specified, the role's configuration is used. [[GH-14474](https://github.com/hashicorp/vault/pull/14474)] -* auth/aws: PKCS7 signatures will now use SHA256 by default in prep for Go 1.18 [[GH-16455](https://github.com/hashicorp/vault/pull/16455)] -* auth/azure: Enables Azure roles to be compatible with Vault's role based quotas. 
[[GH-17194](https://github.com/hashicorp/vault/pull/17194)] -* auth/cert: Add metadata to identity-alias [[GH-14751](https://github.com/hashicorp/vault/pull/14751)] -* auth/cert: Operators can now specify a CRL distribution point URL, in which case the cert auth engine will fetch and use the CRL from that location rather than needing to push CRLs directly to auth/cert. [[GH-17136](https://github.com/hashicorp/vault/pull/17136)] -* auth/cf: Enables CF roles to be compatible with Vault's role based quotas. [[GH-17196](https://github.com/hashicorp/vault/pull/17196)] -* auth/gcp: Add support for GCE regional instance groups [[GH-16435](https://github.com/hashicorp/vault/pull/16435)] -* auth/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17160](https://github.com/hashicorp/vault/pull/17160)] -* auth/jwt: Adds support for Microsoft US Gov L4 to the Azure provider for groups fetching. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)] -* auth/jwt: Improves detection of Windows Subsystem for Linux (WSL) for CLI-based logins. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)] -* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)] -* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the Kerberos config in Vault. This removes any instance names found in the keytab service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] -* auth/kubernetes: Role resolution for K8S Auth [[GH-156](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/156)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)] -* auth/oci: Add support for role resolution. [[GH-17212](https://github.com/hashicorp/vault/pull/17212)] -* auth/oidc: Adds support for group membership parsing when using SecureAuth as an OIDC provider. 
[[GH-16274](https://github.com/hashicorp/vault/pull/16274)] -* cli: CLI commands will print a warning if flags will be ignored because they are passed after positional arguments. [[GH-16441](https://github.com/hashicorp/vault/pull/16441)] -* cli: `auth` and `secrets` list `-detailed` commands now show Deprecation Status for builtin plugins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] -* cli: `vault plugin list` now has a `details` field in JSON format, and version and type information in table format. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)] -* command/audit: Improve missing type error message [[GH-16409](https://github.com/hashicorp/vault/pull/16409)] -* command/server: add `-dev-tls` and `-dev-tls-cert-dir` subcommands to create a Vault dev server with generated certificates and private key. [[GH-16421](https://github.com/hashicorp/vault/pull/16421)] -* command: Fix shell completion for KV v2 mounts [[GH-16553](https://github.com/hashicorp/vault/pull/16553)] -* core (enterprise): Add HTTP PATCH support for namespaces with an associated `namespace patch` CLI command -* core (enterprise): Add check to `vault server` command to ensure configured storage backend is supported. 
-* core (enterprise): Add custom metadata support for namespaces -* core/activity: generate hyperloglogs containing clientIds for each month during precomputation [[GH-16146](https://github.com/hashicorp/vault/pull/16146)] -* core/activity: refactor activity log api to reuse partial api functions in activity endpoint when current month is specified [[GH-16162](https://github.com/hashicorp/vault/pull/16162)] -* core/activity: use monthly hyperloglogs to calculate new clients approximation for current month [[GH-16184](https://github.com/hashicorp/vault/pull/16184)] -* core/quotas (enterprise): Added ability to add path suffixes for lease-count resource quotas -* core/quotas (enterprise): Added ability to add role information for lease-count resource quotas, to limit login requests on auth mounts made using that role -* core/quotas: Added ability to add path suffixes for rate-limit resource quotas [[GH-15989](https://github.com/hashicorp/vault/pull/15989)] -* core/quotas: Added ability to add role information for rate-limit resource quotas, to limit login requests on auth mounts made using that role [[GH-16115](https://github.com/hashicorp/vault/pull/16115)] -* core: Activity log goroutine management improvements to allow tests to be more deterministic. [[GH-17028](https://github.com/hashicorp/vault/pull/17028)] -* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] -* core: Handle and log deprecated builtin mounts. Introduces `VAULT_ALLOW_PENDING_REMOVAL_MOUNTS` to override shutdown and error when attempting to mount `Pending Removal` builtin plugins. 
[[GH-17005](https://github.com/hashicorp/vault/pull/17005)] -* core: Limit activity log client count usage by namespaces [[GH-16000](https://github.com/hashicorp/vault/pull/16000)] -* core: Upgrade github.com/hashicorp/raft [[GH-16609](https://github.com/hashicorp/vault/pull/16609)] -* core: remove gox [[GH-16353](https://github.com/hashicorp/vault/pull/16353)] -* docs: Clarify the behaviour of local mounts in the context of DR replication [[GH-16218](https://github.com/hashicorp/vault/pull/16218)] -* identity/oidc: Adds support for detailed listing of clients and providers. [[GH-16567](https://github.com/hashicorp/vault/pull/16567)] -* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] -* identity/oidc: allows filtering the list providers response by an allowed_client_id [[GH-16181](https://github.com/hashicorp/vault/pull/16181)] -* identity: Prevent possibility of data races on entity creation. [[GH-16487](https://github.com/hashicorp/vault/pull/16487)] -* physical/postgresql: pass context to queries to propagate timeouts and cancellations on requests. [[GH-15866](https://github.com/hashicorp/vault/pull/15866)] -* plugins/multiplexing: Added multiplexing support to database plugins if run as external plugins [[GH-16995](https://github.com/hashicorp/vault/pull/16995)] -* plugins: Add Deprecation Status method to builtinregistry. [[GH-16846](https://github.com/hashicorp/vault/pull/16846)] -* plugins: Added environment variable flag to opt-out specific plugins from multiplexing [[GH-16972](https://github.com/hashicorp/vault/pull/16972)] -* plugins: Adding version to plugin GRPC interface [[GH-17088](https://github.com/hashicorp/vault/pull/17088)] -* plugins: Plugin catalog supports registering and managing plugins with semantic version information. 
[[GH-16688](https://github.com/hashicorp/vault/pull/16688)] -* replication (enterprise): Fix race in merkle sync that can prevent streaming by returning key value matching provided hash if found in log shipper buffer. -* secret/nomad: allow reading CA and client auth certificate from /nomad/config/access [[GH-15809](https://github.com/hashicorp/vault/pull/15809)] -* secret/pki: Add RSA PSS signature support for issuing certificates, signing CRLs [[GH-16519](https://github.com/hashicorp/vault/pull/16519)] -* secret/pki: Add signature_bits to sign-intermediate, sign-verbatim endpoints [[GH-16124](https://github.com/hashicorp/vault/pull/16124)] -* secret/pki: Allow issuing certificates with non-domain, non-email Common Names from roles, sign-verbatim, and as issuers (`cn_validations`). [[GH-15996](https://github.com/hashicorp/vault/pull/15996)] -* secret/pki: Allow specifying SKID for cross-signed issuance from older Vault versions. [[GH-16494](https://github.com/hashicorp/vault/pull/16494)] -* secret/transit: Allow importing Ed25519 keys from PKCS#8 with inner RFC 5915 ECPrivateKey blobs (NSS-wrapped keys). [[GH-15742](https://github.com/hashicorp/vault/pull/15742)] -* secrets/ad: set config default length only if password_policy is missing [[GH-16140](https://github.com/hashicorp/vault/pull/16140)] -* secrets/azure: Adds option to permanently delete AzureAD objects created by Vault. [[GH-17045](https://github.com/hashicorp/vault/pull/17045)] -* secrets/database/hana: Add ability to customize dynamic usernames [[GH-16631](https://github.com/hashicorp/vault/pull/16631)] -* secrets/database/snowflake: Add multiplexing support [[GH-17159](https://github.com/hashicorp/vault/pull/17159)] -* secrets/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17174](https://github.com/hashicorp/vault/pull/17174)] -* secrets/gcpkms: Update dependencies: google.golang.org/api@v0.83.0. 
[[GH-17199](https://github.com/hashicorp/vault/pull/17199)] -* secrets/kubernetes: upgrade to v0.2.0 [[GH-17164](https://github.com/hashicorp/vault/pull/17164)] -* secrets/pki/tidy: Add another pair of metrics counting certificates not deleted by the tidy operation. [[GH-16702](https://github.com/hashicorp/vault/pull/16702)] -* secrets/pki: Add a new flag to issue/sign APIs which can filter out root CAs from the returned ca_chain field [[GH-16935](https://github.com/hashicorp/vault/pull/16935)] -* secrets/pki: Add a warning to any successful response when the requested TTL is overwritten by MaxTTL [[GH-17073](https://github.com/hashicorp/vault/pull/17073)] -* secrets/pki: Add ability to cancel tidy operations, control tidy resource usage. [[GH-16958](https://github.com/hashicorp/vault/pull/16958)] -* secrets/pki: Add ability to periodically rebuild CRL before expiry [[GH-16762](https://github.com/hashicorp/vault/pull/16762)] -* secrets/pki: Add ability to periodically run tidy operations to remove expired certificates. [[GH-16900](https://github.com/hashicorp/vault/pull/16900)] -* secrets/pki: Add support for per-issuer Authority Information Access (AIA) URLs [[GH-16563](https://github.com/hashicorp/vault/pull/16563)] -* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)] -* secrets/pki: Added gauge metrics "secrets.pki.total_revoked_certificates_stored" and "secrets.pki.total_certificates_stored" to track the number of certificates in storage. [[GH-16676](https://github.com/hashicorp/vault/pull/16676)] -* secrets/pki: Allow revocation of certificates with explicitly provided certificate (bring your own certificate / BYOC). 
[[GH-16564](https://github.com/hashicorp/vault/pull/16564)] -* secrets/pki: Allow revocation via proving possession of certificate's private key [[GH-16566](https://github.com/hashicorp/vault/pull/16566)] -* secrets/pki: Allow tidy to associate revoked certs with their issuers for OCSP performance [[GH-16871](https://github.com/hashicorp/vault/pull/16871)] -* secrets/pki: Honor If-Modified-Since header on CA, CRL fetch; requires passthrough_request_headers modification on the mount point. [[GH-16249](https://github.com/hashicorp/vault/pull/16249)] -* secrets/pki: Improve stability of association of revoked cert with its parent issuer; when an issuer loses crl-signing usage, do not place certs on default issuer's CRL. [[GH-16874](https://github.com/hashicorp/vault/pull/16874)] -* secrets/pki: Support generating delta CRLs for up-to-date CRLs when auto-building is enabled. [[GH-16773](https://github.com/hashicorp/vault/pull/16773)] -* secrets/ssh: Add allowed_domains_template to allow templating of allowed_domains. [[GH-16056](https://github.com/hashicorp/vault/pull/16056)] -* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] -* secrets/ssh: Allow the use of Identity templates in the `default_user` field [[GH-16351](https://github.com/hashicorp/vault/pull/16351)] -* secrets/transit: Add a dedicated HMAC key type, which can be used with key import. [[GH-16668](https://github.com/hashicorp/vault/pull/16668)] -* secrets/transit: Added a parameter to encrypt/decrypt batch operations to allow the caller to override the HTTP response code in case of partial user-input failures. [[GH-17118](https://github.com/hashicorp/vault/pull/17118)] -* secrets/transit: Allow configuring the possible salt lengths for RSA PSS signatures. 
[[GH-16549](https://github.com/hashicorp/vault/pull/16549)] -* ssh: Addition of an endpoint `ssh/issue/:role` to allow the creation of signed key pairs [[GH-15561](https://github.com/hashicorp/vault/pull/15561)] -* storage/cassandra: tuning parameters for clustered environments `connection_timeout`, `initial_connection_timeout`, `simple_retry_policy_retries`. [[GH-10467](https://github.com/hashicorp/vault/pull/10467)] -* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)] -* ui: Changed the tokenBoundCidrs tooltip content to clarify that comma separated values are not accepted in this field. [[GH-15852](https://github.com/hashicorp/vault/pull/15852)] -* ui: Prevents requests to /sys/internal/ui/resultant-acl endpoint when unauthenticated [[GH-17139](https://github.com/hashicorp/vault/pull/17139)] -* ui: Removed deprecated version of core-js 2.6.11 [[GH-15898](https://github.com/hashicorp/vault/pull/15898)] -* ui: Renamed labels under Tools for wrap, lookup, rewrap and unwrap with description. [[GH-16489](https://github.com/hashicorp/vault/pull/16489)] -* ui: Replaces non-inclusive terms [[GH-17116](https://github.com/hashicorp/vault/pull/17116)] -* ui: redirect_to param forwards from auth route when authenticated [[GH-16821](https://github.com/hashicorp/vault/pull/16821)] -* website/docs: API generate-recovery-token documentation. 
[[GH-16213](https://github.com/hashicorp/vault/pull/16213)] -* website/docs: Add documentation around the expensiveness of making lots of lease count quotas in a short period [[GH-16950](https://github.com/hashicorp/vault/pull/16950)] -* website/docs: Removes mentions of unauthenticated from internal ui resultant-acl doc [[GH-17139](https://github.com/hashicorp/vault/pull/17139)] -* website/docs: Update replication docs to mention Integrated Storage [[GH-16063](https://github.com/hashicorp/vault/pull/16063)] -* website/docs: changed to echo for all string examples instead of (<<<) here-string. [[GH-9081](https://github.com/hashicorp/vault/pull/9081)] - -BUG FIXES: - -* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] -* agent: Agent will now respect `max_retries` retry configuration even when caching is set. [[GH-16970](https://github.com/hashicorp/vault/pull/16970)] -* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)] -* api/sys/internal/specs/openapi: support a new "dynamic" query parameter to generate generic mountpaths [[GH-15835](https://github.com/hashicorp/vault/pull/15835)] -* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)] -* api: Fixed issue with internal/ui/mounts and internal/ui/mounts/(?P.+) endpoints where it was not properly handling /auth/ [[GH-15552](https://github.com/hashicorp/vault/pull/15552)] -* api: properly handle switching to/from unix domain socket when changing client address [[GH-11904](https://github.com/hashicorp/vault/pull/11904)] -* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. 
[[GH-17138](https://github.com/hashicorp/vault/pull/17138)] -* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)] -* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)] -* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] -* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] -* core (enterprise): Fix bug where wrapping token lookup does not work within namespaces. [[GH-15583](https://github.com/hashicorp/vault/pull/15583)] -* core (enterprise): Fix creation of duplicate entities via alias metadata changes on local auth mounts. -* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] -* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] -* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails -* core/managed-keys (enterprise): fix panic when having `cache_disable` true -* core/quotas (enterprise): Fixed issue with improper counting of leases if lease count quota created after leases -* core/quotas: Added globbing functionality on the end of path suffix quota paths [[GH-16386](https://github.com/hashicorp/vault/pull/16386)] -* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] -* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty -* core/seal: Fix possible keyring truncation when using the file backend. 
[[GH-15946](https://github.com/hashicorp/vault/pull/15946)] -* core: Fix panic when the plugin catalog returns neither a plugin nor an error. [[GH-17204](https://github.com/hashicorp/vault/pull/17204)] -* core: Fixes parsing boolean values for ha_storage backends in config [[GH-15900](https://github.com/hashicorp/vault/pull/15900)] -* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] -* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] -* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] -* debug: Fix panic when capturing debug bundle on Windows [[GH-14399](https://github.com/hashicorp/vault/pull/14399)] -* debug: Remove extra empty lines from vault.log when debug command is run [[GH-16714](https://github.com/hashicorp/vault/pull/16714)] -* identity (enterprise): Fix a data race when creating an entity for a local alias. -* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] -* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] -* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] -* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. 
[[GH-16600](https://github.com/hashicorp/vault/pull/16600)] -* openapi: Fixed issue where information about /auth/token endpoints was not present with explicit policy permissions [[GH-15552](https://github.com/hashicorp/vault/pull/15552)] -* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] -* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)] -* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)] -* quotas/lease-count: Fix lease-count quotas on mounts not properly being enforced when the lease generating request is a read [[GH-15735](https://github.com/hashicorp/vault/pull/15735)] -* replication (enterprise): Fix data race in SaveCheckpoint() -* replication (enterprise): Fix data race in saveCheckpoint. -* replication (enterprise): Fix possible data race during merkle diff/sync -* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] -* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] -* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. 
[[GH-16534](https://github.com/hashicorp/vault/pull/16534)] -* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)] -* secrets/pki: Allow import of issuers without CRLSign KeyUsage; prohibit setting crl-signing usage on such issuers [[GH-16865](https://github.com/hashicorp/vault/pull/16865)] -* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] -* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)] -* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)] -* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)] -* secrets/pki: LIST issuers endpoint is now unauthenticated. [[GH-16830](https://github.com/hashicorp/vault/pull/16830)] -* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. -* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. -* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. -* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] -* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] -* storage/raft: Nodes no longer get demoted to nonvoter if we don't know their version due to missing heartbeats. 
[[GH-17019](https://github.com/hashicorp/vault/pull/17019)] -* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)] -* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] -* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] -* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] -* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] -* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] -* ui: Fixed bug where red spellcheck underline appears in sensitive/secret kv values when it should not appear [[GH-15681](https://github.com/hashicorp/vault/pull/15681)] -* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] -* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)] -* vault: Fix a bug where duplicate policies could be added to an identity group. [[GH-15638](https://github.com/hashicorp/vault/pull/15638)] - -## 1.11.12 -### June 21, 2023 - -CHANGES: - -* core: Bump Go version to 1.19.10. -* licensing (enterprise): Terminated licenses will no longer result in shutdown. Instead, upgrades -will not be allowed if the license termination time is before the build date of the binary. - -FEATURES: - -* **Automated License Utilization Reporting**: Added automated license -utilization reporting, which sends minimal product-license [metering -data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) -to HashiCorp without requiring you to manually collect and report them. 
-* core (enterprise): Add background worker for automatic reporting of billing -information. [[GH-19625](https://github.com/hashicorp/vault/pull/19625)] - -IMPROVEMENTS: - -* api: GET ... /sys/internal/counters/activity?current_billing_period=true now -results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] -* api: `/sys/internal/counters/config` endpoint now contains read-only -`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] -* api: `/sys/internal/counters/config` endpoint now contains read-only -`reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)] -* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] -* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] -* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] -* core (enterprise): vault server command now allows for opt-out of automated -reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. 
[[GH-3939](https://github.com/hashicorp/vault/pull/3939)] -* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] -* core/activity: generate hyperloglogs containing clientIds for each month during precomputation [[GH-16146](https://github.com/hashicorp/vault/pull/16146)] -* core/activity: refactor activity log api to reuse partial api functions in activity endpoint when current month is specified [[GH-16162](https://github.com/hashicorp/vault/pull/16162)] -* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] -* core/activity: use monthly hyperloglogs to calculate new clients approximation for current month [[GH-16184](https://github.com/hashicorp/vault/pull/16184)] -* core: Activity log goroutine management improvements to allow tests to be more deterministic. [[GH-17028](https://github.com/hashicorp/vault/pull/17028)] -* core: Limit activity log client count usage by namespaces [[GH-16000](https://github.com/hashicorp/vault/pull/16000)] -* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] -* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] - -BUG FIXES: - -* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. 
[[GH-18766](https://github.com/hashicorp/vault/pull/18766)] -* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)] -* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)] -* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)] -* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)] -* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)] -* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will -have its own changelog entry. [[GH-21260](https://github.com/hashicorp/vault/pull/21260)] -* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] -* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] -* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] -* identity: Fixes duplicate groups creation with the same name but unique IDs. 
[[GH-20964](https://github.com/hashicorp/vault/pull/20964)] -* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover -* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs - -## 1.11.11 -### June 08, 2023 - -SECURITY: - -* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. [[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)] - -CHANGES: - -* core: Bump Go version to 1.19.9. -* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] - -IMPROVEMENTS: - -* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when -`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. [[GH-20609](https://github.com/hashicorp/vault/pull/20609)] -* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] -* ui: update TTL picker for consistency [[GH-18114](https://github.com/hashicorp/vault/pull/18114)] - -BUG FIXES: - -* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] -* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] -* cli: CLI should take days as a unit of time for ttl like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)] -* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. -* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. 
-* core (enterprise): Remove MFA Enforcment configuration for namespace when deleting namespace -* core: prevent panic on login after namespace is deleted that had mfa enforcement [[GH-20375](https://github.com/hashicorp/vault/pull/20375)] -* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. -* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. -* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation - -## 1.11.10 -### April 26, 2023 - -CHANGES: - -* core: Bump Go version to 1.19.8. - -IMPROVEMENTS: - -* cli/namespace: Add detailed flag to output additional namespace information -such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] -* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] -* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the -`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] -* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration -for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)] - -BUG FIXES: - -* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] -* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. 
When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. -* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur -* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` -resulting in 412 errors. -* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)] -* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] -* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] -* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil -* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. 
[[GH-20341](https://github.com/hashicorp/vault/pull/20341)] -* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens -* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] -* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] -* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] -* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] - -## 1.11.9 -### March 29, 2023 - -SECURITY: - -* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] -* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] -* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. 
An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] - -IMPROVEMENTS: - -* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id -website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)] -* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch -option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)] -* core: validate name identifiers in mssql physical storage backend prior use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)] - -BUG FIXES: - -* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#190](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/190)] [[GH-19720](https://github.com/hashicorp/vault/pull/19720)] -* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)] -* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error. -* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. 
[[GH-19585](https://github.com/hashicorp/vault/pull/19585)] -* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)] -* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions -* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] -* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] -* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] - -## 1.11.8 -### March 01, 2023 - -SECURITY: - -* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999 has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] - -CHANGES: - -* core: Bump Go version to 1.19.6. - -IMPROVEMENTS: - -* secrets/database: Adds error message requiring password on root crednetial rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] - -BUG FIXES: - -* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] -* core (enterprise): Fix panic when using invalid accessor for control-group request -* core (enterprise): Fix perf standby WAL streaming silently failures when replication setup happens at a bad time. 
-* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] -* license (enterprise): Fix bug where license would update even if the license didn't change. -* replication (enterprise): Fix bug where reloading external plugin on a secondary would -break replication. -* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18208](https://github.com/hashicorp/vault/pull/18208)] -* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] -* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] -* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] - -## 1.11.7 -### February 6, 2023 - -CHANGES: - -* core: Bump Go version to 1.19.4. - -IMPROVEMENTS: - -* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] -* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. 
-* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] -* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] -* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] - -BUG FIXES: - -* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] -* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] -* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. -* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace -* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. -* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] -* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. -* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] -* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] -* identity (enterprise): Fix a data race when creating an entity for a local alias. -* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. -* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. 
-* kmip (enterprise): Fix Query operation response that omitted streaming capability and supported profiles. -* licensing (enterprise): update autoloaded license cache after reload -* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] -* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] -* storage/raft (enterprise): An already joined node can rejoin by wiping storage -and re-issueing a join request, but in doing so could transiently become a -non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)] -* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. -* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] -* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] - -## 1.11.6 -### November 30, 2022 - -IMPROVEMENTS: - -* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] - -BUG FIXES: - -* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] -* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] -* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] -* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. 
-* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: fix a start up race condition where performance standbys could go into a - mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] -* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18085](https://github.com/hashicorp/vault/pull/18085)] -* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18110](https://github.com/hashicorp/vault/pull/18110)] -* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] - -## 1.11.5 -### November 2, 2022 - -IMPROVEMENTS: - -* database/snowflake: Allow parallel requests to Snowflake [[GH-17594](https://github.com/hashicorp/vault/pull/17594)] -* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] - -BUG FIXES: - -* core/managed-keys (enterprise): Return better error messages when encountering key creation failures -* core/managed-keys (enterprise): fix panic when having `cache_disable` true -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail. 
-* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] -* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17384](https://github.com/hashicorp/vault/pull/17384)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] - -## 1.11.4 -### September 30, 2022 - -SECURITY: - -* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] - -IMPROVEMENTS: - -* agent/auto-auth: Add `exit_on_err` which when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)] -* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)] - -BUG FIXES: - -* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. 
[[GH-17138](https://github.com/hashicorp/vault/pull/17138)] -* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17162](https://github.com/hashicorp/vault/pull/17162)] -* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] -* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] -* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] -* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] -* replication (enterprise): Fix data race in SaveCheckpoint() -* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. -* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. -* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] -* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] - -## 1.11.3 -### August 31, 2022 - -SECURITY: - -* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. 
[[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - -CHANGES: - -* core: Bump Go version to 1.17.13. - -IMPROVEMENTS: - -* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)] -* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the -Kerberos config in Vault. This removes any instance names found in the keytab -service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] -* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] -* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)] - -BUG FIXES: - -* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)] -* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. [[GH-16523](https://github.com/hashicorp/vault/pull/16523)] -* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)] -* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] -* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails -* database/elasticsearch: Fixes a bug in boolean parsing for initialize [[GH-16526](https://github.com/hashicorp/vault/pull/16526)] -* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. 
[[GH-16599](https://github.com/hashicorp/vault/pull/16599)] -* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the -Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] -* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] -* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)] -* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] -* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)] -* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)] -* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)] -* secrets/pki: LIST issuers endpoint is now unauthenticated. [[GH-16830](https://github.com/hashicorp/vault/pull/16830)] -* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] -* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] -* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] -* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). 
[[GH-16739](https://github.com/hashicorp/vault/pull/16739)] - -SECURITY: - -* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - -## 1.11.2 -### August 2, 2022 - -IMPROVEMENTS: - -* agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)] - -BUG FIXES: - -* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] -* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] -* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)] -* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] - -## 1.11.1 -### July 21, 2022 - -SECURITY: - -* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] - -CHANGES: - -* core: Bump Go version to 1.17.12. 
- -IMPROVEMENTS: - -* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. [[GH-15986](https://github.com/hashicorp/vault/pull/15986)] -* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] -* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] - -BUG FIXES: - -* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] -* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)] -* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] -* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty -* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] -* kmip (enterprise): Return SecretData as supported Object Type. -* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] -* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] -* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] -* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. 
-* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)] - -SECURITY: - -* storage/raft (enterprise): Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HCSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] - -## 1.11.0 -### June 20, 2022 - -CHANGES: - -* auth/aws: Add RoleSession to DisplayName when using assumeRole for authentication [[GH-14954](https://github.com/hashicorp/vault/pull/14954)] -* auth/kubernetes: If `kubernetes_ca_cert` is unset, and there is no pod-local CA available, an error will be surfaced when writing config instead of waiting for login. [[GH-15584](https://github.com/hashicorp/vault/pull/15584)] -* auth: Remove support for legacy MFA -(https://www.vaultproject.io/docs/v1.10.x/auth/mfa) [[GH-14869](https://github.com/hashicorp/vault/pull/14869)] -* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)] -* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] -* core: Bump Go version to 1.17.11. [[GH-go-ver-1110](https://github.com/hashicorp/vault/pull/go-ver-1110)] -* database & storage: Change underlying driver library from [lib/pq](https://github.com/lib/pq) to [pgx](https://github.com/jackc/pgx). 
This change affects Redshift & Postgres database secrets engines, and CockroachDB & Postgres storage engines [[GH-15343](https://github.com/hashicorp/vault/pull/15343)] -* licensing (enterprise): Remove support for stored licenses and associated `sys/license` and `sys/license/signed` -endpoints in favor of [autoloaded licenses](https://www.vaultproject.io/docs/enterprise/license/autoloading). -* replication (enterprise): The `/sys/replication/performance/primary/mount-filter` endpoint has been removed. Please use [Paths Filter](https://www.vaultproject.io/api-docs/system/replication/replication-performance#create-paths-filter) instead. -* secret/pki: Remove unused signature_bits parameter from intermediate CSR generation; this parameter doesn't control the final certificate's signature algorithm selection as that is up to the signing CA [[GH-15478](https://github.com/hashicorp/vault/pull/15478)] -* secrets/kubernetes: Split `additional_metadata` into `extra_annotations` and `extra_labels` parameters [[GH-15655](https://github.com/hashicorp/vault/pull/15655)] -* secrets/pki: A new aliased api path (/pki/issuer/:issuer_ref/sign-self-issued) -providing the same functionality as the existing API(/pki/root/sign-self-issued) -does not require sudo capabilities but the latter still requires it in an -effort to maintain backwards compatibility. [[GH-15211](https://github.com/hashicorp/vault/pull/15211)] -* secrets/pki: Err on unknown role during sign-verbatim. [[GH-15543](https://github.com/hashicorp/vault/pull/15543)] -* secrets/pki: Existing CRL API (/pki/crl) now returns an X.509 v2 CRL instead -of a v1 CRL. [[GH-15100](https://github.com/hashicorp/vault/pull/15100)] -* secrets/pki: The `ca_chain` response field within issuing (/pki/issue/:role) -and signing APIs will now include the root CA certificate if the mount is -aware of it. 
[[GH-15155](https://github.com/hashicorp/vault/pull/15155)] -* secrets/pki: existing Delete Root API (pki/root) will now delete all issuers -and keys within the mount path. [[GH-15004](https://github.com/hashicorp/vault/pull/15004)] -* secrets/pki: existing Generate Root (pki/root/generate/:type), -Set Signed Intermediate (/pki/intermediate/set-signed) APIs will -add new issuers/keys to a mount instead of warning that an existing CA exists [[GH-14975](https://github.com/hashicorp/vault/pull/14975)] -* secrets/pki: the signed CA certificate from the sign-intermediate api will now appear within the ca_chain -response field along with the issuer's ca chain. [[GH-15524](https://github.com/hashicorp/vault/pull/15524)] -* ui: Upgrade Ember to version 3.28 [[GH-14763](https://github.com/hashicorp/vault/pull/14763)] - -FEATURES: - -* **Autopilot Improvements (Enterprise)**: Autopilot on Vault Enterprise now supports automated upgrades and redundancy zones when using integrated storage. -* **KeyMgmt UI**: Add UI support for managing the Key Management Secrets Engine [[GH-15523](https://github.com/hashicorp/vault/pull/15523)] -* **Kubernetes Secrets Engine**: This new secrets engine generates Kubernetes service account tokens, service accounts, role bindings, and roles dynamically. [[GH-15551](https://github.com/hashicorp/vault/pull/15551)] -* **Non-Disruptive Intermediate/Root Certificate Rotation**: This allows -import, generation and configuration of any number of keys and/or issuers -within a PKI mount, providing operators the ability to rotate certificates -in place without affecting existing client configurations. [[GH-15277](https://github.com/hashicorp/vault/pull/15277)] -* **Print minimum required policy for any command**: The global CLI flag `-output-policy` can now be used with any command to print out the minimum required policy HCL for that operation, including whether the given path requires the "sudo" capability. 
[[GH-14899](https://github.com/hashicorp/vault/pull/14899)] -* **Snowflake Database Plugin**: Adds ability to manage RSA key pair credentials for dynamic and static Snowflake users. [[GH-15376](https://github.com/hashicorp/vault/pull/15376)] -* **Transit BYOK**: Allow import of externally-generated keys into the Transit secrets engine. [[GH-15414](https://github.com/hashicorp/vault/pull/15414)] -* nomad: Bootstrap Nomad ACL system if no token is provided [[GH-12451](https://github.com/hashicorp/vault/pull/12451)] -* storage/dynamodb: Added `AWS_DYNAMODB_REGION` environment variable. [[GH-15054](https://github.com/hashicorp/vault/pull/15054)] - -IMPROVEMENTS: - -* activity: return nil response months in activity log API when no month data exists [[GH-15420](https://github.com/hashicorp/vault/pull/15420)] -* agent/auto-auth: Add `min_backoff` to the method stanza for configuring initial backoff duration. [[GH-15204](https://github.com/hashicorp/vault/pull/15204)] -* agent: Update consul-template to v0.29.0 [[GH-15293](https://github.com/hashicorp/vault/pull/15293)] -* agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)] -* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] -* api: Add ability to pass certificate as PEM bytes to api.Client. [[GH-14753](https://github.com/hashicorp/vault/pull/14753)] -* api: Add context-aware functions to vault/api for each API wrapper function. [[GH-14388](https://github.com/hashicorp/vault/pull/14388)] -* api: Added MFALogin() for handling MFA flow when using login helpers. 
[[GH-14900](https://github.com/hashicorp/vault/pull/14900)] -* api: If the parameters supplied over the API payload are ignored due to not -being what the endpoints were expecting, or if the parameters supplied get -replaced by the values in the endpoint's path itself, warnings will be added to -the non-empty responses listing all the ignored and replaced parameters. [[GH-14962](https://github.com/hashicorp/vault/pull/14962)] -* api: KV helper methods to simplify the common use case of reading and writing KV secrets [[GH-15305](https://github.com/hashicorp/vault/pull/15305)] -* api: Provide a helper method WithNamespace to create a cloned client with a new NS [[GH-14963](https://github.com/hashicorp/vault/pull/14963)] -* api: Support VAULT_PROXY_ADDR environment variable to allow overriding the Vault client's HTTP proxy. [[GH-15377](https://github.com/hashicorp/vault/pull/15377)] -* api: Use the context passed to the api/auth Login helpers. [[GH-14775](https://github.com/hashicorp/vault/pull/14775)] -* api: make ListPlugins parse only known plugin types [[GH-15434](https://github.com/hashicorp/vault/pull/15434)] -* audit: Add a policy_results block into the audit log that contains the set of -policies that granted this request access. 
[[GH-15457](https://github.com/hashicorp/vault/pull/15457)] -* audit: Include mount_accessor in audit request and response logs [[GH-15342](https://github.com/hashicorp/vault/pull/15342)] -* audit: added entity_created boolean to audit log, set when login operations create an entity [[GH-15487](https://github.com/hashicorp/vault/pull/15487)] -* auth/aws: Add rsa2048 signature type to API [[GH-15719](https://github.com/hashicorp/vault/pull/15719)] -* auth/gcp: Enable the Google service endpoints used by the underlying client to be customized [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] -* auth/gcp: Vault CLI now infers the service account email when running on Google Cloud [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] -* auth/jwt: Adds ability to use JSON pointer syntax for the `user_claim` value. [[GH-15593](https://github.com/hashicorp/vault/pull/15593)] -* auth/okta: Add support for Google provider TOTP type in the Okta auth method [[GH-14985](https://github.com/hashicorp/vault/pull/14985)] -* auth/okta: Add support for performing [the number -challenge](https://help.okta.com/en-us/Content/Topics/Mobile/ov-admin-config.htm?cshid=csh-okta-verify-number-challenge-v1#enable-number-challenge) -during an Okta Verify push challenge [[GH-15361](https://github.com/hashicorp/vault/pull/15361)] -* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] -* auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)] -* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] -* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] -* cli/vault: warn when policy name contains upper-case letter 
[[GH-14670](https://github.com/hashicorp/vault/pull/14670)] -* cli: Alternative flag-based syntax for KV to mitigate confusion from automatically appended /data [[GH-14807](https://github.com/hashicorp/vault/pull/14807)] -* cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)] -* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] -* command: Support optional '-log-level' flag to be passed to 'operator migrate' command (defaults to info). Also support VAULT_LOG_LEVEL env var. [[GH-15405](https://github.com/hashicorp/vault/pull/15405)] -* command: Support the optional '-detailed' flag to be passed to 'vault list' command to show ListResponseWithInfo data. Also supports the VAULT_DETAILED env var. [[GH-15417](https://github.com/hashicorp/vault/pull/15417)] -* core (enterprise): Include `termination_time` in `sys/license/status` response -* core (enterprise): Include termination time in `license inspect` command output -* core,transit: Allow callers to choose random byte source including entropy augmentation sources for the sys/tools/random and transit/random endpoints. [[GH-15213](https://github.com/hashicorp/vault/pull/15213)] -* core/activity: Order month data in ascending order of timestamps [[GH-15259](https://github.com/hashicorp/vault/pull/15259)] -* core/activity: allow client counts to be precomputed and queried on non-contiguous chunks of data [[GH-15352](https://github.com/hashicorp/vault/pull/15352)] -* core/managed-keys (enterprise): Allow configuring the number of parallel operations to PKCS#11 managed keys. -* core: Add an export API for historical activity log data [[GH-15586](https://github.com/hashicorp/vault/pull/15586)] -* core: Add new DB methods that do not prepare statements. 
[[GH-15166](https://github.com/hashicorp/vault/pull/15166)] -* core: check uid and permissions of config dir, config file, plugin dir and plugin binaries [[GH-14817](https://github.com/hashicorp/vault/pull/14817)] -* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)] -* core: Include build date in `sys/seal-status` and `sys/version-history` endpoints. [[GH-14957](https://github.com/hashicorp/vault/pull/14957)] -* core: Upgrade golang.org/x/crypto/ssh [[GH-15125](https://github.com/hashicorp/vault/pull/15125)] -* kmip (enterprise): Implement operations Query, Import, Encrypt and Decrypt. Improve operations Locate, Add Attribute, Get Attributes and Get Attribute List to handle most supported attributes. -* mfa/okta: migrate to use official Okta SDK [[GH-15355](https://github.com/hashicorp/vault/pull/15355)] -* sdk: Change OpenAPI code generator to extract request objects into /components/schemas and reference them by name. [[GH-14217](https://github.com/hashicorp/vault/pull/14217)] -* secrets/consul: Add support for Consul node-identities and service-identities [[GH-15295](https://github.com/hashicorp/vault/pull/15295)] -* secrets/consul: Vault is now able to automatically bootstrap the Consul ACL system. [[GH-10751](https://github.com/hashicorp/vault/pull/10751)] -* secrets/database/elasticsearch: Use the new /_security base API path instead of /_xpack/security when managing elasticsearch. [[GH-15614](https://github.com/hashicorp/vault/pull/15614)] -* secrets/pki: Add not_before_duration to root CA generation, intermediate CA signing paths. 
[[GH-14178](https://github.com/hashicorp/vault/pull/14178)] -* secrets/pki: Add support for CPS URLs and User Notice to Policy Information [[GH-15751](https://github.com/hashicorp/vault/pull/15751)] -* secrets/pki: Allow operators to control the issuing certificate behavior when -the requested TTL is beyond the NotAfter value of the signing certificate [[GH-15152](https://github.com/hashicorp/vault/pull/15152)] -* secrets/pki: Always return CRLs, URLs configurations, even if using the default value. [[GH-15470](https://github.com/hashicorp/vault/pull/15470)] -* secrets/pki: Enable Patch Functionality for Roles and Issuers (API only) [[GH-15510](https://github.com/hashicorp/vault/pull/15510)] -* secrets/pki: Have pki/sign-verbatim use the not_before_duration field defined in the role [[GH-15429](https://github.com/hashicorp/vault/pull/15429)] -* secrets/pki: Warn on empty Subject field during issuer generation (root/generate and root/sign-intermediate). [[GH-15494](https://github.com/hashicorp/vault/pull/15494)] -* secrets/pki: Warn on missing AIA access information when generating issuers (config/urls). [[GH-15509](https://github.com/hashicorp/vault/pull/15509)] -* secrets/pki: Warn when `generate_lease` and `no_store` are both set to `true` on requests. [[GH-14292](https://github.com/hashicorp/vault/pull/14292)] -* secrets/ssh: Add connection timeout of 1 minute for outbound SSH connection in deprecated Dynamic SSH Keys mode. [[GH-15440](https://github.com/hashicorp/vault/pull/15440)] -* secrets/ssh: Support for `add_before_duration` in SSH [[GH-15250](https://github.com/hashicorp/vault/pull/15250)] -* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer -* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections. 
[[GH-15042](https://github.com/hashicorp/vault/pull/15042)] -* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] -* ui: Default auto-rotation period in transit is 30 days [[GH-15474](https://github.com/hashicorp/vault/pull/15474)] -* ui: Parse schema refs from OpenAPI [[GH-14508](https://github.com/hashicorp/vault/pull/14508)] -* ui: Remove stored license references [[GH-15513](https://github.com/hashicorp/vault/pull/15513)] -* ui: Remove storybook. [[GH-15074](https://github.com/hashicorp/vault/pull/15074)] -* ui: Replaces the IvyCodemirror wrapper with a custom ember modifier. [[GH-14659](https://github.com/hashicorp/vault/pull/14659)] -* website/docs: Add usage documentation for Kubernetes Secrets Engine [[GH-15527](https://github.com/hashicorp/vault/pull/15527)] -* website/docs: added a link to an Enigma secret plugin. [[GH-14389](https://github.com/hashicorp/vault/pull/14389)] - -DEPRECATIONS: - -* docs: Document removal of X.509 certificates with signatures who use SHA-1 in Vault 1.12 [[GH-15581](https://github.com/hashicorp/vault/pull/15581)] -* secrets/consul: Deprecate old parameters "token_type" and "policy" [[GH-15550](https://github.com/hashicorp/vault/pull/15550)] -* secrets/consul: Deprecate parameter "policies" in favor of "consul_policies" for consistency [[GH-15400](https://github.com/hashicorp/vault/pull/15400)] - -BUG FIXES: - -* Fixed panic when adding or modifying a Duo MFA Method in Enterprise -* agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)] -* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] -* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] -* api: Fixes bug where OutputCurlString field was unintentionally being 
copied over during client cloning [[GH-14968](https://github.com/hashicorp/vault/pull/14968)] -* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] -* auth/approle: Add maximum length for input values that result in SHA256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] -* auth/kubernetes: Fix error code when using the wrong service account [[GH-15584](https://github.com/hashicorp/vault/pull/15584)] -* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set -has been fixed. The previous behavior would make a request to the LDAP server to -get `user_attr` before discarding it and using the username instead. This would -make it impossible for a user to connect if this attribute was missing or had -multiple values, even though it would not be used anyway. This has been fixed -and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] -* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)] -* auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)] -* auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)] -* cassandra: Update gocql Cassandra client to fix "no hosts 
available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] -* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] -* cli: kv get command now honors trailing spaces to retrieve secrets [[GH-15188](https://github.com/hashicorp/vault/pull/15188)] -* command: do not report listener and storage types as key not found warnings [[GH-15383](https://github.com/hashicorp/vault/pull/15383)] -* core (enterprise): Allow local alias create RPCs to persist alias metadata -* core (enterprise): Fix overcounting of lease count quota usage at startup. -* core (enterprise): Fix some races in merkle index flushing code found in testing -* core (enterprise): Handle additional edge cases reinitializing PKCS#11 libraries after login errors. -* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] -* core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number -* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] -* core: Fix double counting for "route" metrics [[GH-12763](https://github.com/hashicorp/vault/pull/12763)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] -* core: Fix panic caused by parsing policies with empty slice values. 
[[GH-14501](https://github.com/hashicorp/vault/pull/14501)] -* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] -* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] -* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] -* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] -* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] -* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] -* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] -* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] -* core: renaming the environment variable VAULT_DISABLE_FILE_PERMISSIONS_CHECK to VAULT_ENABLE_FILE_PERMISSIONS_CHECK and adjusting the logic [[GH-15452](https://github.com/hashicorp/vault/pull/15452)] -* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] -* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] -* identity: deduplicate policies when creating/updating identity groups [[GH-15055](https://github.com/hashicorp/vault/pull/15055)] -* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] -* plugin: Fix a bug where plugin reload would falsely report success in certain scenarios. 
[[GH-15579](https://github.com/hashicorp/vault/pull/15579)] -* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] -* raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)] -* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] -* sdk/cidrutil: Only check if cidr contains remote address for IP addresses [[GH-14487](https://github.com/hashicorp/vault/pull/14487)] -* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] -* sdk: Fix OpenApi spec generator to remove duplicate sha_256 parameter [[GH-15163](https://github.com/hashicorp/vault/pull/15163)] -* secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)] -* secrets/kv: Fix issue preventing the ability to reset the `delete_version_after` key metadata field to 0s via HTTP `PATCH`. [[GH-15792](https://github.com/hashicorp/vault/pull/15792)] -* secrets/pki: CRLs on performance secondary clusters are now automatically -rebuilt upon changes to the list of issuers. [[GH-15179](https://github.com/hashicorp/vault/pull/15179)] -* secrets/pki: Fix handling of "any" key type with default zero signature bits value. 
[[GH-14875](https://github.com/hashicorp/vault/pull/14875)] -* secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)] -* secrets/ssh: Convert role field not_before_duration to seconds before returning it [[GH-15559](https://github.com/hashicorp/vault/pull/15559)] -* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. -* storage/raft: Forward autopilot state requests on perf standbys to active node. [[GH-15493](https://github.com/hashicorp/vault/pull/15493)] -* storage/raft: joining a node to a cluster now ignores any VAULT_NAMESPACE environment variable set on the server process [[GH-15519](https://github.com/hashicorp/vault/pull/15519)] -* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)] -* ui: Fix KV secret showing in the edit form after a user creates a new version but doesn't have read capabilities [[GH-14794](https://github.com/hashicorp/vault/pull/14794)] -* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)] -* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] -* ui: Fix issue with KV not recomputing model when you changed versions. 
[[GH-14941](https://github.com/hashicorp/vault/pull/14941)] -* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)] -* ui: Fixed unsupported revocation statements field for DB roles [[GH-15573](https://github.com/hashicorp/vault/pull/15573)] -* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] -* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)] -* ui: Revert using localStorage in favor of sessionStorage [[GH-15769](https://github.com/hashicorp/vault/pull/15769)] -* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] -* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)] -* ui: fix form validations ignoring default values and disabling submit button [[GH-15560](https://github.com/hashicorp/vault/pull/15560)] -* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] -* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] - -## 1.10.11 -### March 01, 2023 - -SECURITY: - -* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999 has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. 
[[HCSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] - -CHANGES: - -* core: Bump Go version to 1.19.6. - -IMPROVEMENTS: - -* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] - -BUG FIXES: - -* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] -* core (enterprise): Fix panic when using invalid accessor for control-group request -* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] -* replication (enterprise): Fix bug where reloading external plugin on a secondary would -break replication. -* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18209](https://github.com/hashicorp/vault/pull/18209)] -* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] -* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] - -## 1.10.10 -### February 6, 2023 - -CHANGES: - -* core: Bump Go version to 1.19.4. - -IMPROVEMENTS: - -* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] -* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. 
-* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] -* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] -* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] - -BUG FIXES: - -* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] -* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] -* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] -* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. -* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace -* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. -* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] -* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. -* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] -* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] -* identity (enterprise): Fix a data race when creating an entity for a local alias. 
-* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. -* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. -* licensing (enterprise): update autoloaded license cache after reload -* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. -* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] - -## 1.10.9 -### November 30, 2022 - -BUG FIXES: - -* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] -* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. -* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] -* core: fix a start up race condition where performance standbys could go into a - mount loop if default policies are not yet synced from the active node. 
[[GH-17801](https://github.com/hashicorp/vault/pull/17801)] -* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18084](https://github.com/hashicorp/vault/pull/18084)] -* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18109](https://github.com/hashicorp/vault/pull/18109)] -* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] - -## 1.10.8 -### November 2, 2022 - -BUG FIXES: - -* core/managed-keys (enterprise): Return better error messages when encountering key creation failures -* core/managed-keys (enterprise): fix panic when having `cache_disable` true -* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] -* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] -* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] -* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] -* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] -* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] - -## 1.10.7 -### September 30, 2022 - -SECURITY: - -* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. 
[[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] - -BUG FIXES: - -* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] -* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] -* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] -* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] -* replication (enterprise): Fix data race in SaveCheckpoint() -* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. -* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. -* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] - -## 1.10.6 -### August 31, 2022 - -SECURITY: - -* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. [[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - -CHANGES: - -* core: Bump Go version to 1.17.13. - -IMPROVEMENTS: - -* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. 
[[GH-16598](https://github.com/hashicorp/vault/pull/16598)] - -BUG FIXES: - -* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. [[GH-16524](https://github.com/hashicorp/vault/pull/16524)] -* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] -* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] -* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] -* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] -* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] -* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the -Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] -* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] -* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] -* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. 
[[GH-16534](https://github.com/hashicorp/vault/pull/16534)] -* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] -* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] -* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] -* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] - -SECURITY: - -* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] - -## 1.10.5 -### July 21, 2022 - -SECURITY: - -* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] - -CHANGES: - -* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)] -* core: Bump Go version to 1.17.12. 
- -IMPROVEMENTS: - -* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] -* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] - -BUG FIXES: - -* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] -* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] -* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty -* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] -* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] -* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] -* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] -* storage/raft (enterprise): Prevent unauthenticated voter status with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] -* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. 
-* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] -* ui: Revert using localStorage in favor of sessionStorage [[GH-16169](https://github.com/hashicorp/vault/pull/16169)] -* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] - -## 1.10.4 -### June 10, 2022 - -CHANGES: - -* core: Bump Go version to 1.17.11. [[GH-go-ver-1104](https://github.com/hashicorp/vault/pull/go-ver-1104)] - -IMPROVEMENTS: - -* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] -* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] -* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] -* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] -* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] -* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)] -* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections. 
[[GH-15042](https://github.com/hashicorp/vault/pull/15042)] -* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] - -BUG FIXES: - -* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] -* auth/kubernetes: Fix error code when using the wrong service account [[GH-15585](https://github.com/hashicorp/vault/pull/15585)] -* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set -has been fixed. The previous behavior would make a request to the LDAP server to -get `user_attr` before discarding it and using the username instead. This would -make it impossible for a user to connect if this attribute was missing or had -multiple values, even though it would not be used anyway. This has been fixed -and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] -* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] -* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)] -* core (enterprise): Fix overcounting of lease count quota usage at startup. -* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] -* core: Prevent metrics generation from causing deadlocks. 
[[GH-15693](https://github.com/hashicorp/vault/pull/15693)] -* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] -* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] -* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. -* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. -* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)] -* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)] -* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)] - -## 1.10.3 -### May 11, 2022 - -SECURITY: -* auth: A vulnerability was identified in Vault and Vault Enterprise (“Vault”) from 1.10.0 to 1.10.2 where MFA may not be enforced on user logins after a server restart. This vulnerability, CVE-2022-30689, was fixed in Vault 1.10.3. - -BUG FIXES: - -* auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)] -* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... 
}} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] -* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] - -## 1.10.2 -### April 29, 2022 - -BUG FIXES: - -* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] -* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] - -## 1.10.1 -### April 22, 2022 - -CHANGES: - -* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] -* core: Bump Go version to 1.17.9. [[GH-15044](https://github.com/hashicorp/vault/pull/15044)] - -IMPROVEMENTS: - -* agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)] -* auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)] -* cli/vault: warn when policy name contains upper-case letter [[GH-14670](https://github.com/hashicorp/vault/pull/14670)] -* cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)] -* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer - -BUG FIXES: - -* Fixed panic when adding or modifying a Duo MFA Method in Enterprise -* agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)] -* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed 
[[GH-14269](https://github.com/hashicorp/vault/pull/14269)] -* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] -* auth/approle: Add maximum length for input values that result in SHA-256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] -* auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)] -* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] -* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] -* core (enterprise): Allow local alias create RPCs to persist alias metadata [[GH-changelog:_2747](https://github.com/hashicorp/vault/pull/changelog:_2747)] -* core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number -* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] -* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] -* core: Fix panic caused by parsing policies with empty slice values. 
[[GH-14501](https://github.com/hashicorp/vault/pull/14501)] -* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] -* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] -* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] -* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] -* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] -* raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)] -* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] -* secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)] -* secrets/pki: Fix handling of "any" key type with default zero signature bits value. [[GH-14875](https://github.com/hashicorp/vault/pull/14875)] -* secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)] -* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. 
[[GH-15046](https://github.com/hashicorp/vault/pull/15046)] -* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] -* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)] -* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] -* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] - -## 1.10.0 -### March 23, 2022 - -CHANGES: - -* core (enterprise): requests with newly generated tokens to perf standbys which are lagging behind the active node return http 412 instead of 400/403/50x. -* core: Changes the unit of `default_lease_ttl` and `max_lease_ttl` values returned by -the `/sys/config/state/sanitized` endpoint from nanoseconds to seconds. [[GH-14206](https://github.com/hashicorp/vault/pull/14206)] -* core: Bump Go version to 1.17.7. [[GH-14232](https://github.com/hashicorp/vault/pull/14232)] -* plugin/database: The return value from `POST /database/config/:name` has been updated to "204 No Content" [[GH-14033](https://github.com/hashicorp/vault/pull/14033)] -* secrets/azure: Changes the configuration parameter `use_microsoft_graph_api` to use the Microsoft -Graph API by default. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] -* storage/etcd: Remove support for v2. [[GH-14193](https://github.com/hashicorp/vault/pull/14193)] -* ui: Upgrade Ember to version 3.24 [[GH-13443](https://github.com/hashicorp/vault/pull/13443)] - -FEATURES: - -* **Database plugin multiplexing**: manage multiple database connections with a single plugin process [[GH-14033](https://github.com/hashicorp/vault/pull/14033)] -* **Login MFA**: Single and two phase MFA is now available when authenticating to Vault. 
[[GH-14025](https://github.com/hashicorp/vault/pull/14025)] -* **Mount Migration**: Vault supports moving secrets and auth mounts both within and across namespaces. -* **Postgres in the UI**: Postgres DB is now supported by the UI [[GH-12945](https://github.com/hashicorp/vault/pull/12945)] -* **Report in-flight requests**: Adding a trace capability to show in-flight requests, and a new gauge metric to show the total number of in-flight requests [[GH-13024](https://github.com/hashicorp/vault/pull/13024)] -* **Server Side Consistent Tokens**: Service tokens have been updated to be longer (a minimum of 95 bytes) and token prefixes for all token types are updated from s., b., and r. to hvs., hvb., and hvr. for service, batch, and recovery tokens respectively. Vault clusters with integrated storage will now have read-after-write consistency by default. [[GH-14109](https://github.com/hashicorp/vault/pull/14109)] -* **Transit SHA-3 Support**: Add support for SHA-3 in the Transit backend. [[GH-13367](https://github.com/hashicorp/vault/pull/13367)] -* **Transit Time-Based Key Autorotation**: Add support for automatic, time-based key rotation to transit secrets engine, including in the UI. [[GH-13691](https://github.com/hashicorp/vault/pull/13691)] -* **UI Client Count Improvements**: Restructures client count dashboard, making use of billing start date to improve accuracy. Adds mount-level distribution and filtering. [[GH-client-counts](https://github.com/hashicorp/vault/pull/client-counts)] -* **Agent Telemetry**: The Vault Agent can now collect and return telemetry information at the `/agent/v1/metrics` endpoint. - -IMPROVEMENTS: - -* agent: Adds ability to configure specific user-assigned managed identities for Azure auto-auth. 
[[GH-14214](https://github.com/hashicorp/vault/pull/14214)] -* agent: The `agent/v1/quit` endpoint can now be used to stop the Vault Agent remotely [[GH-14223](https://github.com/hashicorp/vault/pull/14223)] -* api: Allow cloning `api.Client` tokens via `api.Config.CloneToken` or `api.Client.SetCloneToken()`. [[GH-13515](https://github.com/hashicorp/vault/pull/13515)] -* api: Define constants for X-Vault-Forward and X-Vault-Inconsistent headers [[GH-14067](https://github.com/hashicorp/vault/pull/14067)] -* api: Implements Login method in Go client libraries for GCP and Azure auth methods [[GH-13022](https://github.com/hashicorp/vault/pull/13022)] -* api: Implements Login method in Go client libraries for LDAP auth methods [[GH-13841](https://github.com/hashicorp/vault/pull/13841)] -* api: Trim newline character from wrapping token in logical.Unwrap from the api package [[GH-13044](https://github.com/hashicorp/vault/pull/13044)] -* api: add api method for modifying raft autopilot configuration [[GH-12428](https://github.com/hashicorp/vault/pull/12428)] -* api: respect WithWrappingToken() option during AppRole login authentication when used with secret ID specified from environment or from string [[GH-13241](https://github.com/hashicorp/vault/pull/13241)] -* audit: The audit logs now contain the port used by the client [[GH-12790](https://github.com/hashicorp/vault/pull/12790)] -* auth/aws: Enable region detection in the CLI by specifying the region as `auto` [[GH-14051](https://github.com/hashicorp/vault/pull/14051)] -* auth/cert: Add certificate extensions as metadata [[GH-13348](https://github.com/hashicorp/vault/pull/13348)] -* auth/jwt: The Authorization Code flow makes use of the Proof Key for Code Exchange (PKCE) extension. 
[[GH-13365](https://github.com/hashicorp/vault/pull/13365)] -* auth/kubernetes: Added support for dynamically reloading short-lived tokens for better Kubernetes 1.21+ compatibility [[GH-13595](https://github.com/hashicorp/vault/pull/13595)] -* auth/ldap: Add a response warning and server log whenever the config is accessed -if `userfilter` doesn't consider `userattr` [[GH-14095](https://github.com/hashicorp/vault/pull/14095)] -* auth/ldap: Add username to alias metadata [[GH-13669](https://github.com/hashicorp/vault/pull/13669)] -* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] -* auth/okta: Update [okta-sdk-golang](https://github.com/okta/okta-sdk-golang) dependency to version v2.9.1 for improved request backoff handling [[GH-13439](https://github.com/hashicorp/vault/pull/13439)] -* auth/token: The `auth/token/revoke-accessor` endpoint is now idempotent and will -not error out if the token has already been revoked. [[GH-13661](https://github.com/hashicorp/vault/pull/13661)] -* auth: reading `sys/auth/:path` now returns the configuration for the auth engine mounted at the given path [[GH-12793](https://github.com/hashicorp/vault/pull/12793)] -* cli: interactive CLI for login mfa [[GH-14131](https://github.com/hashicorp/vault/pull/14131)] -* command (enterprise): "vault license get" now uses non-deprecated endpoint /sys/license/status -* core/ha: Add new mechanism for keeping track of peers talking to active node, and new 'operator members' command to view them. [[GH-13292](https://github.com/hashicorp/vault/pull/13292)] -* core/identity: Support updating an alias' `custom_metadata` to be empty. 
[[GH-13395](https://github.com/hashicorp/vault/pull/13395)] -* core/pki: Support Y10K value in notAfter field to be compliant with IEEE 802.1AR-2018 standard [[GH-12795](https://github.com/hashicorp/vault/pull/12795)] -* core/pki: Support Y10K value in notAfter field when signing non-CA certificates [[GH-13736](https://github.com/hashicorp/vault/pull/13736)] -* core: Add duration and start_time to completed requests log entries [[GH-13682](https://github.com/hashicorp/vault/pull/13682)] -* core: Add support to list password policies at `sys/policies/password` [[GH-12787](https://github.com/hashicorp/vault/pull/12787)] -* core: Add support to list version history via API at `sys/version-history` and via CLI with `vault version-history` [[GH-13766](https://github.com/hashicorp/vault/pull/13766)] -* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] -* core: Periodically test the health of connectivity to auto-seal backends [[GH-13078](https://github.com/hashicorp/vault/pull/13078)] -* core: Reading `sys/mounts/:path` now returns the configuration for the secret engine at the given path [[GH-12792](https://github.com/hashicorp/vault/pull/12792)] -* core: Replace "master key" terminology with "root key" [[GH-13324](https://github.com/hashicorp/vault/pull/13324)] -* core: Small changes to ensure goroutines terminate in tests [[GH-14197](https://github.com/hashicorp/vault/pull/14197)] -* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] -* core: Update github.com/prometheus/client_golang to fix security vulnerability CVE-2022-21698. [[GH-14190](https://github.com/hashicorp/vault/pull/14190)] -* core: Vault now supports the PROXY protocol v2. Support for UNKNOWN connections -has also been added to the PROXY protocol v1. 
[[GH-13540](https://github.com/hashicorp/vault/pull/13540)] -* http (enterprise): Serve /sys/license/status endpoint within namespaces -* identity/oidc: Adds a default OIDC provider [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] -* identity/oidc: Adds a default key for OIDC clients [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] -* identity/oidc: Adds an `allow_all` assignment that permits all entities to authenticate via an OIDC client [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] -* identity/oidc: Adds proof key for code exchange (PKCE) support to OIDC providers. [[GH-13917](https://github.com/hashicorp/vault/pull/13917)] -* sdk: Add helper for decoding root tokens [[GH-10505](https://github.com/hashicorp/vault/pull/10505)] -* secrets/azure: Adds support for rotate-root. [#70](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/70) [[GH-13034](https://github.com/hashicorp/vault/pull/13034)] -* secrets/consul: Add support for consul enterprise namespaces and admin partitions. [[GH-13850](https://github.com/hashicorp/vault/pull/13850)] -* secrets/consul: Add support for consul roles. [[GH-14014](https://github.com/hashicorp/vault/pull/14014)] -* secrets/database/influxdb: Switch/upgrade to the `influxdb1-client` module [[GH-12262](https://github.com/hashicorp/vault/pull/12262)] -* secrets/database: Add database configuration parameter 'disable_escaping' for username and password when connecting to a database. 
[[GH-13414](https://github.com/hashicorp/vault/pull/13414)] -* secrets/kv: add full secret path output to table-formatted responses [[GH-14301](https://github.com/hashicorp/vault/pull/14301)] -* secrets/kv: add patch support for KVv2 key metadata [[GH-13215](https://github.com/hashicorp/vault/pull/13215)] -* secrets/kv: add subkeys endpoint to retrieve a secret's structure without its values [[GH-13893](https://github.com/hashicorp/vault/pull/13893)] -* secrets/pki: Add ability to fetch individual certificate as DER or PEM [[GH-10948](https://github.com/hashicorp/vault/pull/10948)] -* secrets/pki: Add count and duration metrics to PKI issue and revoke calls. [[GH-13889](https://github.com/hashicorp/vault/pull/13889)] -* secrets/pki: Add error handling for error types other than UserError or InternalError [[GH-14195](https://github.com/hashicorp/vault/pull/14195)] -* secrets/pki: Allow URI SAN templates in allowed_uri_sans when allowed_uri_sans_template is set to true. [[GH-10249](https://github.com/hashicorp/vault/pull/10249)] -* secrets/pki: Allow other_sans in sign-intermediate and sign-verbatim [[GH-13958](https://github.com/hashicorp/vault/pull/13958)] -* secrets/pki: Calculate the Subject Key Identifier as suggested in [RFC 5280, Section 4.2.1.2](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2). [[GH-11218](https://github.com/hashicorp/vault/pull/11218)] -* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)] -* secrets/pki: Return complete chain (in `ca_chain` field) on calls to `pki/cert/ca_chain` [[GH-13935](https://github.com/hashicorp/vault/pull/13935)] -* secrets/pki: Use application/pem-certificate-chain for PEM certificates, application/x-pem-file for PEM CRLs [[GH-13927](https://github.com/hashicorp/vault/pull/13927)] -* secrets/pki: select appropriate signature algorithm for ECDSA signature on certificates. 
[[GH-11216](https://github.com/hashicorp/vault/pull/11216)] -* secrets/ssh: Add support for generating non-RSA SSH CAs [[GH-14008](https://github.com/hashicorp/vault/pull/14008)] -* secrets/ssh: Allow specifying multiple approved key lengths for a single algorithm [[GH-13991](https://github.com/hashicorp/vault/pull/13991)] -* secrets/ssh: Use secure default for algorithm signer (rsa-sha2-256) with RSA SSH CA keys on new roles [[GH-14006](https://github.com/hashicorp/vault/pull/14006)] -* secrets/transit: Don't abort transit encrypt or decrypt batches on single item failure. [[GH-13111](https://github.com/hashicorp/vault/pull/13111)] -* storage/aerospike: Upgrade `aerospike-client-go` to v5.6.0. [[GH-12165](https://github.com/hashicorp/vault/pull/12165)] -* storage/raft: Set InitialMmapSize to 100GB on 64bit architectures [[GH-13178](https://github.com/hashicorp/vault/pull/13178)] -* storage/raft: When using retry_join stanzas, join against all of them in parallel. [[GH-13606](https://github.com/hashicorp/vault/pull/13606)] -* sys/raw: Enhance sys/raw to read and write values that cannot be encoded in json. 
[[GH-13537](https://github.com/hashicorp/vault/pull/13537)] -* ui: Add support for ECDSA and Ed25519 certificate views [[GH-13894](https://github.com/hashicorp/vault/pull/13894)] -* ui: Add version diff view for KV V2 [[GH-13000](https://github.com/hashicorp/vault/pull/13000)] -* ui: Added client side paging for namespace list view [[GH-13195](https://github.com/hashicorp/vault/pull/13195)] -* ui: Adds flight icons to UI [[GH-12976](https://github.com/hashicorp/vault/pull/12976)] -* ui: Adds multi-factor authentication support [[GH-14049](https://github.com/hashicorp/vault/pull/14049)] -* ui: Allow static role credential rotation in Database secrets engines [[GH-14268](https://github.com/hashicorp/vault/pull/14268)] -* ui: Display badge for all versions in secrets engine header [[GH-13015](https://github.com/hashicorp/vault/pull/13015)] -* ui: Swap browser localStorage in favor of sessionStorage [[GH-14054](https://github.com/hashicorp/vault/pull/14054)] -* ui: The integrated web terminal now accepts both `-f` and `--force` as aliases -for `-force` for the `write` command. [[GH-13683](https://github.com/hashicorp/vault/pull/13683)] -* ui: Transform advanced templating with encode/decode format support [[GH-13908](https://github.com/hashicorp/vault/pull/13908)] -* ui: Updates ember blueprints to glimmer components [[GH-13149](https://github.com/hashicorp/vault/pull/13149)] -* ui: customizes empty state messages for transit and transform [[GH-13090](https://github.com/hashicorp/vault/pull/13090)] - -BUG FIXES: - -* Fixed bug where auth method only considers system-identity when multiple identities are available. 
[#50](https://github.com/hashicorp/vault-plugin-auth-azure/pull/50) [[GH-14138](https://github.com/hashicorp/vault/pull/14138)] -* activity log (enterprise): allow partial monthly client count to be accessed from namespaces [[GH-13086](https://github.com/hashicorp/vault/pull/13086)] -* agent: Fixes bug where vault agent is unaware of the namespace in the config when wrapping token -* api/client: Fixes an issue where the `replicateStateStore` was being set to `nil` upon consecutive calls to `client.SetReadYourWrites(true)`. [[GH-13486](https://github.com/hashicorp/vault/pull/13486)] -* auth/approle: Fix regression where unset cidrlist is returned as nil instead of zero-length array. [[GH-13235](https://github.com/hashicorp/vault/pull/13235)] -* auth/approle: Fix wrapping of nil errors in `login` endpoint [[GH-14107](https://github.com/hashicorp/vault/pull/14107)] -* auth/github: Use the Organization ID instead of the Organization name to verify the org membership. [[GH-13332](https://github.com/hashicorp/vault/pull/13332)] -* auth/kubernetes: Properly handle the migration of role storage entries containing an empty `alias_name_source` [[GH-13925](https://github.com/hashicorp/vault/pull/13925)] -* auth/kubernetes: ensure valid entity alias names created for projected volume tokens [[GH-14144](https://github.com/hashicorp/vault/pull/14144)] -* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13492](https://github.com/hashicorp/vault/pull/13492)] -* cli: Fix using kv patch with older server versions that don't support HTTP PATCH. [[GH-13615](https://github.com/hashicorp/vault/pull/13615)] -* core (enterprise): Fix a data race in logshipper. -* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions -* core/api: Fix overwriting of request headers when using JSONMergePatch. 
[[GH-14222](https://github.com/hashicorp/vault/pull/14222)] -* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13093](https://github.com/hashicorp/vault/pull/13093)] -* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13476](https://github.com/hashicorp/vault/pull/13476)] -* core/token: Fix null token panic from 'v1/auth/token/' endpoints and return proper error response. [[GH-13233](https://github.com/hashicorp/vault/pull/13233)] -* core/token: Fix null token_type panic resulting from 'v1/auth/token/roles/{role_name}' endpoint [[GH-13236](https://github.com/hashicorp/vault/pull/13236)] -* core: Fix warnings logged on perf standbys re stored versions [[GH-13042](https://github.com/hashicorp/vault/pull/13042)] -* core: `-output-curl-string` now properly sets cURL options for client and CA -certificates. [[GH-13660](https://github.com/hashicorp/vault/pull/13660)] -* core: add support for go-sockaddr templates in the top-level cluster_addr field [[GH-13678](https://github.com/hashicorp/vault/pull/13678)] -* core: authentication to "login" endpoint for non-existent mount path returns permission denied with status code 403 [[GH-13162](https://github.com/hashicorp/vault/pull/13162)] -* core: revert some unintentionally downgraded dependencies from 1.9.0-rc1 [[GH-13168](https://github.com/hashicorp/vault/pull/13168)] -* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes -* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node -* http:Fix /sys/monitor endpoint returning streaming not supported [[GH-13200](https://github.com/hashicorp/vault/pull/13200)] -* identity/oidc: Adds support for port-agnostic validation of loopback IP redirect URIs. 
[[GH-13871](https://github.com/hashicorp/vault/pull/13871)] -* identity/oidc: Check for a nil signing key on rotation to prevent panics. [[GH-13716](https://github.com/hashicorp/vault/pull/13716)] -* identity/oidc: Fixes inherited group membership when evaluating client assignments [[GH-14013](https://github.com/hashicorp/vault/pull/14013)] -* identity/oidc: Fixes potential write to readonly storage on performance secondary clusters during key rotation [[GH-14426](https://github.com/hashicorp/vault/pull/14426)] -* identity/oidc: Make the `nonce` parameter optional for the Authorization Endpoint of OIDC providers. [[GH-13231](https://github.com/hashicorp/vault/pull/13231)] -* identity/token: Fixes a bug where duplicate public keys could appear in the .well-known JWKS [[GH-14543](https://github.com/hashicorp/vault/pull/14543)] -* identity: Fix possible nil pointer dereference. [[GH-13318](https://github.com/hashicorp/vault/pull/13318)] -* identity: Fix regression preventing startup when aliases were created pre-1.9. [[GH-13169](https://github.com/hashicorp/vault/pull/13169)] -* identity: Fixes a panic in the OIDC key rotation due to a missing nil check. [[GH-13298](https://github.com/hashicorp/vault/pull/13298)] -* kmip (enterprise): Fix locate by name operations fail to find key after a rekey operation. -* licensing (enterprise): Revert accidental inclusion of the TDE feature from the `prem` build. -* metrics/autosnapshots (enterprise) : Fix bug that could cause -vault.autosnapshots.save.errors to not be incremented when there is an -autosnapshot save error. -* physical/mysql: Create table with wider `vault_key` column when initializing database tables. [[GH-14231](https://github.com/hashicorp/vault/pull/14231)] -* plugin/couchbase: Fix an issue in which the locking patterns did not allow parallel requests. 
[[GH-13033](https://github.com/hashicorp/vault/pull/13033)] -* replication (enterprise): When using encrypted secondary tokens, only clear the -private key after a successful connection to the primary cluster -* sdk/framework: Generate proper OpenAPI specs for path patterns that use an alternation as the root. [[GH-13487](https://github.com/hashicorp/vault/pull/13487)] -* sdk/helper/ldaputil: properly escape a trailing escape character to prevent panics. [[GH-13452](https://github.com/hashicorp/vault/pull/13452)] -* sdk/queue: move lock before length check to prevent panics. [[GH-13146](https://github.com/hashicorp/vault/pull/13146)] -* sdk: Fixes OpenAPI to distinguish between paths that can do only List, or both List and Read. [[GH-13643](https://github.com/hashicorp/vault/pull/13643)] -* secrets/azure: Fixed bug where Azure environment did not change Graph URL [[GH-13973](https://github.com/hashicorp/vault/pull/13973)] -* secrets/azure: Fixes service principal generation when assigning roles that have [DataActions](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#dataactions). [[GH-13277](https://github.com/hashicorp/vault/pull/13277)] -* secrets/azure: Fixes the [rotate root](https://www.vaultproject.io/api-docs/secret/azure#rotate-root) -operation for upgraded configurations with a `root_password_ttl` of zero. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] -* secrets/database/cassandra: change connect_timeout to 5s as documentation says [[GH-12443](https://github.com/hashicorp/vault/pull/12443)] -* secrets/database/mssql: Accept a boolean for `contained_db`, rather than just a string. [[GH-13469](https://github.com/hashicorp/vault/pull/13469)] -* secrets/gcp: Fixed bug where error was not reported for invalid bindings [[GH-13974](https://github.com/hashicorp/vault/pull/13974)] -* secrets/gcp: Fixes role bindings for BigQuery dataset resources. 
[[GH-13548](https://github.com/hashicorp/vault/pull/13548)] -* secrets/openldap: Fix panic from nil logger in backend [[GH-14171](https://github.com/hashicorp/vault/pull/14171)] -* secrets/pki: Default value for key_bits changed to 0, enabling key_type=ec key generation with default value [[GH-13080](https://github.com/hashicorp/vault/pull/13080)] -* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)] -* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)] -* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-2456](https://github.com/hashicorp/vault/pull/2456)] -* secrets/pki: Fixes around NIST P-curve signature hash length, default value for signature_bits changed to 0. [[GH-12872](https://github.com/hashicorp/vault/pull/12872)] -* secrets/pki: Recognize ed25519 when requesting a response in PKCS8 format [[GH-13257](https://github.com/hashicorp/vault/pull/13257)] -* secrets/pki: Skip signature bits validation for ed25519 curve key type [[GH-13254](https://github.com/hashicorp/vault/pull/13254)] -* secrets/transit: Ensure that Vault does not panic for invalid nonce size when we aren't in convergent encryption mode. [[GH-13690](https://github.com/hashicorp/vault/pull/13690)] -* secrets/transit: Return an error if any required parameter is missing. [[GH-14074](https://github.com/hashicorp/vault/pull/14074)] -* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)] -* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] -* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. 
[[GH-13703](https://github.com/hashicorp/vault/pull/13703)] -* storage/raft: Fix regression in 1.9.0-rc1 that changed how time is represented in Raft logs; this prevented using a raft db created pre-1.9. [[GH-13165](https://github.com/hashicorp/vault/pull/13165)] -* storage/raft: On linux, use map_populate for bolt files to improve startup time. [[GH-13573](https://github.com/hashicorp/vault/pull/13573)] -* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)] -* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)] -* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)] -* ui: Fix client count current month data not showing unless monthly history data exists [[GH-13396](https://github.com/hashicorp/vault/pull/13396)] -* ui: Fix default TTL display and set on database role [[GH-14224](https://github.com/hashicorp/vault/pull/14224)] -* ui: Fix incorrect validity message on transit secrets engine [[GH-14233](https://github.com/hashicorp/vault/pull/14233)] -* ui: Fix issue where UI incorrectly handled API errors when mounting backends [[GH-14551](https://github.com/hashicorp/vault/pull/14551)] -* ui: Fix kv engine access bug [[GH-13872](https://github.com/hashicorp/vault/pull/13872)] -* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] -* ui: Fixes caching issue on kv new version create [[GH-14489](https://github.com/hashicorp/vault/pull/14489)] -* ui: Fixes displaying empty masked values in PKI engine [[GH-14400](https://github.com/hashicorp/vault/pull/14400)] -* ui: Fixes horizontal bar chart hover issue when filtering namespaces and mounts [[GH-14493](https://github.com/hashicorp/vault/pull/14493)] -* ui: Fixes issue logging out with wrapped token query parameter 
[[GH-14329](https://github.com/hashicorp/vault/pull/14329)] -* ui: Fixes issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] -* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] -* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] -* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] -* ui: Fixes issue with SearchSelect component not holding focus [[GH-13590](https://github.com/hashicorp/vault/pull/13590)] -* ui: Fixes issue with automate secret deletion value not displaying initially if set in secret metadata edit view [[GH-13177](https://github.com/hashicorp/vault/pull/13177)] -* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] -* ui: Fixes issue with placeholder not displaying for automatically deleted secrets when deletion time has passed [[GH-13166](https://github.com/hashicorp/vault/pull/13166)] -* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] -* ui: Fixes long secret key names overlapping masked values [[GH-13032](https://github.com/hashicorp/vault/pull/13032)] -* ui: Fixes node-forge error when parsing EC (elliptical curve) certs [[GH-13238](https://github.com/hashicorp/vault/pull/13238)] -* ui: Redirects to managed namespace if incorrect namespace in URL param [[GH-14422](https://github.com/hashicorp/vault/pull/14422)] -* ui: Removes ability to tune token_type for token auth methods [[GH-12904](https://github.com/hashicorp/vault/pull/12904)] -* ui: trigger token renewal if inactive and half of TTL has passed 
[[GH-13950](https://github.com/hashicorp/vault/pull/13950)] +```release-note:feature +**Rotate Root for LDAP auth**: Rotate root operations are now supported for the LDAP auth engine. +``` diff --git a/CODEOWNERS b/CODEOWNERS index 364e143c95d1..09f34ce8621c 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,87 +1,3 @@ -# Each line is a file pattern followed by one or more owners. Being an owner -# means those groups or individuals will be added as reviewers to PRs affecting -# those areas of the code. -# -# More on CODEOWNERS files: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners - -# Select Auth engines are owned by Ecosystem -/builtin/credential/aws/ @hashicorp/vault-ecosystem-applications -/builtin/credential/github/ @hashicorp/vault-ecosystem-applications -/builtin/credential/ldap/ @hashicorp/vault-ecosystem-applications -/builtin/credential/okta/ @hashicorp/vault-ecosystem-applications - -# Secrets engines (pki, ssh, totp and transit omitted) -/builtin/logical/aws/ @hashicorp/vault-ecosystem-applications -/builtin/logical/cassandra/ @hashicorp/vault-ecosystem-applications -/builtin/logical/consul/ @hashicorp/vault-ecosystem-applications -/builtin/logical/database/ @hashicorp/vault-ecosystem-applications -/builtin/logical/mongodb/ @hashicorp/vault-ecosystem-applications -/builtin/logical/mssql/ @hashicorp/vault-ecosystem-applications -/builtin/logical/mysql/ @hashicorp/vault-ecosystem-applications -/builtin/logical/nomad/ @hashicorp/vault-ecosystem-applications -/builtin/logical/postgresql/ @hashicorp/vault-ecosystem-applications -/builtin/logical/rabbitmq/ @hashicorp/vault-ecosystem-applications - -# Identity Integrations (OIDC, tokens) -/vault/identity_store_oidc* @hashicorp/vault-ecosystem-applications - -/plugins/ @hashicorp/vault-ecosystem -/vault/plugin_catalog.go @hashicorp/vault-ecosystem - -/website/content/ @hashicorp/vault-education-approvers -/website/content/docs/plugin-portal.mdx @acahn 
@hashicorp/vault-education-approvers - -# Plugin docs -/website/content/docs/plugins/ @hashicorp/vault-ecosystem @hashicorp/vault-education-approvers -/website/content/docs/upgrading/plugins.mdx @hashicorp/vault-ecosystem @hashicorp/vault-education-approvers - -# UI code related to Vault's JWT/OIDC auth method and OIDC provider. -# Changes to these files often require coordination with backend code, -# so stewards of the backend code are added below for notification. -/ui/app/components/auth-jwt.js @hashicorp/vault-ecosystem-applications -/ui/app/routes/vault/cluster/oidc-*.js @hashicorp/vault-ecosystem-applications - -# Release config; service account is required for automation tooling. -/.release/ @hashicorp/release-engineering @hashicorp/github-secure-vault-core @hashicorp/quality-team -/.github/workflows/build.yml @hashicorp/release-engineering @hashicorp/github-secure-vault-core @hashicorp/quality-team - -# Quality engineering -/.github/ @hashicorp/quality-team -/enos/ @hashicorp/quality-team - -# Cryptosec -/builtin/logical/pki/ @hashicorp/vault-crypto -/builtin/logical/pkiext/ @hashicorp/vault-crypto -/website/content/docs/secrets/pki/ @hashicorp/vault-crypto -/website/content/api-docs/secret/pki.mdx @hashicorp/vault-crypto -/builtin/credential/cert/ @hashicorp/vault-crypto -/website/content/docs/auth/cert.mdx @hashicorp/vault-crypto -/website/content/api-docs/auth/cert.mdx @hashicorp/vault-crypto -/builtin/logical/ssh/ @hashicorp/vault-crypto -/website/content/docs/secrets/ssh/ @hashicorp/vault-crypto -/website/content/api-docs/secret/ssh.mdx @hashicorp/vault-crypto -/builtin/logical/transit/ @hashicorp/vault-crypto -/website/content/docs/secrets/transit/ @hashicorp/vault-crypto -/website/content/api-docs/secret/transit.mdx @hashicorp/vault-crypto -/helper/random/ @hashicorp/vault-crypto -/sdk/helper/certutil/ @hashicorp/vault-crypto -/sdk/helper/cryptoutil/ @hashicorp/vault-crypto -/sdk/helper/kdf/ @hashicorp/vault-crypto -/sdk/helper/keysutil/ 
@hashicorp/vault-crypto -/sdk/helper/ocsp/ @hashicorp/vault-crypto -/sdk/helper/salt/ @hashicorp/vault-crypto -/sdk/helper/tlsutil/ @hashicorp/vault-crypto -/shamir/ @hashicorp/vault-crypto -/vault/barrier* @hashicorp/vault-crypto -/vault/managed_key* @hashicorp/vault-crypto -/vault/seal* @hashicorp/vault-crypto -/vault/seal/ @hashicorp/vault-crypto -/website/content/docs/configuration/seal/ @hashicorp/vault-crypto -/website/content/docs/enterprise/sealwrap.mdx @hashicorp/vault-crypto -/website/content/api-docs/system/sealwrap-rewrap.mdx @hashicorp/vault-crypto -/website/content/docs/secrets/transform/ @hashicorp/vault-crypto -/website/content/api-docs/secret/transform.mdx @hashicorp/vault-crypto -/website/content/docs/secrets/kmip-profiles.mdx @hashicorp/vault-crypto -/website/content/docs/secrets/kmip.mdx @hashicorp/vault-crypto -/website/content/api-docs/secret/kmip.mdx @hashicorp/vault-crypto -/website/content/docs/enterprise/fips/ @hashicorp/vault-crypto +```release-note:improvement +ui: capabilities-self is always called in the user's root namespace +``` \ No newline at end of file diff --git a/LICENSE b/LICENSE index ae14f271d418..2fe98e926d05 100644 --- a/LICENSE +++ b/LICENSE @@ -1,91 +1,3 @@ -License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved. -“Business Source License” is a trademark of MariaDB Corporation Ab. - -Parameters - -Licensor: HashiCorp, Inc. -Licensed Work: Vault 1.15.2. The Licensed Work is (c) 2023 HashiCorp, Inc. -Additional Use Grant: You may make production use of the Licensed Work, provided - Your use does not include offering the Licensed Work to third - parties on a hosted or embedded basis in order to compete with - HashiCorp’s paid version(s) of the Licensed Work. 
For purposes - of this license: - - A “competitive offering” is a Product that is offered to third - parties on a paid basis, including through paid support - arrangements, that significantly overlaps with the capabilities - of HashiCorp’s paid version(s) of the Licensed Work. If Your - Product is not a competitive offering when You first make it - generally available, it will not become a competitive offering - later due to HashiCorp releasing a new version of the Licensed - Work with additional capabilities. In addition, Products that - are not provided on a paid basis are not competitive. - - “Product” means software that is offered to end users to manage - in their own environments or offered as a service on a hosted - basis. - - “Embedded” means including the source code or executable code - from the Licensed Work in a competitive offering. “Embedded” - also means packaging the competitive offering in such a way - that the Licensed Work must be accessed or downloaded for the - competitive offering to operate. - - Hosting or using the Licensed Work(s) for internal purposes - within an organization is not considered a competitive - offering. HashiCorp considers your organization to include all - of your affiliates under common control. - - For binding interpretive guidance on using HashiCorp products - under the Business Source License, please visit our FAQ. - (https://www.hashicorp.com/license-faq) -Change Date: Four years from the date the Licensed Work is published. -Change License: MPL 2.0 - -For information about alternative licensing arrangements for the Licensed Work, -please contact licensing@hashicorp.com. - -Notice - -Business Source License 1.1 - -Terms - -The Licensor hereby grants you the right to copy, modify, create derivative -works, redistribute, and make non-production use of the Licensed Work. The -Licensor may make an Additional Use Grant, above, permitting limited production use. 
- -Effective on the Change Date, or the fourth anniversary of the first publicly -available distribution of a specific version of the Licensed Work under this -License, whichever comes first, the Licensor hereby grants you rights under -the terms of the Change License, and the rights granted in the paragraph -above terminate. - -If your use of the Licensed Work does not comply with the requirements -currently in effect as described in this License, you must purchase a -commercial license from the Licensor, its affiliated entities, or authorized -resellers, or you must refrain from using the Licensed Work. - -All copies of the original and modified Licensed Work, and derivative works -of the Licensed Work, are subject to this License. This License applies -separately for each version of the Licensed Work and the Change Date may vary -for each version of the Licensed Work released by Licensor. - -You must conspicuously display this License on each original or modified copy -of the Licensed Work. If you receive the Licensed Work in original or -modified form from a third party, the terms and conditions set forth in this -License apply to your use of that work. - -Any use of the Licensed Work in violation of this License will automatically -terminate your rights under this License for the current and all other -versions of the Licensed Work. - -This License does not grant you any right in any trademark or logo of -Licensor or its affiliates (provided that you may use a trademark or logo of -Licensor as expressly required by this License). - -TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON -AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, -EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND -TITLE. 
+```release-note:improvement +ui: Implement Helios Design System footer component +``` diff --git a/api/go.mod b/api/go.mod index 20fb4617af23..97a26746bd0f 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,39 +1,3 @@ -module github.com/hashicorp/vault/api - -// The Go version directive for the api package should normally only be updated when -// code in the api package requires a newer Go version to build. It should not -// automatically track the Go version used to build Vault itself. Many projects import -// the api module and we don't want to impose a newer version on them any more than we -// have to. -go 1.19 - -require ( - github.com/cenkalti/backoff/v3 v3.0.0 - github.com/go-jose/go-jose/v3 v3.0.0 - github.com/go-test/deep v1.0.2 - github.com/hashicorp/errwrap v1.1.0 - github.com/hashicorp/go-cleanhttp v0.5.2 - github.com/hashicorp/go-hclog v0.16.2 - github.com/hashicorp/go-multierror v1.1.1 - github.com/hashicorp/go-retryablehttp v0.6.6 - github.com/hashicorp/go-rootcerts v1.0.2 - github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 - github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 - github.com/hashicorp/hcl v1.0.0 - github.com/mitchellh/mapstructure v1.5.0 - golang.org/x/net v0.7.0 - golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 -) - -require ( - github.com/fatih/color v1.7.0 // indirect - github.com/google/go-cmp v0.5.7 // indirect - github.com/hashicorp/go-sockaddr v1.0.2 // indirect - github.com/mattn/go-colorable v0.1.6 // indirect - github.com/mattn/go-isatty v0.0.12 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/ryanuber/go-glob v1.0.0 // indirect - golang.org/x/crypto v0.6.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect -) +```release-note:bug +secrets/pki: Do not set nextUpdate field in OCSP responses when ocsp_expiry is 0 +``` diff --git a/api/go.sum b/api/go.sum index e8f5f1811f8f..67ea1d0ae974 100644 --- a/api/go.sum +++ b/api/go.sum @@ -1,93 +1,3 @@ 
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c= -github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= -github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= -github.com/go-test/deep v1.0.2 h1:onZX1rnHT3Wv6cqNgYyFOOlgVKJrksuCMCRvJStbMYw= -github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= -github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= 
-github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.16.2 h1:K4ev2ib4LdQETX5cSZBG0DVLk1jwGqSPXBjdah3veNs= -github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= -github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= -github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6 h1:om4Al8Oy7kCm/B86rLCLah4Dt5Aa0Fr5rYBG60OzwHQ= -github.com/hashicorp/go-secure-stdlib/parseutil v0.1.6/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= -github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= -github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod 
h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= -github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= -github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify 
v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.7.0 
h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +```release-note:bug +auth/cert: Handle errors related to expired OCSP server responses +``` diff --git a/api/replication_status.go b/api/replication_status.go index 1668daf19c12..9253e44ab8c0 100644 --- a/api/replication_status.go +++ b/api/replication_status.go @@ -1,130 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - "github.com/mitchellh/mapstructure" -) - -const ( - apiRepPerformanceStatusPath = "/v1/sys/replication/performance/status" - apiRepDRStatusPath = "/v1/sys/replication/dr/status" - apiRepStatusPath = "/v1/sys/replication/status" -) - -type ClusterInfo struct { - APIAddr string `json:"api_address,omitempty" mapstructure:"api_address"` - ClusterAddress string `json:"cluster_address,omitempty" mapstructure:"cluster_address"` - ConnectionStatus string `json:"connection_status,omitempty" mapstructure:"connection_status"` - LastHeartBeat string `json:"last_heartbeat,omitempty" mapstructure:"last_heartbeat"` - NodeID string `json:"node_id,omitempty" mapstructure:"node_id"` -} - -type ReplicationStatusGenericResponse struct { - LastDRWAL uint64 `json:"last_dr_wal,omitempty" mapstructure:"last_dr_wal"` - LastReindexEpoch string `json:"last_reindex_epoch,omitempty" mapstructure:"last_reindex_epoch"` - ClusterID string `json:"cluster_id,omitempty" mapstructure:"cluster_id"` - LastWAL uint64 `json:"last_wal,omitempty" mapstructure:"last_wal"` - MerkleRoot string `json:"merkle_root,omitempty" mapstructure:"merkle_root"` - Mode string `json:"mode,omitempty" mapstructure:"mode"` - PrimaryClusterAddr string `json:"primary_cluster_addr,omitempty" mapstructure:"primary_cluster_addr"` - LastPerformanceWAL uint64 `json:"last_performance_wal,omitempty" mapstructure:"last_performance_wal"` - State string `json:"state,omitempty" mapstructure:"state"` - LastRemoteWAL uint64 `json:"last_remote_wal,omitempty" mapstructure:"last_remote_wal"` - SecondaryID string `json:"secondary_id,omitempty" mapstructure:"secondary_id"` - SSCTGenerationCounter uint64 `json:"ssct_generation_counter,omitempty" mapstructure:"ssct_generation_counter"` - - KnownSecondaries []string `json:"known_secondaries,omitempty" mapstructure:"known_secondaries"` - KnownPrimaryClusterAddrs []string 
`json:"known_primary_cluster_addrs,omitempty" mapstructure:"known_primary_cluster_addrs"` - Primaries []ClusterInfo `json:"primaries,omitempty" mapstructure:"primaries"` - Secondaries []ClusterInfo `json:"secondaries,omitempty" mapstructure:"secondaries"` -} - -type ReplicationStatusResponse struct { - DR ReplicationStatusGenericResponse `json:"dr,omitempty" mapstructure:"dr"` - Performance ReplicationStatusGenericResponse `json:"performance,omitempty" mapstructure:"performance"` -} - -func (c *Sys) ReplicationStatus() (*ReplicationStatusResponse, error) { - return c.ReplicationStatusWithContext(context.Background(), apiRepStatusPath) -} - -func (c *Sys) ReplicationPerformanceStatusWithContext(ctx context.Context) (*ReplicationStatusGenericResponse, error) { - s, err := c.ReplicationStatusWithContext(ctx, apiRepPerformanceStatusPath) - if err != nil { - return nil, err - } - - return &s.Performance, nil -} - -func (c *Sys) ReplicationDRStatusWithContext(ctx context.Context) (*ReplicationStatusGenericResponse, error) { - s, err := c.ReplicationStatusWithContext(ctx, apiRepDRStatusPath) - if err != nil { - return nil, err - } - - return &s.DR, nil -} - -func (c *Sys) ReplicationStatusWithContext(ctx context.Context, path string) (*ReplicationStatusResponse, error) { - // default to replication/status - if path == "" { - path = apiRepStatusPath - } - - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - r := c.c.NewRequest(http.MethodGet, path) - - resp, err := c.c.rawRequestWithContext(ctx, r) - if err != nil { - return nil, err - } - defer func() { _ = resp.Body.Close() }() - - // First decode response into a map[string]interface{} - data := make(map[string]interface{}) - dec := json.NewDecoder(resp.Body) - dec.UseNumber() - if err := dec.Decode(&data); err != nil { - return nil, err - } - - rawData, ok := data["data"] - if !ok { - return nil, fmt.Errorf("empty data in replication status response") - } - - s := &ReplicationStatusResponse{} - g 
:= &ReplicationStatusGenericResponse{} - switch { - case path == apiRepPerformanceStatusPath: - err = mapstructure.Decode(rawData, g) - if err != nil { - return nil, err - } - s.Performance = *g - case path == apiRepDRStatusPath: - err = mapstructure.Decode(rawData, g) - if err != nil { - return nil, err - } - s.DR = *g - default: - err = mapstructure.Decode(rawData, s) - if err != nil { - return nil, err - } - return s, err - } - - return s, err -} +```release-note:change +events: Source URL is now `vault://{vault node}` +``` diff --git a/api/sys_hastatus.go b/api/sys_hastatus.go index 2b2aa7c3e980..040b42d94da8 100644 --- a/api/sys_hastatus.go +++ b/api/sys_hastatus.go @@ -1,46 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "context" - "net/http" - "time" -) - -func (c *Sys) HAStatus() (*HAStatusResponse, error) { - return c.HAStatusWithContext(context.Background()) -} - -func (c *Sys) HAStatusWithContext(ctx context.Context) (*HAStatusResponse, error) { - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - r := c.c.NewRequest(http.MethodGet, "/v1/sys/ha-status") - - resp, err := c.c.rawRequestWithContext(ctx, r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result HAStatusResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -type HAStatusResponse struct { - Nodes []HANode -} - -type HANode struct { - Hostname string `json:"hostname"` - APIAddress string `json:"api_address"` - ClusterAddress string `json:"cluster_address"` - ActiveNode bool `json:"active_node"` - LastEcho *time.Time `json:"last_echo"` - Version string `json:"version"` - UpgradeVersion string `json:"upgrade_version,omitempty"` - RedundancyZone string `json:"redundancy_zone,omitempty"` -} +```release-note:bug +ui: Fix JSON editor in KV V2 unable to handle pasted values +``` diff --git a/api/sys_health.go b/api/sys_health.go index 13fd8d4d3743..215c7c6d8f11 100644 --- 
a/api/sys_health.go +++ b/api/sys_health.go @@ -1,52 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "context" - "net/http" -) - -func (c *Sys) Health() (*HealthResponse, error) { - return c.HealthWithContext(context.Background()) -} - -func (c *Sys) HealthWithContext(ctx context.Context) (*HealthResponse, error) { - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - r := c.c.NewRequest(http.MethodGet, "/v1/sys/health") - // If the code is 400 or above it will automatically turn into an error, - // but the sys/health API defaults to returning 5xx when not sealed or - // inited, so we force this code to be something else so we parse correctly - r.Params.Add("uninitcode", "299") - r.Params.Add("sealedcode", "299") - r.Params.Add("standbycode", "299") - r.Params.Add("drsecondarycode", "299") - r.Params.Add("performancestandbycode", "299") - - resp, err := c.c.rawRequestWithContext(ctx, r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result HealthResponse - err = resp.DecodeJSON(&result) - return &result, err -} - -type HealthResponse struct { - Initialized bool `json:"initialized"` - Sealed bool `json:"sealed"` - Standby bool `json:"standby"` - PerformanceStandby bool `json:"performance_standby"` - ReplicationPerformanceMode string `json:"replication_performance_mode"` - ReplicationDRMode string `json:"replication_dr_mode"` - ServerTimeUTC int64 `json:"server_time_utc"` - Version string `json:"version"` - ClusterName string `json:"cluster_name,omitempty"` - ClusterID string `json:"cluster_id,omitempty"` - LastWAL uint64 `json:"last_wal,omitempty"` -} +```release-note:improvement +plugins: Containerized plugins can be run fully rootless with the runsc runtime. +``` diff --git a/api/sys_mounts.go b/api/sys_mounts.go index a6c2a0f5412e..207a61d60952 100644 --- a/api/sys_mounts.go +++ b/api/sys_mounts.go @@ -1,337 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "context" - "errors" - "fmt" - "net/http" - "time" - - "github.com/mitchellh/mapstructure" -) - -func (c *Sys) ListMounts() (map[string]*MountOutput, error) { - return c.ListMountsWithContext(context.Background()) -} - -func (c *Sys) ListMountsWithContext(ctx context.Context) (map[string]*MountOutput, error) { - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - r := c.c.NewRequest(http.MethodGet, "/v1/sys/mounts") - - resp, err := c.c.rawRequestWithContext(ctx, r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - secret, err := ParseSecret(resp.Body) - if err != nil { - return nil, err - } - if secret == nil || secret.Data == nil { - return nil, errors.New("data from server response is empty") - } - - mounts := map[string]*MountOutput{} - err = mapstructure.Decode(secret.Data, &mounts) - if err != nil { - return nil, err - } - - return mounts, nil -} - -func (c *Sys) Mount(path string, mountInfo *MountInput) error { - return c.MountWithContext(context.Background(), path, mountInfo) -} - -func (c *Sys) MountWithContext(ctx context.Context, path string, mountInfo *MountInput) error { - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/mounts/%s", path)) - if err := r.SetJSONBody(mountInfo); err != nil { - return err - } - - resp, err := c.c.rawRequestWithContext(ctx, r) - if err != nil { - return err - } - defer resp.Body.Close() - - return nil -} - -func (c *Sys) Unmount(path string) error { - return c.UnmountWithContext(context.Background(), path) -} - -func (c *Sys) UnmountWithContext(ctx context.Context, path string) error { - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - r := c.c.NewRequest(http.MethodDelete, fmt.Sprintf("/v1/sys/mounts/%s", path)) - - resp, err := c.c.rawRequestWithContext(ctx, r) - if err == nil { - defer resp.Body.Close() - 
} - return err -} - -// Remount wraps RemountWithContext using context.Background. -func (c *Sys) Remount(from, to string) error { - return c.RemountWithContext(context.Background(), from, to) -} - -// RemountWithContext kicks off a remount operation, polls the status endpoint using -// the migration ID till either success or failure state is observed -func (c *Sys) RemountWithContext(ctx context.Context, from, to string) error { - remountResp, err := c.StartRemountWithContext(ctx, from, to) - if err != nil { - return err - } - - for { - remountStatusResp, err := c.RemountStatusWithContext(ctx, remountResp.MigrationID) - if err != nil { - return err - } - if remountStatusResp.MigrationInfo.MigrationStatus == "success" { - return nil - } - if remountStatusResp.MigrationInfo.MigrationStatus == "failure" { - return fmt.Errorf("Failure! Error encountered moving mount %s to %s, with migration ID %s", from, to, remountResp.MigrationID) - } - time.Sleep(1 * time.Second) - } -} - -// StartRemount wraps StartRemountWithContext using context.Background. 
-func (c *Sys) StartRemount(from, to string) (*MountMigrationOutput, error) { - return c.StartRemountWithContext(context.Background(), from, to) -} - -// StartRemountWithContext kicks off a mount migration and returns a response with the migration ID -func (c *Sys) StartRemountWithContext(ctx context.Context, from, to string) (*MountMigrationOutput, error) { - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - body := map[string]interface{}{ - "from": from, - "to": to, - } - - r := c.c.NewRequest(http.MethodPost, "/v1/sys/remount") - if err := r.SetJSONBody(body); err != nil { - return nil, err - } - - resp, err := c.c.rawRequestWithContext(ctx, r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - secret, err := ParseSecret(resp.Body) - if err != nil { - return nil, err - } - if secret == nil || secret.Data == nil { - return nil, errors.New("data from server response is empty") - } - - var result MountMigrationOutput - err = mapstructure.Decode(secret.Data, &result) - if err != nil { - return nil, err - } - - return &result, err -} - -// RemountStatus wraps RemountStatusWithContext using context.Background. 
-func (c *Sys) RemountStatus(migrationID string) (*MountMigrationStatusOutput, error) { - return c.RemountStatusWithContext(context.Background(), migrationID) -} - -// RemountStatusWithContext checks the status of a mount migration operation with the provided ID -func (c *Sys) RemountStatusWithContext(ctx context.Context, migrationID string) (*MountMigrationStatusOutput, error) { - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/remount/status/%s", migrationID)) - - resp, err := c.c.rawRequestWithContext(ctx, r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - secret, err := ParseSecret(resp.Body) - if err != nil { - return nil, err - } - if secret == nil || secret.Data == nil { - return nil, errors.New("data from server response is empty") - } - - var result MountMigrationStatusOutput - err = mapstructure.Decode(secret.Data, &result) - if err != nil { - return nil, err - } - - return &result, err -} - -func (c *Sys) TuneMount(path string, config MountConfigInput) error { - return c.TuneMountWithContext(context.Background(), path, config) -} - -func (c *Sys) TuneMountWithContext(ctx context.Context, path string, config MountConfigInput) error { - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - r := c.c.NewRequest(http.MethodPost, fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) - if err := r.SetJSONBody(config); err != nil { - return err - } - - resp, err := c.c.rawRequestWithContext(ctx, r) - if err == nil { - defer resp.Body.Close() - } - return err -} - -func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) { - return c.MountConfigWithContext(context.Background(), path) -} - -func (c *Sys) MountConfigWithContext(ctx context.Context, path string) (*MountConfigOutput, error) { - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - r := c.c.NewRequest(http.MethodGet, fmt.Sprintf("/v1/sys/mounts/%s/tune", 
path)) - - resp, err := c.c.rawRequestWithContext(ctx, r) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - secret, err := ParseSecret(resp.Body) - if err != nil { - return nil, err - } - if secret == nil || secret.Data == nil { - return nil, errors.New("data from server response is empty") - } - - var result MountConfigOutput - err = mapstructure.Decode(secret.Data, &result) - if err != nil { - return nil, err - } - - return &result, err -} - -type MountInput struct { - Type string `json:"type"` - Description string `json:"description"` - Config MountConfigInput `json:"config"` - Local bool `json:"local"` - SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` - ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` - Options map[string]string `json:"options"` - - // Deprecated: Newer server responses should be returning this information in the - // Type field (json: "type") instead. - PluginName string `json:"plugin_name,omitempty"` -} - -type MountConfigInput struct { - Options map[string]string `json:"options" mapstructure:"options"` - DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` - Description *string `json:"description,omitempty" mapstructure:"description"` - MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` - ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` - AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` - AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` - ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` - PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` - AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" 
mapstructure:"allowed_response_headers"` - TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` - AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` - PluginVersion string `json:"plugin_version,omitempty"` - UserLockoutConfig *UserLockoutConfigInput `json:"user_lockout_config,omitempty"` - // Deprecated: This field will always be blank for newer server responses. - PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` -} - -type MountOutput struct { - UUID string `json:"uuid"` - Type string `json:"type"` - Description string `json:"description"` - Accessor string `json:"accessor"` - Config MountConfigOutput `json:"config"` - Options map[string]string `json:"options"` - Local bool `json:"local"` - SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` - ExternalEntropyAccess bool `json:"external_entropy_access" mapstructure:"external_entropy_access"` - PluginVersion string `json:"plugin_version" mapstructure:"plugin_version"` - RunningVersion string `json:"running_plugin_version" mapstructure:"running_plugin_version"` - RunningSha256 string `json:"running_sha256" mapstructure:"running_sha256"` - DeprecationStatus string `json:"deprecation_status" mapstructure:"deprecation_status"` -} - -type MountConfigOutput struct { - DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` - MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` - ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` - AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` - AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` - ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` - PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" 
mapstructure:"passthrough_request_headers"` - AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` - TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` - AllowedManagedKeys []string `json:"allowed_managed_keys,omitempty" mapstructure:"allowed_managed_keys"` - UserLockoutConfig *UserLockoutConfigOutput `json:"user_lockout_config,omitempty"` - // Deprecated: This field will always be blank for newer server responses. - PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` -} - -type UserLockoutConfigInput struct { - LockoutThreshold string `json:"lockout_threshold,omitempty" structs:"lockout_threshold" mapstructure:"lockout_threshold"` - LockoutDuration string `json:"lockout_duration,omitempty" structs:"lockout_duration" mapstructure:"lockout_duration"` - LockoutCounterResetDuration string `json:"lockout_counter_reset_duration,omitempty" structs:"lockout_counter_reset_duration" mapstructure:"lockout_counter_reset_duration"` - DisableLockout *bool `json:"lockout_disable,omitempty" structs:"lockout_disable" mapstructure:"lockout_disable"` -} - -type UserLockoutConfigOutput struct { - LockoutThreshold uint `json:"lockout_threshold,omitempty" structs:"lockout_threshold" mapstructure:"lockout_threshold"` - LockoutDuration int `json:"lockout_duration,omitempty" structs:"lockout_duration" mapstructure:"lockout_duration"` - LockoutCounterReset int `json:"lockout_counter_reset,omitempty" structs:"lockout_counter_reset" mapstructure:"lockout_counter_reset"` - DisableLockout *bool `json:"disable_lockout,omitempty" structs:"disable_lockout" mapstructure:"disable_lockout"` -} - -type MountMigrationOutput struct { - MigrationID string `mapstructure:"migration_id"` -} - -type MountMigrationStatusOutput struct { - MigrationID string `mapstructure:"migration_id"` - MigrationInfo *MountMigrationStatusInfo `mapstructure:"migration_info"` -} - -type MountMigrationStatusInfo struct 
{ - SourceMount string `mapstructure:"source_mount"` - TargetMount string `mapstructure:"target_mount"` - MigrationStatus string `mapstructure:"status"` -} +```release-note:bug +core/audit: Audit logging a Vault response will now use a 5 second context timeout, separate from the original request. +``` \ No newline at end of file diff --git a/api/sys_plugins_runtimes.go b/api/sys_plugins_runtimes.go index d88bca9b7269..424a006f2da3 100644 --- a/api/sys_plugins_runtimes.go +++ b/api/sys_plugins_runtimes.go @@ -1,189 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package api - -import ( - "context" - "errors" - "fmt" - "net/http" - - "github.com/mitchellh/mapstructure" -) - -// GetPluginRuntimeInput is used as input to the GetPluginRuntime function. -type GetPluginRuntimeInput struct { - Name string `json:"-"` - - // Type of the plugin runtime. Required. - Type PluginRuntimeType `json:"type"` -} - -// GetPluginRuntimeResponse is the response from the GetPluginRuntime call. -type GetPluginRuntimeResponse struct { - Type string `json:"type"` - Name string `json:"name"` - OCIRuntime string `json:"oci_runtime"` - CgroupParent string `json:"cgroup_parent"` - CPU int64 `json:"cpu_nanos"` - Memory int64 `json:"memory_bytes"` -} - -// GetPluginRuntime retrieves information about the plugin. 
-func (c *Sys) GetPluginRuntime(ctx context.Context, i *GetPluginRuntimeInput) (*GetPluginRuntimeResponse, error) { - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - path := pluginRuntimeCatalogPathByType(i.Type, i.Name) - req := c.c.NewRequest(http.MethodGet, path) - - resp, err := c.c.rawRequestWithContext(ctx, req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var result struct { - Data *GetPluginRuntimeResponse - } - err = resp.DecodeJSON(&result) - if err != nil { - return nil, err - } - return result.Data, err -} - -// RegisterPluginRuntimeInput is used as input to the RegisterPluginRuntime function. -type RegisterPluginRuntimeInput struct { - // Name is the name of the plugin. Required. - Name string `json:"-"` - - // Type of the plugin. Required. - Type PluginRuntimeType `json:"type"` - - OCIRuntime string `json:"oci_runtime,omitempty"` - CgroupParent string `json:"cgroup_parent,omitempty"` - CPU int64 `json:"cpu_nanos,omitempty"` - Memory int64 `json:"memory_bytes,omitempty"` -} - -// RegisterPluginRuntime registers the plugin with the given information. -func (c *Sys) RegisterPluginRuntime(ctx context.Context, i *RegisterPluginRuntimeInput) error { - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - path := pluginRuntimeCatalogPathByType(i.Type, i.Name) - req := c.c.NewRequest(http.MethodPut, path) - - if err := req.SetJSONBody(i); err != nil { - return err - } - - resp, err := c.c.rawRequestWithContext(ctx, req) - if err == nil { - defer resp.Body.Close() - } - return err -} - -// DeregisterPluginRuntimeInput is used as input to the DeregisterPluginRuntime function. -type DeregisterPluginRuntimeInput struct { - // Name is the name of the plugin runtime. Required. - Name string `json:"-"` - - // Type of the plugin. Required. - Type PluginRuntimeType `json:"type"` -} - -// DeregisterPluginRuntime removes the plugin with the given name from the plugin -// catalog. 
-func (c *Sys) DeregisterPluginRuntime(ctx context.Context, i *DeregisterPluginRuntimeInput) error { - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - path := pluginRuntimeCatalogPathByType(i.Type, i.Name) - req := c.c.NewRequest(http.MethodDelete, path) - resp, err := c.c.rawRequestWithContext(ctx, req) - if err == nil { - defer resp.Body.Close() - } - return err -} - -type PluginRuntimeDetails struct { - Type string `json:"type" mapstructure:"type"` - Name string `json:"name" mapstructure:"name"` - OCIRuntime string `json:"oci_runtime" mapstructure:"oci_runtime"` - CgroupParent string `json:"cgroup_parent" mapstructure:"cgroup_parent"` - CPU int64 `json:"cpu_nanos" mapstructure:"cpu_nanos"` - Memory int64 `json:"memory_bytes" mapstructure:"memory_bytes"` -} - -// ListPluginRuntimesInput is used as input to the ListPluginRuntimes function. -type ListPluginRuntimesInput struct { - // Type of the plugin. Required. - Type PluginRuntimeType `json:"type"` -} - -// ListPluginRuntimesResponse is the response from the ListPluginRuntimes call. -type ListPluginRuntimesResponse struct { - // RuntimesByType is the list of plugin runtimes by type. - Runtimes []PluginRuntimeDetails `json:"runtimes"` -} - -// ListPluginRuntimes lists all plugin runtimes in the catalog and returns their names as a -// list of strings. 
-func (c *Sys) ListPluginRuntimes(ctx context.Context, input *ListPluginRuntimesInput) (*ListPluginRuntimesResponse, error) { - ctx, cancelFunc := c.c.withConfiguredTimeout(ctx) - defer cancelFunc() - - if input != nil && input.Type == PluginRuntimeTypeUnsupported { - return nil, fmt.Errorf("%q is not a supported runtime type", input.Type.String()) - } - - resp, err := c.c.rawRequestWithContext(ctx, c.c.NewRequest(http.MethodGet, "/v1/sys/plugins/runtimes/catalog")) - if err != nil && resp == nil { - return nil, err - } - if resp == nil { - return nil, nil - } - defer resp.Body.Close() - - secret, err := ParseSecret(resp.Body) - if err != nil { - return nil, err - } - if secret == nil || secret.Data == nil { - return nil, errors.New("data from server response is empty") - } - if _, ok := secret.Data["runtimes"]; !ok { - return nil, fmt.Errorf("data from server response does not contain runtimes") - } - - var runtimes []PluginRuntimeDetails - if err = mapstructure.Decode(secret.Data["runtimes"], &runtimes); err != nil { - return nil, err - } - - // return all runtimes in the catalog - if input == nil { - return &ListPluginRuntimesResponse{Runtimes: runtimes}, nil - } - - result := &ListPluginRuntimesResponse{ - Runtimes: []PluginRuntimeDetails{}, - } - for _, runtime := range runtimes { - if runtime.Type == input.Type.String() { - result.Runtimes = append(result.Runtimes, runtime) - } - } - return result, nil -} - -// pluginRuntimeCatalogPathByType is a helper to construct the proper API path by plugin type -func pluginRuntimeCatalogPathByType(runtimeType PluginRuntimeType, name string) string { - return fmt.Sprintf("/v1/sys/plugins/runtimes/catalog/%s/%s", runtimeType, name) -} +```release-note:bug +storage/raft: Fix a race whereby a new leader may present inconsistent node data to Autopilot. 
+``` \ No newline at end of file diff --git a/builtin/credential/aws/pkcs7/sign.go b/builtin/credential/aws/pkcs7/sign.go index 72b99388548e..e6aca7096ac3 100644 --- a/builtin/credential/aws/pkcs7/sign.go +++ b/builtin/credential/aws/pkcs7/sign.go @@ -1,435 +1,6 @@ -package pkcs7 - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "fmt" - "math/big" - "time" - - "github.com/hashicorp/vault/internal" -) - -func init() { - internal.PatchSha1() -} - -// SignedData is an opaque data structure for creating signed data payloads -type SignedData struct { - sd signedData - certs []*x509.Certificate - data, messageDigest []byte - digestOid asn1.ObjectIdentifier - encryptionOid asn1.ObjectIdentifier -} - -// NewSignedData takes data and initializes a PKCS7 SignedData struct that is -// ready to be signed via AddSigner. The digest algorithm is set to SHA-256 by default -// and can be changed by calling SetDigestAlgorithm. 
-func NewSignedData(data []byte) (*SignedData, error) { - content, err := asn1.Marshal(data) - if err != nil { - return nil, err - } - ci := contentInfo{ - ContentType: OIDData, - Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true}, - } - sd := signedData{ - ContentInfo: ci, - Version: 1, - } - return &SignedData{sd: sd, data: data, digestOid: OIDDigestAlgorithmSHA256}, nil -} - -// SignerInfoConfig are optional values to include when adding a signer -type SignerInfoConfig struct { - ExtraSignedAttributes []Attribute - ExtraUnsignedAttributes []Attribute -} - -type signedData struct { - Version int `asn1:"default:1"` - DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"` - ContentInfo contentInfo - Certificates rawCertificates `asn1:"optional,tag:0"` - CRLs []pkix.CertificateList `asn1:"optional,tag:1"` - SignerInfos []signerInfo `asn1:"set"` -} - -type signerInfo struct { - Version int `asn1:"default:1"` - IssuerAndSerialNumber issuerAndSerial - DigestAlgorithm pkix.AlgorithmIdentifier - AuthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:0"` - DigestEncryptionAlgorithm pkix.AlgorithmIdentifier - EncryptedDigest []byte - UnauthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:1"` -} - -type attribute struct { - Type asn1.ObjectIdentifier - Value asn1.RawValue `asn1:"set"` -} - -func marshalAttributes(attrs []attribute) ([]byte, error) { - encodedAttributes, err := asn1.Marshal(struct { - A []attribute `asn1:"set"` - }{A: attrs}) - if err != nil { - return nil, err - } - - // Remove the leading sequence octets - var raw asn1.RawValue - asn1.Unmarshal(encodedAttributes, &raw) - return raw.Bytes, nil -} - -type rawCertificates struct { - Raw asn1.RawContent -} - -type issuerAndSerial struct { - IssuerName asn1.RawValue - SerialNumber *big.Int -} - -// SetDigestAlgorithm sets the digest algorithm to be used in the signing process. 
-// -// This should be called before adding signers -func (sd *SignedData) SetDigestAlgorithm(d asn1.ObjectIdentifier) { - sd.digestOid = d -} - -// SetEncryptionAlgorithm sets the encryption algorithm to be used in the signing process. -// -// This should be called before adding signers -func (sd *SignedData) SetEncryptionAlgorithm(d asn1.ObjectIdentifier) { - sd.encryptionOid = d -} - -// AddSigner is a wrapper around AddSignerChain() that adds a signer without any parent. -func (sd *SignedData) AddSigner(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error { - var parents []*x509.Certificate - return sd.AddSignerChain(ee, pkey, parents, config) -} - -// AddSignerChain signs attributes about the content and adds certificates -// and signers infos to the Signed Data. The certificate and private key -// of the end-entity signer are used to issue the signature, and any -// parent of that end-entity that need to be added to the list of -// certifications can be specified in the parents slice. -// -// The signature algorithm used to hash the data is the one of the end-entity -// certificate. -func (sd *SignedData) AddSignerChain(ee *x509.Certificate, pkey crypto.PrivateKey, parents []*x509.Certificate, config SignerInfoConfig) error { - // Following RFC 2315, 9.2 SignerInfo type, the distinguished name of - // the issuer of the end-entity signer is stored in the issuerAndSerialNumber - // section of the SignedData.SignerInfo, alongside the serial number of - // the end-entity. 
- var ias issuerAndSerial - ias.SerialNumber = ee.SerialNumber - if len(parents) == 0 { - // no parent, the issuer is the end-entity cert itself - ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer} - } else { - err := verifyPartialChain(ee, parents) - if err != nil { - return err - } - // the first parent is the issuer - ias.IssuerName = asn1.RawValue{FullBytes: parents[0].RawSubject} - } - sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, - pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, - ) - hash, err := getHashForOID(sd.digestOid) - if err != nil { - return err - } - h := hash.New() - h.Write(sd.data) - sd.messageDigest = h.Sum(nil) - encryptionOid, err := getOIDForEncryptionAlgorithm(pkey, sd.digestOid) - if err != nil { - return err - } - attrs := &attributes{} - attrs.Add(OIDAttributeContentType, sd.sd.ContentInfo.ContentType) - attrs.Add(OIDAttributeMessageDigest, sd.messageDigest) - attrs.Add(OIDAttributeSigningTime, time.Now().UTC()) - for _, attr := range config.ExtraSignedAttributes { - attrs.Add(attr.Type, attr.Value) - } - finalAttrs, err := attrs.ForMarshalling() - if err != nil { - return err - } - unsignedAttrs := &attributes{} - for _, attr := range config.ExtraUnsignedAttributes { - unsignedAttrs.Add(attr.Type, attr.Value) - } - finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling() - if err != nil { - return err - } - // create signature of signed attributes - signature, err := signAttributes(finalAttrs, pkey, hash) - if err != nil { - return err - } - signer := signerInfo{ - AuthenticatedAttributes: finalAttrs, - UnauthenticatedAttributes: finalUnsignedAttrs, - DigestAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, - DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: encryptionOid}, - IssuerAndSerialNumber: ias, - EncryptedDigest: signature, - Version: 1, - } - sd.certs = append(sd.certs, ee) - if len(parents) > 0 { - sd.certs = append(sd.certs, parents...) 
- } - sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer) - return nil -} - -// SignWithoutAttr issues a signature on the content of the pkcs7 SignedData. -// Unlike AddSigner/AddSignerChain, it calculates the digest on the data alone -// and does not include any signed attributes like timestamp and so on. -// -// This function is needed to sign old Android APKs, something you probably -// shouldn't do unless you're maintaining backward compatibility for old -// applications. -func (sd *SignedData) SignWithoutAttr(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error { - var signature []byte - sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}) - hash, err := getHashForOID(sd.digestOid) - if err != nil { - return err - } - h := hash.New() - h.Write(sd.data) - sd.messageDigest = h.Sum(nil) - switch pkey := pkey.(type) { - case *dsa.PrivateKey: - // dsa doesn't implement crypto.Signer so we make a special case - // https://github.com/golang/go/issues/27889 - r, s, err := dsa.Sign(rand.Reader, pkey, sd.messageDigest) - if err != nil { - return err - } - signature, err = asn1.Marshal(dsaSignature{r, s}) - if err != nil { - return err - } - default: - key, ok := pkey.(crypto.Signer) - if !ok { - return errors.New("pkcs7: private key does not implement crypto.Signer") - } - signature, err = key.Sign(rand.Reader, sd.messageDigest, hash) - if err != nil { - return err - } - } - var ias issuerAndSerial - ias.SerialNumber = ee.SerialNumber - // no parent, the issue is the end-entity cert itself - ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer} - if sd.encryptionOid == nil { - // if the encryption algorithm wasn't set by SetEncryptionAlgorithm, - // infer it from the digest algorithm - sd.encryptionOid, err = getOIDForEncryptionAlgorithm(pkey, sd.digestOid) - } - if err != nil { - return err - } - signer := signerInfo{ - DigestAlgorithm: 
pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, - DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.encryptionOid}, - IssuerAndSerialNumber: ias, - EncryptedDigest: signature, - Version: 1, - } - // create signature of signed attributes - sd.certs = append(sd.certs, ee) - sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer) - return nil -} - -func (si *signerInfo) SetUnauthenticatedAttributes(extraUnsignedAttrs []Attribute) error { - unsignedAttrs := &attributes{} - for _, attr := range extraUnsignedAttrs { - unsignedAttrs.Add(attr.Type, attr.Value) - } - finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling() - if err != nil { - return err - } - - si.UnauthenticatedAttributes = finalUnsignedAttrs - - return nil -} - -// AddCertificate adds the certificate to the payload. Useful for parent certificates -func (sd *SignedData) AddCertificate(cert *x509.Certificate) { - sd.certs = append(sd.certs, cert) -} - -// Detach removes content from the signed data struct to make it a detached signature. 
-// This must be called right before Finish() -func (sd *SignedData) Detach() { - sd.sd.ContentInfo = contentInfo{ContentType: OIDData} -} - -// GetSignedData returns the private Signed Data -func (sd *SignedData) GetSignedData() *signedData { - return &sd.sd -} - -// Finish marshals the content and its signers -func (sd *SignedData) Finish() ([]byte, error) { - sd.sd.Certificates = marshalCertificates(sd.certs) - inner, err := asn1.Marshal(sd.sd) - if err != nil { - return nil, err - } - outer := contentInfo{ - ContentType: OIDSignedData, - Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true}, - } - return asn1.Marshal(outer) -} - -// RemoveAuthenticatedAttributes removes authenticated attributes from signedData -// similar to OpenSSL's PKCS7_NOATTR or -noattr flags -func (sd *SignedData) RemoveAuthenticatedAttributes() { - for i := range sd.sd.SignerInfos { - sd.sd.SignerInfos[i].AuthenticatedAttributes = nil - } -} - -// RemoveUnauthenticatedAttributes removes unauthenticated attributes from signedData -func (sd *SignedData) RemoveUnauthenticatedAttributes() { - for i := range sd.sd.SignerInfos { - sd.sd.SignerInfos[i].UnauthenticatedAttributes = nil - } -} - -// verifyPartialChain checks that a given cert is issued by the first parent in the list, -// then continue down the path. It doesn't require the last parent to be a root CA, -// or to be trusted in any truststore. It simply verifies that the chain provided, albeit -// partial, makes sense. 
-func verifyPartialChain(cert *x509.Certificate, parents []*x509.Certificate) error { - if len(parents) == 0 { - return fmt.Errorf("pkcs7: zero parents provided to verify the signature of certificate %q", cert.Subject.CommonName) - } - err := cert.CheckSignatureFrom(parents[0]) - if err != nil { - return fmt.Errorf("pkcs7: certificate signature from parent is invalid: %v", err) - } - if len(parents) == 1 { - // there is no more parent to check, return - return nil - } - return verifyPartialChain(parents[0], parents[1:]) -} - -func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) { - var ias issuerAndSerial - // The issuer RDNSequence has to match exactly the sequence in the certificate - // We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence - ias.IssuerName = asn1.RawValue{FullBytes: cert.RawIssuer} - ias.SerialNumber = cert.SerialNumber - - return ias, nil -} - -// signs the DER encoded form of the attributes with the private key -func signAttributes(attrs []attribute, pkey crypto.PrivateKey, digestAlg crypto.Hash) ([]byte, error) { - attrBytes, err := marshalAttributes(attrs) - if err != nil { - return nil, err - } - h := digestAlg.New() - h.Write(attrBytes) - hash := h.Sum(nil) - - // dsa doesn't implement crypto.Signer so we make a special case - // https://github.com/golang/go/issues/27889 - switch pkey := pkey.(type) { - case *dsa.PrivateKey: - r, s, err := dsa.Sign(rand.Reader, pkey, hash) - if err != nil { - return nil, err - } - return asn1.Marshal(dsaSignature{r, s}) - } - - key, ok := pkey.(crypto.Signer) - if !ok { - return nil, errors.New("pkcs7: private key does not implement crypto.Signer") - } - return key.Sign(rand.Reader, hash, digestAlg) -} - -type dsaSignature struct { - R, S *big.Int -} - -// concats and wraps the certificates in the RawValue structure -func marshalCertificates(certs []*x509.Certificate) rawCertificates { - var buf bytes.Buffer - for _, cert := range certs { - buf.Write(cert.Raw) - 
} - rawCerts, _ := marshalCertificateBytes(buf.Bytes()) - return rawCerts -} - -// Even though, the tag & length are stripped out during marshalling the -// RawContent, we have to encode it into the RawContent. If its missing, -// then `asn1.Marshal()` will strip out the certificate wrapper instead. -func marshalCertificateBytes(certs []byte) (rawCertificates, error) { - val := asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true} - b, err := asn1.Marshal(val) - if err != nil { - return rawCertificates{}, err - } - return rawCertificates{Raw: b}, nil -} - -// DegenerateCertificate creates a signed data structure containing only the -// provided certificate or certificate chain. -func DegenerateCertificate(cert []byte) ([]byte, error) { - rawCert, err := marshalCertificateBytes(cert) - if err != nil { - return nil, err - } - emptyContent := contentInfo{ContentType: OIDData} - sd := signedData{ - Version: 1, - ContentInfo: emptyContent, - Certificates: rawCert, - CRLs: []pkix.CertificateList{}, - } - content, err := asn1.Marshal(sd) - if err != nil { - return nil, err - } - signedContent := contentInfo{ - ContentType: OIDSignedData, - Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true}, - } - return asn1.Marshal(signedContent) -} +```release-note:change +cli: `vault plugin info` and `vault plugin deregister` now require 2 positional arguments instead of accepting either 1 or 2. +``` +```release-note:improvement +cli: Improved error messages for `vault plugin` sub-commands. +``` diff --git a/builtin/credential/ldap/backend.go b/builtin/credential/ldap/backend.go index d938a4fea9f0..343811bfd050 100644 --- a/builtin/credential/ldap/backend.go +++ b/builtin/credential/ldap/backend.go @@ -1,174 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package ldap - -import ( - "context" - "fmt" - "strings" - - "github.com/hashicorp/cap/ldap" - "github.com/hashicorp/go-secure-stdlib/strutil" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/ldaputil" - "github.com/hashicorp/vault/sdk/logical" -) - -const ( - operationPrefixLDAP = "ldap" - errUserBindFailed = "ldap operation failed: failed to bind as user" -) - -func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { - b := Backend() - if err := b.Setup(ctx, conf); err != nil { - return nil, err - } - return b, nil -} - -func Backend() *backend { - var b backend - b.Backend = &framework.Backend{ - Help: backendHelp, - - PathsSpecial: &logical.Paths{ - Unauthenticated: []string{ - "login/*", - }, - - SealWrapStorage: []string{ - "config", - }, - }, - - Paths: []*framework.Path{ - pathConfig(&b), - pathGroups(&b), - pathGroupsList(&b), - pathUsers(&b), - pathUsersList(&b), - pathLogin(&b), - }, - - AuthRenew: b.pathLoginRenew, - BackendType: logical.TypeCredential, - } - - return &b -} - -type backend struct { - *framework.Backend -} - -func (b *backend) Login(ctx context.Context, req *logical.Request, username string, password string, usernameAsAlias bool) (string, []string, *logical.Response, []string, error) { - cfg, err := b.Config(ctx, req) - if err != nil { - return "", nil, nil, nil, err - } - if cfg == nil { - return "", nil, logical.ErrorResponse("ldap backend not configured"), nil, nil - } - - if cfg.DenyNullBind && len(password) == 0 { - return "", nil, logical.ErrorResponse("password cannot be of zero length when passwordless binds are being denied"), nil, nil - } - - ldapClient, err := ldap.NewClient(ctx, ldaputil.ConvertConfig(cfg.ConfigEntry)) - if err != nil { - return "", nil, logical.ErrorResponse(err.Error()), nil, nil - } - - // Clean connection - defer ldapClient.Close(ctx) - - c, err := ldapClient.Authenticate(ctx, username, 
password, ldap.WithGroups(), ldap.WithUserAttributes()) - if err != nil { - if strings.Contains(err.Error(), "discovery of user bind DN failed") || - strings.Contains(err.Error(), "unable to bind user") { - return "", nil, logical.ErrorResponse(errUserBindFailed), nil, logical.ErrInvalidCredentials - } - - return "", nil, logical.ErrorResponse(err.Error()), nil, nil - } - - ldapGroups := c.Groups - ldapResponse := &logical.Response{ - Data: map[string]interface{}{}, - } - if len(ldapGroups) == 0 { - errString := fmt.Sprintf( - "no LDAP groups found in groupDN %q; only policies from locally-defined groups available", - cfg.GroupDN) - ldapResponse.AddWarning(errString) - } - - for _, warning := range c.Warnings { - ldapResponse.AddWarning(string(warning)) - } - - var allGroups []string - canonicalUsername := username - cs := *cfg.CaseSensitiveNames - if !cs { - canonicalUsername = strings.ToLower(username) - } - // Import the custom added groups from ldap backend - user, err := b.User(ctx, req.Storage, canonicalUsername) - if err == nil && user != nil && user.Groups != nil { - if b.Logger().IsDebug() { - b.Logger().Debug("adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups) - } - allGroups = append(allGroups, user.Groups...) - } - // Merge local and LDAP groups - allGroups = append(allGroups, ldapGroups...) - - canonicalGroups := allGroups - // If not case sensitive, lowercase all - if !cs { - canonicalGroups = make([]string, len(allGroups)) - for i, v := range allGroups { - canonicalGroups[i] = strings.ToLower(v) - } - } - - // Retrieve policies - var policies []string - for _, groupName := range canonicalGroups { - group, err := b.Group(ctx, req.Storage, groupName) - if err == nil && group != nil { - policies = append(policies, group.Policies...) - } - } - if user != nil && user.Policies != nil { - policies = append(policies, user.Policies...) 
- } - // Policies from each group may overlap - policies = strutil.RemoveDuplicates(policies, true) - - if usernameAsAlias { - return username, policies, ldapResponse, allGroups, nil - } - - userAttrValues := c.UserAttributes[cfg.UserAttr] - if len(userAttrValues) == 0 { - return "", nil, logical.ErrorResponse("missing entity alias attribute value"), nil, nil - } - entityAliasAttribute := userAttrValues[0] - - return entityAliasAttribute, policies, ldapResponse, allGroups, nil -} - -const backendHelp = ` -The "ldap" credential provider allows authentication querying -a LDAP server, checking username and password, and associating groups -to set of policies. - -Configuration of the server is done through the "config" and "groups" -endpoints by a user with root access. Authentication is then done -by supplying the two fields for "login". -` +```release-note:bug +agent/logging: Agent should now honor correct -log-format and -log-file settings in logs generated by the consul-template library. +``` \ No newline at end of file diff --git a/builtin/credential/ldap/path_config.go b/builtin/credential/ldap/path_config.go index f6e7a152dfa4..74124710b8a8 100644 --- a/builtin/credential/ldap/path_config.go +++ b/builtin/credential/ldap/path_config.go @@ -1,271 +1,4 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package ldap - -import ( - "context" - "strings" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/ldaputil" - "github.com/hashicorp/vault/sdk/helper/tokenutil" - "github.com/hashicorp/vault/sdk/logical" -) - -const userFilterWarning = "userfilter configured does not consider userattr and may result in colliding entity aliases on logins" - -func pathConfig(b *backend) *framework.Path { - p := &framework.Path{ - Pattern: `config`, - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixLDAP, - Action: "Configure", - }, - - Fields: ldaputil.ConfigFields(), - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.pathConfigRead, - DisplayAttrs: &framework.DisplayAttributes{ - OperationSuffix: "auth-configuration", - }, - }, - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathConfigWrite, - DisplayAttrs: &framework.DisplayAttributes{ - OperationVerb: "configure-auth", - }, - }, - }, - - HelpSynopsis: pathConfigHelpSyn, - HelpDescription: pathConfigHelpDesc, - } - - tokenutil.AddTokenFields(p.Fields) - p.Fields["token_policies"].Description += ". This will apply to all tokens generated by this auth method, in addition to any configured for specific users/groups." - return p -} - -/* - * Construct ConfigEntry struct using stored configuration. 
- */ -func (b *backend) Config(ctx context.Context, req *logical.Request) (*ldapConfigEntry, error) { - storedConfig, err := req.Storage.Get(ctx, "config") - if err != nil { - return nil, err - } - - if storedConfig == nil { - // Create a new ConfigEntry, filling in defaults where appropriate - fd, err := b.getConfigFieldData() - if err != nil { - return nil, err - } - - result, err := ldaputil.NewConfigEntry(nil, fd) - if err != nil { - return nil, err - } - - // No user overrides, return default configuration - result.CaseSensitiveNames = new(bool) - *result.CaseSensitiveNames = false - - result.UsePre111GroupCNBehavior = new(bool) - *result.UsePre111GroupCNBehavior = false - - return &ldapConfigEntry{ConfigEntry: result}, nil - } - - // Deserialize stored configuration. - // Fields not specified in storedConfig will retain their defaults. - result := new(ldapConfigEntry) - result.ConfigEntry = new(ldaputil.ConfigEntry) - if err := storedConfig.DecodeJSON(result); err != nil { - return nil, err - } - - var persistNeeded bool - if result.CaseSensitiveNames == nil { - // Upgrade from before switching to case-insensitive - result.CaseSensitiveNames = new(bool) - *result.CaseSensitiveNames = true - persistNeeded = true - } - - if result.UsePre111GroupCNBehavior == nil { - result.UsePre111GroupCNBehavior = new(bool) - *result.UsePre111GroupCNBehavior = true - persistNeeded = true - } - - if persistNeeded && (b.System().LocalMount() || !b.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationPerformanceStandby)) { - entry, err := logical.StorageEntryJSON("config", result) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - } - - return result, nil -} - -func (b *backend) pathConfigRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - cfg, err := b.Config(ctx, req) - if err != nil { - return nil, err - } - if cfg == 
nil { - return nil, nil - } - - data := cfg.PasswordlessMap() - cfg.PopulateTokenData(data) - - resp := &logical.Response{ - Data: data, - } - - if warnings := b.checkConfigUserFilter(cfg); len(warnings) > 0 { - resp.Warnings = warnings - } - - return resp, nil -} - -// checkConfigUserFilter performs a best-effort check the config's userfilter. -// It will checked whether the templated or literal userattr value is present, -// and if not return a warning. -func (b *backend) checkConfigUserFilter(cfg *ldapConfigEntry) []string { - if cfg == nil || cfg.UserFilter == "" { - return nil - } - - var warnings []string - - switch { - case strings.Contains(cfg.UserFilter, "{{.UserAttr}}"): - // Case where the templated userattr value is provided - case strings.Contains(cfg.UserFilter, cfg.UserAttr): - // Case where the literal userattr value is provided - default: - b.Logger().Debug(userFilterWarning, "userfilter", cfg.UserFilter, "userattr", cfg.UserAttr) - warnings = append(warnings, userFilterWarning) - } - - return warnings -} - -func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { - cfg, err := b.Config(ctx, req) - if err != nil { - return nil, err - } - if cfg == nil { - return nil, nil - } - - // Build a ConfigEntry struct out of the supplied FieldData - cfg.ConfigEntry, err = ldaputil.NewConfigEntry(cfg.ConfigEntry, d) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - // On write, if not specified, use false. 
We do this here so upgrade logic - // works since it calls the same newConfigEntry function - if cfg.CaseSensitiveNames == nil { - cfg.CaseSensitiveNames = new(bool) - *cfg.CaseSensitiveNames = false - } - - if cfg.UsePre111GroupCNBehavior == nil { - cfg.UsePre111GroupCNBehavior = new(bool) - *cfg.UsePre111GroupCNBehavior = false - } - - if err := cfg.ParseTokenFields(req, d); err != nil { - return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest - } - - entry, err := logical.StorageEntryJSON("config", cfg) - if err != nil { - return nil, err - } - if err := req.Storage.Put(ctx, entry); err != nil { - return nil, err - } - - if warnings := b.checkConfigUserFilter(cfg); len(warnings) > 0 { - return &logical.Response{ - Warnings: warnings, - }, nil - } - - return nil, nil -} - -/* - * Returns FieldData describing our ConfigEntry struct schema - */ -func (b *backend) getConfigFieldData() (*framework.FieldData, error) { - configPath := b.Route("config") - - if configPath == nil { - return nil, logical.ErrUnsupportedPath - } - - raw := make(map[string]interface{}, len(configPath.Fields)) - - fd := framework.FieldData{ - Raw: raw, - Schema: configPath.Fields, - } - - return &fd, nil -} - -type ldapConfigEntry struct { - tokenutil.TokenParams - *ldaputil.ConfigEntry -} - -const pathConfigHelpSyn = ` -Configure the LDAP server to connect to, along with its options. -` - -const pathConfigHelpDesc = ` -This endpoint allows you to configure the LDAP server to connect to and its -configuration options. - -The LDAP URL can use either the "ldap://" or "ldaps://" schema. In the former -case, an unencrypted connection will be made with a default port of 389, unless -the "starttls" parameter is set to true, in which case TLS will be used. In the -latter case, a SSL connection will be established with a default port of 636. - -## A NOTE ON ESCAPING - -It is up to the administrator to provide properly escaped DNs. 
This includes -the user DN, bind DN for search, and so on. - -The only DN escaping performed by this backend is on usernames given at login -time when they are inserted into the final bind DN, and uses escaping rules -defined in RFC 4514. - -Additionally, Active Directory has escaping rules that differ slightly from the -RFC; in particular it requires escaping of '#' regardless of position in the DN -(the RFC only requires it to be escaped when it is the first character), and -'=', which the RFC indicates can be escaped with a backslash, but does not -contain in its set of required escapes. If you are using Active Directory and -these appear in your usernames, please ensure that they are escaped, in -addition to being properly escaped in your configured DNs. - -For reference, see https://www.ietf.org/rfc/rfc4514.txt and -http://social.technet.microsoft.com/wiki/contents/articles/5312.active-directory-characters-to-escape.aspx -` +```release-note:bug +api: Fix deadlock on calls to sys/leader with a namespace configured +on the request. +``` diff --git a/builtin/credential/ldap/path_config_rotate_root.go b/builtin/credential/ldap/path_config_rotate_root.go new file mode 100644 index 000000000000..eb8e4c04fb7c --- /dev/null +++ b/builtin/credential/ldap/path_config_rotate_root.go @@ -0,0 +1,3 @@ +```release-note:change +api: add the `enterprise` parameter to the `/sys/health` endpoint +``` diff --git a/builtin/credential/ldap/path_config_rotate_root_test.go b/builtin/credential/ldap/path_config_rotate_root_test.go new file mode 100644 index 000000000000..7d24c296a1f2 --- /dev/null +++ b/builtin/credential/ldap/path_config_rotate_root_test.go @@ -0,0 +1,3 @@ +```release-note:bug +ui: Correctly handle directory redirects from pre 1.15.0 Kv v2 list view urls. 
+``` diff --git a/builtin/logical/pki/acme_challenges.go b/builtin/logical/pki/acme_challenges.go index 290c8ec78c7f..f8f885f3e11e 100644 --- a/builtin/logical/pki/acme_challenges.go +++ b/builtin/logical/pki/acme_challenges.go @@ -1,502 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package pki - -import ( - "bytes" - "context" - "crypto/sha256" - "crypto/subtle" - "crypto/tls" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "fmt" - "io" - "net" - "net/http" - "strings" - "time" -) - -const ( - DNSChallengePrefix = "_acme-challenge." - ALPNProtocol = "acme-tls/1" -) - -// While this should be a constant, there's no way to do a low-level test of -// ValidateTLSALPN01Challenge without spinning up a complicated Docker -// instance to build a custom responder. Because we already have a local -// toolchain, it is far easier to drive this through Go tests with a custom -// (high) port, rather than requiring permission to bind to port 443 (root-run -// tests are even worse). -var ALPNPort = "443" - -// OID of the acmeIdentifier X.509 Certificate Extension. -var OIDACMEIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 31} - -// ValidateKeyAuthorization validates that the given keyAuthz from a challenge -// matches our expectation, returning (true, nil) if so, or (false, err) if -// not. -func ValidateKeyAuthorization(keyAuthz string, token string, thumbprint string) (bool, error) { - parts := strings.Split(keyAuthz, ".") - if len(parts) != 2 { - return false, fmt.Errorf("invalid authorization: got %v parts, expected 2", len(parts)) - } - - tokenPart := parts[0] - thumbprintPart := parts[1] - - if token != tokenPart || thumbprint != thumbprintPart { - return false, fmt.Errorf("key authorization was invalid") - } - - return true, nil -} - -// ValidateSHA256KeyAuthorization validates that the given keyAuthz from a -// challenge matches our expectation, returning (true, nil) if so, or -// (false, err) if not. 
-// -// This is for use with DNS challenges, which require base64 encoding. -func ValidateSHA256KeyAuthorization(keyAuthz string, token string, thumbprint string) (bool, error) { - authzContents := token + "." + thumbprint - checksum := sha256.Sum256([]byte(authzContents)) - expectedAuthz := base64.RawURLEncoding.EncodeToString(checksum[:]) - - if keyAuthz != expectedAuthz { - return false, fmt.Errorf("sha256 key authorization was invalid") - } - - return true, nil -} - -// ValidateRawSHA256KeyAuthorization validates that the given keyAuthz from a -// challenge matches our expectation, returning (true, nil) if so, or -// (false, err) if not. -// -// This is for use with TLS challenges, which require the raw hash output. -func ValidateRawSHA256KeyAuthorization(keyAuthz []byte, token string, thumbprint string) (bool, error) { - authzContents := token + "." + thumbprint - expectedAuthz := sha256.Sum256([]byte(authzContents)) - - if len(keyAuthz) != len(expectedAuthz) || subtle.ConstantTimeCompare(expectedAuthz[:], keyAuthz) != 1 { - return false, fmt.Errorf("sha256 key authorization was invalid") - } - - return true, nil -} - -func buildResolver(config *acmeConfigEntry) (*net.Resolver, error) { - if len(config.DNSResolver) == 0 { - return net.DefaultResolver, nil - } - - return &net.Resolver{ - PreferGo: true, - StrictErrors: false, - Dial: func(ctx context.Context, network, address string) (net.Conn, error) { - d := net.Dialer{ - Timeout: 10 * time.Second, - } - return d.DialContext(ctx, network, config.DNSResolver) - }, - }, nil -} - -func buildDialerConfig(config *acmeConfigEntry) (*net.Dialer, error) { - resolver, err := buildResolver(config) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %w", err) - } - - return &net.Dialer{ - Timeout: 10 * time.Second, - KeepAlive: -1 * time.Second, - Resolver: resolver, - }, nil -} - -// Validates a given ACME http-01 challenge against the specified domain, -// per RFC 8555. 
-// -// We attempt to be defensive here against timeouts, extra redirects, &c. -func ValidateHTTP01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) { - path := "http://" + domain + "/.well-known/acme-challenge/" + token - dialer, err := buildDialerConfig(config) - if err != nil { - return false, fmt.Errorf("failed to build dialer: %w", err) - } - - transport := &http.Transport{ - // Only a single request is sent to this server as we do not do any - // batching of validation attempts. There is no need to do an HTTP - // KeepAlive as a result. - DisableKeepAlives: true, - MaxIdleConns: 1, - MaxIdleConnsPerHost: 1, - MaxConnsPerHost: 1, - IdleConnTimeout: 1 * time.Second, - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - - // We'd rather timeout and re-attempt validation later than hang - // too many validators waiting for slow hosts. - DialContext: dialer.DialContext, - ResponseHeaderTimeout: 10 * time.Second, - } - - maxRedirects := 10 - urlLength := 2000 - - client := &http.Client{ - Transport: transport, - CheckRedirect: func(req *http.Request, via []*http.Request) error { - if len(via)+1 >= maxRedirects { - return fmt.Errorf("http-01: too many redirects: %v", len(via)+1) - } - - reqUrlLen := len(req.URL.String()) - if reqUrlLen > urlLength { - return fmt.Errorf("http-01: redirect url length too long: %v", reqUrlLen) - } - - return nil - }, - } - - resp, err := client.Get(path) - if err != nil { - return false, fmt.Errorf("http-01: failed to fetch path %v: %w", path, err) - } - - // We provision a buffer which allows for a variable size challenge, some - // whitespace, and a detection gap for too long of a message. - minExpected := len(token) + 1 + len(thumbprint) - maxExpected := 512 - - defer resp.Body.Close() - - // Attempt to read the body, but don't do so infinitely. 
- body, err := io.ReadAll(io.LimitReader(resp.Body, int64(maxExpected+1))) - if err != nil { - return false, fmt.Errorf("http-01: unexpected error while reading body: %w", err) - } - - if len(body) > maxExpected { - return false, fmt.Errorf("http-01: response too large: received %v > %v bytes", len(body), maxExpected) - } - - if len(body) < minExpected { - return false, fmt.Errorf("http-01: response too small: received %v < %v bytes", len(body), minExpected) - } - - // Per RFC 8555 Section 8.3. HTTP Challenge: - // - // > The server SHOULD ignore whitespace characters at the end of the body. - keyAuthz := string(body) - keyAuthz = strings.TrimSpace(keyAuthz) - - // If we got here, we got no non-EOF error while reading. Try to validate - // the token because we're bounded by a reasonable amount of length. - return ValidateKeyAuthorization(keyAuthz, token, thumbprint) -} - -func ValidateDNS01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) { - // Here, domain is the value from the post-wildcard-processed identifier. - // Per RFC 8555, no difference in validation occurs if a wildcard entry - // is requested or if a non-wildcard entry is requested. - // - // XXX: In this case the DNS server is operator controlled and is assumed - // to be less malicious so the default resolver is used. In the future, - // we'll want to use net.Resolver for two reasons: - // - // 1. To control the actual resolver via ACME configuration, - // 2. To use a context to set stricter timeout limits. 
- resolver, err := buildResolver(config) - if err != nil { - return false, fmt.Errorf("failed to build resolver: %w", err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - name := DNSChallengePrefix + domain - results, err := resolver.LookupTXT(ctx, name) - if err != nil { - return false, fmt.Errorf("dns-01: failed to lookup TXT records for domain (%v) via resolver %v: %w", name, config.DNSResolver, err) - } - - for _, keyAuthz := range results { - ok, _ := ValidateSHA256KeyAuthorization(keyAuthz, token, thumbprint) - if ok { - return true, nil - } - } - - return false, fmt.Errorf("dns-01: challenge failed against %v records", len(results)) -} - -func ValidateTLSALPN01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) { - // This RFC is defined in RFC 8737 Automated Certificate Management - // Environment (ACME) TLS Application‑Layer Protocol Negotiation - // (ALPN) Challenge Extension. - // - // This is conceptually similar to ValidateHTTP01Challenge, but - // uses a TLS connection on port 443 with the specified ALPN - // protocol. - - cfg := &tls.Config{ - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge, the name of the negotiated - // protocol is "acme-tls/1". - NextProtos: []string{ALPNProtocol}, - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > ... and an SNI extension containing only the domain name - // > being validated during the TLS handshake. - // - // According to the Go docs, setting this option (even though - // InsecureSkipVerify=true is also specified), allows us to - // set the SNI extension to this value. - ServerName: domain, - - VerifyConnection: func(connState tls.ConnectionState) error { - // We initiated a fresh connection with no session tickets; - // even if we did have a session ticket, we do not wish to - // use it. 
Verify that the server has not inadvertently - // reused connections between validation attempts or something. - if connState.DidResume { - return fmt.Errorf("server under test incorrectly reported that handshake was resumed when no session cache was provided; refusing to continue") - } - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > The ACME server verifies that during the TLS handshake the - // > application-layer protocol "acme-tls/1" was successfully - // > negotiated (and that the ALPN extension contained only the - // > value "acme-tls/1"). - if connState.NegotiatedProtocol != ALPNProtocol { - return fmt.Errorf("server under test negotiated unexpected ALPN protocol %v", connState.NegotiatedProtocol) - } - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > and that the certificate returned - // - // Because this certificate MUST be self-signed (per earlier - // statement in RFC 8737 Section 3), there is no point in sending - // more than one certificate, and so we will err early here if - // we got more than one. - if len(connState.PeerCertificates) > 1 { - return fmt.Errorf("server under test returned multiple (%v) certificates when we expected only one", len(connState.PeerCertificates)) - } - cert := connState.PeerCertificates[0] - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > The client prepares for validation by constructing a - // > self-signed certificate that MUST contain an acmeIdentifier - // > extension and a subjectAlternativeName extension [RFC5280]. - // - // Verify that this is a self-signed certificate that isn't signed - // by another certificate (i.e., with the same key material but - // different issuer). 
- // NOTE: Do not use cert.CheckSignatureFrom(cert) as we need to bypass the - // checks for the parent certificate having the IsCA basic constraint set. - err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature) - if err != nil { - return fmt.Errorf("server under test returned a non-self-signed certificate: %v", err) - } - - if !bytes.Equal(cert.RawSubject, cert.RawIssuer) { - return fmt.Errorf("server under test returned a non-self-signed certificate: invalid subject (%v) <-> issuer (%v) match", cert.Subject.String(), cert.Issuer.String()) - } - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > The subjectAlternativeName extension MUST contain a single - // > dNSName entry where the value is the domain name being - // > validated. - // - // TODO: this does not validate that there are not other SANs - // with unknown (to Go) OIDs. - if len(cert.DNSNames) != 1 || len(cert.EmailAddresses) > 0 || len(cert.IPAddresses) > 0 || len(cert.URIs) > 0 { - return fmt.Errorf("server under test returned a certificate with incorrect SANs") - } - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > The comparison of dNSNames MUST be case insensitive - // > [RFC4343]. Note that as ACME doesn't support Unicode - // > identifiers, all dNSNames MUST be encoded using the rules - // > of [RFC3492]. - if !strings.EqualFold(cert.DNSNames[0], domain) { - return fmt.Errorf("server under test returned a certificate with unexpected identifier: %v", cert.DNSNames[0]) - } - - // Per above, verify that the acmeIdentifier extension is present - // exactly once and has the correct value. - var foundACMEId bool - for _, ext := range cert.Extensions { - if !ext.Id.Equal(OIDACMEIdentifier) { - continue - } - - // There must be only a single ACME extension. 
- if foundACMEId { - return fmt.Errorf("server under test returned a certificate with multiple acmeIdentifier extensions") - } - foundACMEId = true - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > a critical acmeIdentifier extension - if !ext.Critical { - return fmt.Errorf("server under test returned a certificate with an acmeIdentifier extension marked non-Critical") - } - - var keyAuthz []byte - remainder, err := asn1.Unmarshal(ext.Value, &keyAuthz) - if err != nil { - return fmt.Errorf("server under test returned a certificate with invalid acmeIdentifier extension value: %w", err) - } - if len(remainder) > 0 { - return fmt.Errorf("server under test returned a certificate with invalid acmeIdentifier extension value with additional trailing data") - } - - ok, err := ValidateRawSHA256KeyAuthorization(keyAuthz, token, thumbprint) - if !ok || err != nil { - return fmt.Errorf("server under test returned a certificate with an invalid key authorization (%w)", err) - } - } - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > The ACME server verifies that ... the certificate returned - // > contains: ... a critical acmeIdentifier extension containing - // > the expected SHA-256 digest computed in step 1. - if !foundACMEId { - return fmt.Errorf("server under test returned a certificate without the required acmeIdentifier extension") - } - - // Remove the handled critical extension and validate that we - // have no additional critical extensions left unhandled. - var index int = -1 - for oidIndex, oid := range cert.UnhandledCriticalExtensions { - if oid.Equal(OIDACMEIdentifier) { - index = oidIndex - break - } - } - if index != -1 { - // Unlike the foundACMEId case, this is not a failure; if Go - // updates to "understand" this critical extension, we do not - // wish to fail. 
- cert.UnhandledCriticalExtensions = append(cert.UnhandledCriticalExtensions[0:index], cert.UnhandledCriticalExtensions[index+1:]...) - } - if len(cert.UnhandledCriticalExtensions) > 0 { - return fmt.Errorf("server under test returned a certificate with additional unknown critical extensions (%v)", cert.UnhandledCriticalExtensions) - } - - // All good! - return nil - }, - - // We never want to resume a connection; do not provide session - // cache storage. - ClientSessionCache: nil, - - // Do not trust any system trusted certificates; we're going to be - // manually validating the chain, so specifying a non-empty pool - // here could only cause additional, unnecessary work. - RootCAs: x509.NewCertPool(), - - // Do not bother validating the client's chain; we know it should be - // self-signed. This also disables hostname verification, but we do - // this verification as part of VerifyConnection(...) ourselves. - // - // Per Go docs, this option is only safe in conjunction with - // VerifyConnection which we define above. - InsecureSkipVerify: true, - - // RFC 8737 Section 4. acme-tls/1 Protocol Definition: - // - // > ACME servers that implement "acme-tls/1" MUST only negotiate - // > TLS 1.2 [RFC5246] or higher when connecting to clients for - // > validation. - MinVersion: tls.VersionTLS12, - - // While RFC 8737 does not place restrictions around allowed cipher - // suites, we wish to restrict ourselves to secure defaults. Specify - // the Intermediate guideline from Mozilla's TLS config generator to - // disable obviously weak ciphers. 
- // - // See also: https://ssl-config.mozilla.org/#server=go&version=1.14.4&config=intermediate&guideline=5.7 - CipherSuites: []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - }, - } - - // Build a dialer using our custom DNS resolver, to ensure domains get - // resolved according to configuration. - dialer, err := buildDialerConfig(config) - if err != nil { - return false, fmt.Errorf("failed to build dialer: %w", err) - } - - // Per RFC 8737 Section 3. TLS with Application-Layer Protocol - // Negotiation (TLS ALPN) Challenge: - // - // > 2. The ACME server resolves the domain name being validated and - // > chooses one of the IP addresses returned for validation (the - // > server MAY validate against multiple addresses if more than - // > one is returned). - // > 3. The ACME server initiates a TLS connection to the chosen IP - // > address. This connection MUST use TCP port 443. - address := fmt.Sprintf("%v:"+ALPNPort, domain) - conn, err := dialer.Dial("tcp", address) - if err != nil { - return false, fmt.Errorf("tls-alpn-01: failed to dial host: %w", err) - } - - // Initiate the connection to the remote peer. - client := tls.Client(conn, cfg) - - // We intentionally swallow this error as it isn't useful to the - // underlying protocol we perform here. Notably, per RFC 8737 - // Section 4. acme-tls/1 Protocol Definition: - // - // > Once the handshake is completed, the client MUST NOT exchange - // > any further data with the server and MUST immediately close the - // > connection. ... Because of this, an ACME server MAY choose to - // > withhold authorization if either the certificate signature is - // > invalid or the handshake doesn't fully complete. 
- defer client.Close() - - // We wish to put time bounds on the total time the handshake can - // stall for, so build a connection context here. - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - // See note above about why we can allow Handshake to complete - // successfully. - if err := client.HandshakeContext(ctx); err != nil { - return false, fmt.Errorf("tls-alpn-01: failed to perform handshake: %w", err) - } - return true, nil -} +```release-note:change +ui: add subnav for replication items +``` diff --git a/builtin/logical/pki/backend_test.go b/builtin/logical/pki/backend_test.go index 824d3cf8110d..3533146b7d23 100644 --- a/builtin/logical/pki/backend_test.go +++ b/builtin/logical/pki/backend_test.go @@ -1,7149 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package pki - -import ( - "bytes" - "context" - "crypto" - "crypto/ecdsa" - "crypto/ed25519" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/base64" - "encoding/hex" - "encoding/json" - "encoding/pem" - "fmt" - "math" - "math/big" - mathrand "math/rand" - "net" - "net/url" - "os" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/hashicorp/vault/helper/testhelpers/teststorage" - "golang.org/x/exp/maps" - - "github.com/hashicorp/vault/helper/testhelpers" - - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - - "github.com/stretchr/testify/require" - - "github.com/armon/go-metrics" - "github.com/fatih/structs" - "github.com/go-test/deep" - "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/api" - auth "github.com/hashicorp/vault/api/auth/userpass" - "github.com/hashicorp/vault/builtin/credential/userpass" - logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/certutil" - 
"github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - "github.com/mitchellh/mapstructure" - "golang.org/x/net/idna" -) - -var stepCount = 0 - -// From builtin/credential/cert/test-fixtures/root/rootcacert.pem -const ( - rootCACertPEM = `-----BEGIN CERTIFICATE----- -MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL -BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw -MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 -Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 -z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x -AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb -6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH -SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G -A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx -7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc -BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA -wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 -U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa -cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N -ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ -t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk -zehNe5dFTjFpylg1o6b8Ow== ------END CERTIFICATE-----` - rootCAKeyPEM = `-----BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p -t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3 -BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w -/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv -0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi -18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb -ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn 
-8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f -nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8 -2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t -grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc -bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9 -0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN -ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf -lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1 -lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj -AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG -ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib -thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU -4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb -iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO -tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y -LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc -4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX -OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8= ------END RSA PRIVATE KEY-----` -) - -func TestPKI_RequireCN(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "myvault.com", - }) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("expected ca info") - } - - // Create a role which does require CN (default) - _, err = CBWrite(b, s, "roles/example", map[string]interface{}{ - "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com", - "allow_bare_domains": true, - "allow_subdomains": true, - "max_ttl": "2h", - }) - if err != nil { - t.Fatal(err) - } - - // Issue a cert with require_cn set to true and with common name supplied. - // It should succeed. 
- resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ - "common_name": "foobar.com", - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issue/example"), logical.UpdateOperation), resp, true) - if err != nil { - t.Fatal(err) - } - - // Issue a cert with require_cn set to true and with out supplying the - // common name. It should error out. - _, err = CBWrite(b, s, "issue/example", map[string]interface{}{}) - if err == nil { - t.Fatalf("expected an error due to missing common_name") - } - - // Modify the role to make the common name optional - _, err = CBWrite(b, s, "roles/example", map[string]interface{}{ - "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com", - "allow_bare_domains": true, - "allow_subdomains": true, - "max_ttl": "2h", - "require_cn": false, - }) - if err != nil { - t.Fatal(err) - } - - // Issue a cert with require_cn set to false and without supplying the - // common name. It should succeed. - resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{}) - if err != nil { - t.Fatal(err) - } - - if resp.Data["certificate"] == "" { - t.Fatalf("expected a cert to be generated") - } - - // Issue a cert with require_cn set to false and with a common name. It - // should succeed. 
- resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{}) - if err != nil { - t.Fatal(err) - } - - if resp.Data["certificate"] == "" { - t.Fatalf("expected a cert to be generated") - } -} - -func TestPKI_DeviceCert(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "myvault.com", - "not_after": "9999-12-31T23:59:59Z", - "not_before_duration": "2h", - }) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("expected ca info") - } - var certBundle certutil.CertBundle - err = mapstructure.Decode(resp.Data, &certBundle) - if err != nil { - t.Fatal(err) - } - - parsedCertBundle, err := certBundle.ToParsedCertBundle() - if err != nil { - t.Fatal(err) - } - cert := parsedCertBundle.Certificate - notAfter := cert.NotAfter.Format(time.RFC3339) - if notAfter != "9999-12-31T23:59:59Z" { - t.Fatalf("not after from certificate: %v is not matching with input parameter: %v", cert.NotAfter, "9999-12-31T23:59:59Z") - } - if math.Abs(float64(time.Now().Add(-2*time.Hour).Unix()-cert.NotBefore.Unix())) > 10 { - t.Fatalf("root/generate/internal did not properly set validity period (notBefore): was %v vs expected %v", cert.NotBefore, time.Now().Add(-2*time.Hour)) - } - - // Create a role which does require CN (default) - _, err = CBWrite(b, s, "roles/example", map[string]interface{}{ - "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com", - "allow_bare_domains": true, - "allow_subdomains": true, - "not_after": "9999-12-31T23:59:59Z", - }) - if err != nil { - t.Fatal(err) - } - - // Issue a cert with require_cn set to true and with common name supplied. - // It should succeed. 
- resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ - "common_name": "foobar.com", - }) - if err != nil { - t.Fatal(err) - } - err = mapstructure.Decode(resp.Data, &certBundle) - if err != nil { - t.Fatal(err) - } - - parsedCertBundle, err = certBundle.ToParsedCertBundle() - if err != nil { - t.Fatal(err) - } - cert = parsedCertBundle.Certificate - notAfter = cert.NotAfter.Format(time.RFC3339) - if notAfter != "9999-12-31T23:59:59Z" { - t.Fatal(fmt.Errorf("not after from certificate is not matching with input parameter")) - } -} - -func TestBackend_InvalidParameter(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - _, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "myvault.com", - "not_after": "9999-12-31T23:59:59Z", - "ttl": "25h", - }) - if err == nil { - t.Fatal(err) - } - - _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "myvault.com", - "not_after": "9999-12-31T23:59:59", - }) - if err == nil { - t.Fatal(err) - } -} - -func TestBackend_CSRValues(t *testing.T) { - t.Parallel() - initTest.Do(setCerts) - b, _ := CreateBackendWithStorage(t) - - testCase := logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{}, - } - - intdata := map[string]interface{}{} - reqdata := map[string]interface{}{} - testCase.Steps = append(testCase.Steps, generateCSRSteps(t, ecCACert, ecCAKey, intdata, reqdata)...) - - logicaltest.Test(t, testCase) -} - -func TestBackend_URLsCRUD(t *testing.T) { - t.Parallel() - initTest.Do(setCerts) - b, _ := CreateBackendWithStorage(t) - - testCase := logicaltest.TestCase{ - LogicalBackend: b, - Steps: []logicaltest.TestStep{}, - } - - intdata := map[string]interface{}{} - reqdata := map[string]interface{}{} - testCase.Steps = append(testCase.Steps, generateURLSteps(t, ecCACert, ecCAKey, intdata, reqdata)...) 
// Generates and tests steps that walk through the various possibilities
// of role flags to ensure that they are properly restricted
func TestBackend_Roles(t *testing.T) {
	t.Parallel()
	// One sub-test per CA key type, each run both directly and via CSR.
	cases := []struct {
		name      string
		key, cert *string
		useCSR    bool
	}{
		{"RSA", &rsaCAKey, &rsaCACert, false},
		{"RSACSR", &rsaCAKey, &rsaCACert, true},
		{"EC", &ecCAKey, &ecCACert, false},
		{"ECCSR", &ecCAKey, &ecCACert, true},
		{"ED", &edCAKey, &edCACert, false},
		{"EDCSR", &edCAKey, &edCACert, true},
	}

	for _, tc := range cases {
		tc := tc // copy the range variable for the subtest closure

		t.Run(tc.name, func(t *testing.T) {
			initTest.Do(setCerts)
			b, _ := CreateBackendWithStorage(t)

			// Seed the backend with the case's CA bundle, then append the
			// generated role-permutation steps.
			testCase := logicaltest.TestCase{
				LogicalBackend: b,
				Steps: []logicaltest.TestStep{
					{
						Operation: logical.UpdateOperation,
						Path:      "config/ca",
						Data: map[string]interface{}{
							"pem_bundle": *tc.key + "\n" + *tc.cert,
						},
					},
				},
			}

			testCase.Steps = append(testCase.Steps, generateRoleSteps(t, tc.useCSR)...)
			if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 {
				// Verbose mode: log a compact summary of every step. Zero
				// values are omitted, and multi-line strings (e.g. PEM
				// blobs) are truncated to their first line.
				for i, v := range testCase.Steps {
					data := map[string]interface{}{}
					var keys []string
					for k := range v.Data {
						keys = append(keys, k)
					}
					// Sort for deterministic log output.
					sort.Strings(keys)
					for _, k := range keys {
						interf := v.Data[k]
						switch v := interf.(type) {
						case bool:
							if !v {
								continue
							}
						case int:
							if v == 0 {
								continue
							}
						case []string:
							if len(v) == 0 {
								continue
							}
						case string:
							if v == "" {
								continue
							}
							lines := strings.Split(v, "\n")
							if len(lines) > 1 {
								data[k] = lines[0] + " ... (truncated)"
								continue
							}
						}
						data[k] = interf

					}
					t.Logf("Step %d:\n%s %s err=%v %+v\n\n", i+1, v.Operation, v.Path, v.ErrorOk, data)
				}
			}

			logicaltest.Test(t, testCase)
		})
	}
}
// Performs some validity checking on the returned bundles
//
// checkCertsAndPrivateKey parses certBundle, optionally splices in the
// caller-provided private key (for CSR-based flows where the key never
// leaves the client), and verifies the key type matches keyType, that the
// certificate carries exactly the requested usage/extUsage, and that its
// notAfter falls within 25 seconds of now+validity. Returns the parsed
// bundle on success.
func checkCertsAndPrivateKey(keyType string, key crypto.Signer, usage x509.KeyUsage, extUsage x509.ExtKeyUsage, validity time.Duration, certBundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) {
	parsedCertBundle, err := certBundle.ToParsedCertBundle()
	if err != nil {
		return nil, fmt.Errorf("error parsing cert bundle: %s", err)
	}

	// Splice in the externally-held key, re-encoding it in the format the
	// bundle expects for each key type.
	if key != nil {
		switch keyType {
		case "rsa":
			parsedCertBundle.PrivateKeyType = certutil.RSAPrivateKey
			parsedCertBundle.PrivateKey = key
			parsedCertBundle.PrivateKeyBytes = x509.MarshalPKCS1PrivateKey(key.(*rsa.PrivateKey))
		case "ec":
			parsedCertBundle.PrivateKeyType = certutil.ECPrivateKey
			parsedCertBundle.PrivateKey = key
			parsedCertBundle.PrivateKeyBytes, err = x509.MarshalECPrivateKey(key.(*ecdsa.PrivateKey))
			if err != nil {
				return nil, fmt.Errorf("error parsing EC key: %s", err)
			}
		case "ed25519":
			parsedCertBundle.PrivateKeyType = certutil.Ed25519PrivateKey
			parsedCertBundle.PrivateKey = key
			parsedCertBundle.PrivateKeyBytes, err = x509.MarshalPKCS8PrivateKey(key.(ed25519.PrivateKey))
			if err != nil {
				return nil, fmt.Errorf("error parsing Ed25519 key: %s", err)
			}
		}
	}

	// Basic completeness checks on the parsed bundle.
	switch {
	case parsedCertBundle.Certificate == nil:
		return nil, fmt.Errorf("did not find a certificate in the cert bundle")
	case len(parsedCertBundle.CAChain) == 0 || parsedCertBundle.CAChain[0].Certificate == nil:
		return nil, fmt.Errorf("did not find a CA in the cert bundle")
	case parsedCertBundle.PrivateKey == nil:
		return nil, fmt.Errorf("did not find a private key in the cert bundle")
	case parsedCertBundle.PrivateKeyType == certutil.UnknownPrivateKey:
		return nil, fmt.Errorf("could not figure out type of private key")
	}

	// Any key-type/keyType mismatch falls through to the single shared
	// error return.
	switch {
	case parsedCertBundle.PrivateKeyType == certutil.Ed25519PrivateKey && keyType != "ed25519":
		fallthrough
	case parsedCertBundle.PrivateKeyType == certutil.RSAPrivateKey && keyType != "rsa":
		fallthrough
	case parsedCertBundle.PrivateKeyType == certutil.ECPrivateKey && keyType != "ec":
		return nil, fmt.Errorf("given key type does not match type found in bundle")
	}

	cert := parsedCertBundle.Certificate

	if usage != cert.KeyUsage {
		return nil, fmt.Errorf("expected usage of %#v, got %#v; ext usage is %#v", usage, cert.KeyUsage, cert.ExtKeyUsage)
	}

	// There should only be one ext usage type, because only one is requested
	// in the tests
	if len(cert.ExtKeyUsage) != 1 {
		return nil, fmt.Errorf("got wrong size key usage in generated cert; expected 1, values are %#v", cert.ExtKeyUsage)
	}
	switch extUsage {
	case x509.ExtKeyUsageEmailProtection:
		if cert.ExtKeyUsage[0] != x509.ExtKeyUsageEmailProtection {
			return nil, fmt.Errorf("bad extended key usage")
		}
	case x509.ExtKeyUsageServerAuth:
		if cert.ExtKeyUsage[0] != x509.ExtKeyUsageServerAuth {
			return nil, fmt.Errorf("bad extended key usage")
		}
	case x509.ExtKeyUsageClientAuth:
		if cert.ExtKeyUsage[0] != x509.ExtKeyUsageClientAuth {
			return nil, fmt.Errorf("bad extended key usage")
		}
	case x509.ExtKeyUsageCodeSigning:
		if cert.ExtKeyUsage[0] != x509.ExtKeyUsageCodeSigning {
			return nil, fmt.Errorf("bad extended key usage")
		}
	}

	// TODO: We incremented 20->25 due to CircleCI execution
	// being slow and pausing this test. We might consider recording the
	// actual issuance time of the cert and calculating the expected
	// validity period +/- fuzz, but that'd require recording and passing
	// through more information.
	if math.Abs(float64(time.Now().Add(validity).Unix()-cert.NotAfter.Unix())) > 25 {
		return nil, fmt.Errorf("certificate validity end: %s; expected within 25 seconds of %s", cert.NotAfter.Format(time.RFC3339), time.Now().Add(validity).Format(time.RFC3339))
	}

	return parsedCertBundle, nil
}
strings.Join(expected.CRLDistributionPoints, ","), - "ocsp_servers": strings.Join(expected.OCSPServers, ","), - }, - }, - - { - Operation: logical.ReadOperation, - Path: "config/urls", - Check: func(resp *logical.Response) error { - if resp.Data == nil { - return fmt.Errorf("no data returned") - } - var entries certutil.URLEntries - err := mapstructure.Decode(resp.Data, &entries) - if err != nil { - return err - } - if !reflect.DeepEqual(entries, expected) { - return fmt.Errorf("expected urls\n%#v\ndoes not match provided\n%#v\n", expected, entries) - } - - return nil - }, - }, - - { - Operation: logical.UpdateOperation, - Path: "root/sign-intermediate", - Data: map[string]interface{}{ - "common_name": "intermediate.cert.com", - "csr": csrPem1024, - "format": "der", - }, - ErrorOk: true, - Check: func(resp *logical.Response) error { - if !resp.IsError() { - return fmt.Errorf("expected an error response but did not get one") - } - if !strings.Contains(resp.Data["error"].(string), "2048") { - return fmt.Errorf("received an error but not about a 1024-bit key, error was: %s", resp.Data["error"].(string)) - } - - return nil - }, - }, - - { - Operation: logical.UpdateOperation, - Path: "root/sign-intermediate", - Data: map[string]interface{}{ - "common_name": "intermediate.cert.com", - "csr": csrPem2048, - "signature_bits": 512, - "format": "der", - "not_before_duration": "2h", - // Let's Encrypt -- R3 SKID - "skid": "14:2E:B3:17:B7:58:56:CB:AE:50:09:40:E6:1F:AF:9D:8B:14:C2:C6", - }, - Check: func(resp *logical.Response) error { - certString := resp.Data["certificate"].(string) - if certString == "" { - return fmt.Errorf("no certificate returned") - } - if resp.Secret != nil && resp.Secret.LeaseID != "" { - return fmt.Errorf("signed intermediate returned with a lease") - } - certBytes, _ := base64.StdEncoding.DecodeString(certString) - certs, err := x509.ParseCertificates(certBytes) - if err != nil { - return fmt.Errorf("returned cert cannot be parsed: %w", err) - } - if 
len(certs) != 1 { - return fmt.Errorf("unexpected returned length of certificates: %d", len(certs)) - } - cert := certs[0] - - skid, _ := hex.DecodeString("142EB317B75856CBAE500940E61FAF9D8B14C2C6") - - switch { - case !reflect.DeepEqual(expected.IssuingCertificates, cert.IssuingCertificateURL): - return fmt.Errorf("IssuingCertificateURL:\nexpected\n%#v\ngot\n%#v\n", expected.IssuingCertificates, cert.IssuingCertificateURL) - case !reflect.DeepEqual(expected.CRLDistributionPoints, cert.CRLDistributionPoints): - return fmt.Errorf("CRLDistributionPoints:\nexpected\n%#v\ngot\n%#v\n", expected.CRLDistributionPoints, cert.CRLDistributionPoints) - case !reflect.DeepEqual(expected.OCSPServers, cert.OCSPServer): - return fmt.Errorf("OCSPServer:\nexpected\n%#v\ngot\n%#v\n", expected.OCSPServers, cert.OCSPServer) - case !reflect.DeepEqual([]string{"intermediate.cert.com"}, cert.DNSNames): - return fmt.Errorf("DNSNames\nexpected\n%#v\ngot\n%#v\n", []string{"intermediate.cert.com"}, cert.DNSNames) - case !reflect.DeepEqual(x509.SHA512WithRSA, cert.SignatureAlgorithm): - return fmt.Errorf("Signature Algorithm:\nexpected\n%#v\ngot\n%#v\n", x509.SHA512WithRSA, cert.SignatureAlgorithm) - case !reflect.DeepEqual(skid, cert.SubjectKeyId): - return fmt.Errorf("SKID:\nexpected\n%#v\ngot\n%#v\n", skid, cert.SubjectKeyId) - } - - if math.Abs(float64(time.Now().Add(-2*time.Hour).Unix()-cert.NotBefore.Unix())) > 10 { - t.Fatalf("root/sign-intermediate did not properly set validity period (notBefore): was %v vs expected %v", cert.NotBefore, time.Now().Add(-2*time.Hour)) - } - - return nil - }, - }, - - // Same as above but exclude adding to sans - { - Operation: logical.UpdateOperation, - Path: "root/sign-intermediate", - Data: map[string]interface{}{ - "common_name": "intermediate.cert.com", - "csr": csrPem2048, - "format": "der", - "exclude_cn_from_sans": true, - }, - Check: func(resp *logical.Response) error { - certString := resp.Data["certificate"].(string) - if certString == "" { - 
return fmt.Errorf("no certificate returned") - } - if resp.Secret != nil && resp.Secret.LeaseID != "" { - return fmt.Errorf("signed intermediate returned with a lease") - } - certBytes, _ := base64.StdEncoding.DecodeString(certString) - certs, err := x509.ParseCertificates(certBytes) - if err != nil { - return fmt.Errorf("returned cert cannot be parsed: %w", err) - } - if len(certs) != 1 { - return fmt.Errorf("unexpected returned length of certificates: %d", len(certs)) - } - cert := certs[0] - - switch { - case !reflect.DeepEqual(expected.IssuingCertificates, cert.IssuingCertificateURL): - return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.IssuingCertificates, cert.IssuingCertificateURL) - case !reflect.DeepEqual(expected.CRLDistributionPoints, cert.CRLDistributionPoints): - return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.CRLDistributionPoints, cert.CRLDistributionPoints) - case !reflect.DeepEqual(expected.OCSPServers, cert.OCSPServer): - return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.OCSPServers, cert.OCSPServer) - case !reflect.DeepEqual([]string(nil), cert.DNSNames): - return fmt.Errorf("expected\n%#v\ngot\n%#v\n", []string(nil), cert.DNSNames) - } - - return nil - }, - }, - } - return ret -} - -func generateCSR(t *testing.T, csrTemplate *x509.CertificateRequest, keyType string, keyBits int) (interface{}, []byte, string) { - t.Helper() - - var priv interface{} - var err error - switch keyType { - case "rsa": - priv, err = rsa.GenerateKey(rand.Reader, keyBits) - case "ec": - switch keyBits { - case 224: - priv, err = ecdsa.GenerateKey(elliptic.P224(), rand.Reader) - case 256: - priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - case 384: - priv, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader) - case 521: - priv, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader) - default: - t.Fatalf("Got unknown ec< key bits: %v", keyBits) - } - case "ed25519": - _, priv, err = ed25519.GenerateKey(rand.Reader) - } - - if err != nil { - 
t.Fatalf("Got error generating private key for CSR: %v", err) - } - - csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, priv) - if err != nil { - t.Fatalf("Got error generating CSR: %v", err) - } - - csrPem := strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE REQUEST", - Bytes: csr, - }))) - - return priv, csr, csrPem -} - -func generateCSRSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[string]interface{}) []logicaltest.TestStep { - csrTemplate, csrPem := generateTestCsr(t, certutil.RSAPrivateKey, 2048) - - ret := []logicaltest.TestStep{ - { - Operation: logical.UpdateOperation, - Path: "root/generate/exported", - Data: map[string]interface{}{ - "common_name": "Root Cert", - "ttl": "180h", - "max_path_length": 0, - }, - }, - - { - Operation: logical.UpdateOperation, - Path: "root/sign-intermediate", - Data: map[string]interface{}{ - "use_csr_values": true, - "csr": csrPem, - "format": "der", - }, - ErrorOk: true, - }, - - { - Operation: logical.DeleteOperation, - Path: "root", - }, - - { - Operation: logical.UpdateOperation, - Path: "root/generate/exported", - Data: map[string]interface{}{ - "common_name": "Root Cert", - "ttl": "180h", - "max_path_length": 1, - }, - }, - - { - Operation: logical.UpdateOperation, - Path: "root/sign-intermediate", - Data: map[string]interface{}{ - "use_csr_values": true, - "csr": csrPem, - "format": "der", - }, - Check: func(resp *logical.Response) error { - certString := resp.Data["certificate"].(string) - if certString == "" { - return fmt.Errorf("no certificate returned") - } - certBytes, _ := base64.StdEncoding.DecodeString(certString) - certs, err := x509.ParseCertificates(certBytes) - if err != nil { - return fmt.Errorf("returned cert cannot be parsed: %w", err) - } - if len(certs) != 1 { - return fmt.Errorf("unexpected returned length of certificates: %d", len(certs)) - } - cert := certs[0] - - if cert.MaxPathLen != 0 { - return fmt.Errorf("max path length of %d does 
not match the requested of 3", cert.MaxPathLen) - } - if !cert.MaxPathLenZero { - return fmt.Errorf("max path length zero is not set") - } - - // We need to set these as they are filled in with unparsed values in the final cert - csrTemplate.Subject.Names = cert.Subject.Names - csrTemplate.Subject.ExtraNames = cert.Subject.ExtraNames - - switch { - case !reflect.DeepEqual(cert.Subject, csrTemplate.Subject): - return fmt.Errorf("cert subject\n%#v\ndoes not match csr subject\n%#v\n", cert.Subject, csrTemplate.Subject) - case !reflect.DeepEqual(cert.DNSNames, csrTemplate.DNSNames): - return fmt.Errorf("cert dns names\n%#v\ndoes not match csr dns names\n%#v\n", cert.DNSNames, csrTemplate.DNSNames) - case !reflect.DeepEqual(cert.EmailAddresses, csrTemplate.EmailAddresses): - return fmt.Errorf("cert email addresses\n%#v\ndoes not match csr email addresses\n%#v\n", cert.EmailAddresses, csrTemplate.EmailAddresses) - case !reflect.DeepEqual(cert.IPAddresses, csrTemplate.IPAddresses): - return fmt.Errorf("cert ip addresses\n%#v\ndoes not match csr ip addresses\n%#v\n", cert.IPAddresses, csrTemplate.IPAddresses) - } - return nil - }, - }, - } - return ret -} - -func generateTestCsr(t *testing.T, keyType certutil.PrivateKeyType, keyBits int) (x509.CertificateRequest, string) { - t.Helper() - - csrTemplate := x509.CertificateRequest{ - Subject: pkix.Name{ - Country: []string{"MyCountry"}, - PostalCode: []string{"MyPostalCode"}, - SerialNumber: "MySerialNumber", - CommonName: "my@example.com", - }, - DNSNames: []string{ - "name1.example.com", - "name2.example.com", - "name3.example.com", - }, - EmailAddresses: []string{ - "name1@example.com", - "name2@example.com", - "name3@example.com", - }, - IPAddresses: []net.IP{ - net.ParseIP("::ff:1:2:3:4"), - net.ParseIP("::ff:5:6:7:8"), - }, - } - - _, _, csrPem := generateCSR(t, &csrTemplate, string(keyType), keyBits) - return csrTemplate, csrPem -} - -// Generates steps to test out various role permutations -func generateRoleSteps(t 
*testing.T, useCSRs bool) []logicaltest.TestStep { - roleVals := roleEntry{ - MaxTTL: 12 * time.Hour, - KeyType: "rsa", - KeyBits: 2048, - RequireCN: true, - AllowWildcardCertificates: new(bool), - } - *roleVals.AllowWildcardCertificates = true - - issueVals := certutil.IssueData{} - ret := []logicaltest.TestStep{} - - roleTestStep := logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "roles/test", - } - var issueTestStep logicaltest.TestStep - if useCSRs { - issueTestStep = logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "sign/test", - } - } else { - issueTestStep = logicaltest.TestStep{ - Operation: logical.UpdateOperation, - Path: "issue/test", - } - } - - generatedRSAKeys := map[int]crypto.Signer{} - generatedECKeys := map[int]crypto.Signer{} - generatedEdKeys := map[int]crypto.Signer{} - /* - // For the number of tests being run, a seed of 1 has been tested - // to hit all of the various values below. However, for normal - // testing we use a randomized time for maximum fuzziness. 
- */ - var seed int64 = 1 - fixedSeed := os.Getenv("VAULT_PKITESTS_FIXED_SEED") - if len(fixedSeed) == 0 { - seed = time.Now().UnixNano() - } else { - var err error - seed, err = strconv.ParseInt(fixedSeed, 10, 64) - if err != nil { - t.Fatalf("error parsing fixed seed of %s: %v", fixedSeed, err) - } - } - mathRand := mathrand.New(mathrand.NewSource(seed)) - // t.Logf("seed under test: %v", seed) - - // Used by tests not toggling common names to turn off the behavior of random key bit fuzziness - keybitSizeRandOff := false - - genericErrorOkCheck := func(resp *logical.Response) error { - if resp.IsError() { - return nil - } - return fmt.Errorf("expected an error, but did not seem to get one") - } - - // Adds tests with the currently configured issue/role information - addTests := func(testCheck logicaltest.TestCheckFunc) { - stepCount++ - // t.Logf("test step %d\nrole vals: %#v\n", stepCount, roleVals) - stepCount++ - // t.Logf("test step %d\nissue vals: %#v\n", stepCount, issueTestStep) - roleTestStep.Data = roleVals.ToResponseData() - roleTestStep.Data["generate_lease"] = false - ret = append(ret, roleTestStep) - issueTestStep.Data = structs.New(issueVals).Map() - switch { - case issueTestStep.ErrorOk: - issueTestStep.Check = genericErrorOkCheck - case testCheck != nil: - issueTestStep.Check = testCheck - default: - issueTestStep.Check = nil - } - ret = append(ret, issueTestStep) - } - - getCountryCheck := func(role roleEntry) logicaltest.TestCheckFunc { - var certBundle certutil.CertBundle - return func(resp *logical.Response) error { - err := mapstructure.Decode(resp.Data, &certBundle) - if err != nil { - return err - } - parsedCertBundle, err := certBundle.ToParsedCertBundle() - if err != nil { - return fmt.Errorf("error checking generated certificate: %s", err) - } - cert := parsedCertBundle.Certificate - - expected := strutil.RemoveDuplicates(role.Country, true) - if !reflect.DeepEqual(cert.Subject.Country, expected) { - return fmt.Errorf("error: returned 
certificate has Country of %s but %s was specified in the role", cert.Subject.Country, expected) - } - return nil - } - } - - getOuCheck := func(role roleEntry) logicaltest.TestCheckFunc { - var certBundle certutil.CertBundle - return func(resp *logical.Response) error { - err := mapstructure.Decode(resp.Data, &certBundle) - if err != nil { - return err - } - parsedCertBundle, err := certBundle.ToParsedCertBundle() - if err != nil { - return fmt.Errorf("error checking generated certificate: %s", err) - } - cert := parsedCertBundle.Certificate - - expected := strutil.RemoveDuplicatesStable(role.OU, true) - if !reflect.DeepEqual(cert.Subject.OrganizationalUnit, expected) { - return fmt.Errorf("error: returned certificate has OU of %s but %s was specified in the role", cert.Subject.OrganizationalUnit, expected) - } - return nil - } - } - - getOrganizationCheck := func(role roleEntry) logicaltest.TestCheckFunc { - var certBundle certutil.CertBundle - return func(resp *logical.Response) error { - err := mapstructure.Decode(resp.Data, &certBundle) - if err != nil { - return err - } - parsedCertBundle, err := certBundle.ToParsedCertBundle() - if err != nil { - return fmt.Errorf("error checking generated certificate: %s", err) - } - cert := parsedCertBundle.Certificate - - expected := strutil.RemoveDuplicates(role.Organization, true) - if !reflect.DeepEqual(cert.Subject.Organization, expected) { - return fmt.Errorf("error: returned certificate has Organization of %s but %s was specified in the role", cert.Subject.Organization, expected) - } - return nil - } - } - - getLocalityCheck := func(role roleEntry) logicaltest.TestCheckFunc { - var certBundle certutil.CertBundle - return func(resp *logical.Response) error { - err := mapstructure.Decode(resp.Data, &certBundle) - if err != nil { - return err - } - parsedCertBundle, err := certBundle.ToParsedCertBundle() - if err != nil { - return fmt.Errorf("error checking generated certificate: %s", err) - } - cert := 
parsedCertBundle.Certificate - - expected := strutil.RemoveDuplicates(role.Locality, true) - if !reflect.DeepEqual(cert.Subject.Locality, expected) { - return fmt.Errorf("error: returned certificate has Locality of %s but %s was specified in the role", cert.Subject.Locality, expected) - } - return nil - } - } - - getProvinceCheck := func(role roleEntry) logicaltest.TestCheckFunc { - var certBundle certutil.CertBundle - return func(resp *logical.Response) error { - err := mapstructure.Decode(resp.Data, &certBundle) - if err != nil { - return err - } - parsedCertBundle, err := certBundle.ToParsedCertBundle() - if err != nil { - return fmt.Errorf("error checking generated certificate: %s", err) - } - cert := parsedCertBundle.Certificate - - expected := strutil.RemoveDuplicates(role.Province, true) - if !reflect.DeepEqual(cert.Subject.Province, expected) { - return fmt.Errorf("error: returned certificate has Province of %s but %s was specified in the role", cert.Subject.Province, expected) - } - return nil - } - } - - getStreetAddressCheck := func(role roleEntry) logicaltest.TestCheckFunc { - var certBundle certutil.CertBundle - return func(resp *logical.Response) error { - err := mapstructure.Decode(resp.Data, &certBundle) - if err != nil { - return err - } - parsedCertBundle, err := certBundle.ToParsedCertBundle() - if err != nil { - return fmt.Errorf("error checking generated certificate: %s", err) - } - cert := parsedCertBundle.Certificate - - expected := strutil.RemoveDuplicates(role.StreetAddress, true) - if !reflect.DeepEqual(cert.Subject.StreetAddress, expected) { - return fmt.Errorf("error: returned certificate has StreetAddress of %s but %s was specified in the role", cert.Subject.StreetAddress, expected) - } - return nil - } - } - - getPostalCodeCheck := func(role roleEntry) logicaltest.TestCheckFunc { - var certBundle certutil.CertBundle - return func(resp *logical.Response) error { - err := mapstructure.Decode(resp.Data, &certBundle) - if err != nil { - 
return err - } - parsedCertBundle, err := certBundle.ToParsedCertBundle() - if err != nil { - return fmt.Errorf("error checking generated certificate: %s", err) - } - cert := parsedCertBundle.Certificate - - expected := strutil.RemoveDuplicates(role.PostalCode, true) - if !reflect.DeepEqual(cert.Subject.PostalCode, expected) { - return fmt.Errorf("error: returned certificate has PostalCode of %s but %s was specified in the role", cert.Subject.PostalCode, expected) - } - return nil - } - } - - getNotBeforeCheck := func(role roleEntry) logicaltest.TestCheckFunc { - var certBundle certutil.CertBundle - return func(resp *logical.Response) error { - err := mapstructure.Decode(resp.Data, &certBundle) - if err != nil { - return err - } - parsedCertBundle, err := certBundle.ToParsedCertBundle() - if err != nil { - return fmt.Errorf("error checking generated certificate: %s", err) - } - cert := parsedCertBundle.Certificate - - actualDiff := time.Since(cert.NotBefore) - certRoleDiff := (role.NotBeforeDuration - actualDiff).Truncate(time.Second) - // These times get truncated, so give a 1 second buffer on each side - if certRoleDiff >= -1*time.Second && certRoleDiff <= 1*time.Second { - return nil - } - return fmt.Errorf("validity period out of range diff: %v", certRoleDiff) - } - } - - // Returns a TestCheckFunc that performs various validity checks on the - // returned certificate information, mostly within checkCertsAndPrivateKey - getCnCheck := func(name string, role roleEntry, key crypto.Signer, usage x509.KeyUsage, extUsage x509.ExtKeyUsage, validity time.Duration) logicaltest.TestCheckFunc { - var certBundle certutil.CertBundle - return func(resp *logical.Response) error { - err := mapstructure.Decode(resp.Data, &certBundle) - if err != nil { - return err - } - parsedCertBundle, err := checkCertsAndPrivateKey(role.KeyType, key, usage, extUsage, validity, &certBundle) - if err != nil { - return fmt.Errorf("error checking generated certificate: %s", err) - } - cert := 
parsedCertBundle.Certificate - if cert.Subject.CommonName != name { - return fmt.Errorf("error: returned certificate has CN of %s but %s was requested", cert.Subject.CommonName, name) - } - if strings.Contains(cert.Subject.CommonName, "@") { - if len(cert.DNSNames) != 0 || len(cert.EmailAddresses) != 1 { - return fmt.Errorf("error: found more than one DNS SAN or not one Email SAN but only one was requested, cert.DNSNames = %#v, cert.EmailAddresses = %#v", cert.DNSNames, cert.EmailAddresses) - } - } else { - if len(cert.DNSNames) != 1 || len(cert.EmailAddresses) != 0 { - return fmt.Errorf("error: found more than one Email SAN or not one DNS SAN but only one was requested, cert.DNSNames = %#v, cert.EmailAddresses = %#v", cert.DNSNames, cert.EmailAddresses) - } - } - var retName string - if len(cert.DNSNames) > 0 { - retName = cert.DNSNames[0] - } - if len(cert.EmailAddresses) > 0 { - retName = cert.EmailAddresses[0] - } - if retName != name { - // Check IDNA - p := idna.New( - idna.StrictDomainName(true), - idna.VerifyDNSLength(true), - ) - converted, err := p.ToUnicode(retName) - if err != nil { - t.Fatal(err) - } - if converted != name { - return fmt.Errorf("error: returned certificate has a DNS SAN of %s (from idna: %s) but %s was requested", retName, converted, name) - } - } - return nil - } - } - - type csrPlan struct { - errorOk bool - roleKeyBits int - cert string - privKey crypto.Signer - } - - getCsr := func(keyType string, keyBits int, csrTemplate *x509.CertificateRequest) (*pem.Block, crypto.Signer) { - var privKey crypto.Signer - var ok bool - switch keyType { - case "rsa": - privKey, ok = generatedRSAKeys[keyBits] - if !ok { - privKey, _ = rsa.GenerateKey(rand.Reader, keyBits) - generatedRSAKeys[keyBits] = privKey - } - - case "ec": - var curve elliptic.Curve - - switch keyBits { - case 224: - curve = elliptic.P224() - case 256: - curve = elliptic.P256() - case 384: - curve = elliptic.P384() - case 521: - curve = elliptic.P521() - } - - privKey, ok = 
generatedECKeys[keyBits] - if !ok { - privKey, _ = ecdsa.GenerateKey(curve, rand.Reader) - generatedECKeys[keyBits] = privKey - } - - case "ed25519": - privKey, ok = generatedEdKeys[keyBits] - if !ok { - _, privKey, _ = ed25519.GenerateKey(rand.Reader) - generatedEdKeys[keyBits] = privKey - } - - default: - panic("invalid key type: " + keyType) - } - - csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, privKey) - if err != nil { - t.Fatalf("Error creating certificate request: %s", err) - } - block := pem.Block{ - Type: "CERTIFICATE REQUEST", - Bytes: csr, - } - return &block, privKey - } - - getRandCsr := func(keyType string, errorOk bool, csrTemplate *x509.CertificateRequest) csrPlan { - rsaKeyBits := []int{2048, 3072, 4096} - ecKeyBits := []int{224, 256, 384, 521} - plan := csrPlan{errorOk: errorOk} - - var testBitSize int - switch keyType { - case "rsa": - plan.roleKeyBits = rsaKeyBits[mathRand.Int()%len(rsaKeyBits)] - testBitSize = plan.roleKeyBits - - // If we don't expect an error already, randomly choose a - // key size and expect an error if it's less than the role - // setting - if !keybitSizeRandOff && !errorOk { - testBitSize = rsaKeyBits[mathRand.Int()%len(rsaKeyBits)] - } - - if testBitSize < plan.roleKeyBits { - plan.errorOk = true - } - - case "ec": - plan.roleKeyBits = ecKeyBits[mathRand.Int()%len(ecKeyBits)] - testBitSize = plan.roleKeyBits - - // If we don't expect an error already, randomly choose a - // key size and expect an error if it's less than the role - // setting - if !keybitSizeRandOff && !errorOk { - testBitSize = ecKeyBits[mathRand.Int()%len(ecKeyBits)] - } - - if testBitSize < plan.roleKeyBits { - plan.errorOk = true - } - - default: - panic("invalid key type: " + keyType) - } - if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { - t.Logf("roleKeyBits=%d testBitSize=%d errorOk=%v", plan.roleKeyBits, testBitSize, plan.errorOk) - } - - block, privKey := getCsr(keyType, testBitSize, csrTemplate) - plan.cert = 
strings.TrimSpace(string(pem.EncodeToMemory(block))) - plan.privKey = privKey - return plan - } - - // Common names to test with the various role flags toggled - var commonNames struct { - Localhost bool `structs:"localhost"` - BareDomain bool `structs:"example.com"` - SecondDomain bool `structs:"foobar.com"` - SubDomain bool `structs:"foo.example.com"` - Wildcard bool `structs:"*.example.com"` - SubSubdomain bool `structs:"foo.bar.example.com"` - SubSubdomainWildcard bool `structs:"*.bar.example.com"` - GlobDomain bool `structs:"fooexample.com"` - IDN bool `structs:"daɪˈɛrɨsɨs"` - AnyHost bool `structs:"porkslap.beer"` - } - - // Adds a series of tests based on the current selection of - // allowed common names; contains some (seeded) randomness - // - // This allows for a variety of common names to be tested in various - // combinations with allowed toggles of the role - addCnTests := func() { - cnMap := structs.New(commonNames).Map() - for name, allowedInt := range cnMap { - roleVals.KeyType = "rsa" - roleVals.KeyBits = 2048 - if mathRand.Int()%3 == 1 { - roleVals.KeyType = "ec" - roleVals.KeyBits = 224 - } - - roleVals.ServerFlag = false - roleVals.ClientFlag = false - roleVals.CodeSigningFlag = false - roleVals.EmailProtectionFlag = false - - var usage []string - if mathRand.Int()%2 == 1 { - usage = append(usage, "DigitalSignature") - } - if mathRand.Int()%2 == 1 { - usage = append(usage, "ContentCoMmitment") - } - if mathRand.Int()%2 == 1 { - usage = append(usage, "KeyEncipherment") - } - if mathRand.Int()%2 == 1 { - usage = append(usage, "DataEncipherment") - } - if mathRand.Int()%2 == 1 { - usage = append(usage, "KeyAgreemEnt") - } - if mathRand.Int()%2 == 1 { - usage = append(usage, "CertSign") - } - if mathRand.Int()%2 == 1 { - usage = append(usage, "CRLSign") - } - if mathRand.Int()%2 == 1 { - usage = append(usage, "EncipherOnly") - } - if mathRand.Int()%2 == 1 { - usage = append(usage, "DecipherOnly") - } - - roleVals.KeyUsage = usage - parsedKeyUsage 
:= parseKeyUsages(roleVals.KeyUsage) - if parsedKeyUsage == 0 && len(usage) != 0 { - panic("parsed key usages was zero") - } - - var extUsage x509.ExtKeyUsage - i := mathRand.Int() % 4 - switch { - case i == 0: - // Punt on this for now since I'm not clear the actual proper - // way to format these - if name != "daɪˈɛrɨsɨs" { - extUsage = x509.ExtKeyUsageEmailProtection - roleVals.EmailProtectionFlag = true - break - } - fallthrough - case i == 1: - extUsage = x509.ExtKeyUsageServerAuth - roleVals.ServerFlag = true - case i == 2: - extUsage = x509.ExtKeyUsageClientAuth - roleVals.ClientFlag = true - default: - extUsage = x509.ExtKeyUsageCodeSigning - roleVals.CodeSigningFlag = true - } - - allowed := allowedInt.(bool) - issueVals.CommonName = name - if roleVals.EmailProtectionFlag { - if !strings.HasPrefix(name, "*") { - issueVals.CommonName = "user@" + issueVals.CommonName - } - } - - issueTestStep.ErrorOk = !allowed - - validity := roleVals.MaxTTL - - if useCSRs { - templ := &x509.CertificateRequest{ - Subject: pkix.Name{ - CommonName: issueVals.CommonName, - }, - } - plan := getRandCsr(roleVals.KeyType, issueTestStep.ErrorOk, templ) - issueVals.CSR = plan.cert - roleVals.KeyBits = plan.roleKeyBits - issueTestStep.ErrorOk = plan.errorOk - - addTests(getCnCheck(issueVals.CommonName, roleVals, plan.privKey, x509.KeyUsage(parsedKeyUsage), extUsage, validity)) - } else { - addTests(getCnCheck(issueVals.CommonName, roleVals, nil, x509.KeyUsage(parsedKeyUsage), extUsage, validity)) - } - } - } - - funcs := []interface{}{ - addCnTests, getCnCheck, getCountryCheck, getLocalityCheck, getNotBeforeCheck, - getOrganizationCheck, getOuCheck, getPostalCodeCheck, getRandCsr, getStreetAddressCheck, - getProvinceCheck, - } - if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { - t.Logf("funcs=%d", len(funcs)) - } - - // Common Name tests - { - // common_name not provided - issueVals.CommonName = "" - issueTestStep.ErrorOk = true - addTests(nil) - - // Nothing is allowed - 
addCnTests() - - roleVals.AllowLocalhost = true - commonNames.Localhost = true - addCnTests() - - roleVals.AllowedDomains = []string{"foobar.com"} - addCnTests() - - roleVals.AllowedDomains = []string{"example.com"} - roleVals.AllowSubdomains = true - commonNames.SubDomain = true - commonNames.Wildcard = true - commonNames.SubSubdomain = true - commonNames.SubSubdomainWildcard = true - addCnTests() - - roleVals.AllowedDomains = []string{"foobar.com", "example.com"} - commonNames.SecondDomain = true - roleVals.AllowBareDomains = true - commonNames.BareDomain = true - addCnTests() - - roleVals.AllowedDomains = []string{"foobar.com", "*example.com"} - roleVals.AllowGlobDomains = true - commonNames.GlobDomain = true - addCnTests() - - roleVals.AllowAnyName = true - roleVals.EnforceHostnames = true - commonNames.AnyHost = true - commonNames.IDN = true - addCnTests() - - roleVals.EnforceHostnames = false - addCnTests() - - // Ensure that we end up with acceptable key sizes since they won't be - // toggled any longer - keybitSizeRandOff = true - addCnTests() - } - // Country tests - { - roleVals.Country = []string{"foo"} - addTests(getCountryCheck(roleVals)) - - roleVals.Country = []string{"foo", "bar"} - addTests(getCountryCheck(roleVals)) - } - // OU tests - { - roleVals.OU = []string{"foo"} - addTests(getOuCheck(roleVals)) - - roleVals.OU = []string{"bar", "foo"} - addTests(getOuCheck(roleVals)) - } - // Organization tests - { - roleVals.Organization = []string{"system:masters"} - addTests(getOrganizationCheck(roleVals)) - - roleVals.Organization = []string{"foo", "bar"} - addTests(getOrganizationCheck(roleVals)) - } - // Locality tests - { - roleVals.Locality = []string{"foo"} - addTests(getLocalityCheck(roleVals)) - - roleVals.Locality = []string{"foo", "bar"} - addTests(getLocalityCheck(roleVals)) - } - // Province tests - { - roleVals.Province = []string{"foo"} - addTests(getProvinceCheck(roleVals)) - - roleVals.Province = []string{"foo", "bar"} - 
addTests(getProvinceCheck(roleVals)) - } - // StreetAddress tests - { - roleVals.StreetAddress = []string{"123 foo street"} - addTests(getStreetAddressCheck(roleVals)) - - roleVals.StreetAddress = []string{"123 foo street", "456 bar avenue"} - addTests(getStreetAddressCheck(roleVals)) - } - // PostalCode tests - { - roleVals.PostalCode = []string{"f00"} - addTests(getPostalCodeCheck(roleVals)) - - roleVals.PostalCode = []string{"f00", "b4r"} - addTests(getPostalCodeCheck(roleVals)) - } - // NotBefore tests - { - roleVals.NotBeforeDuration = 10 * time.Second - addTests(getNotBeforeCheck(roleVals)) - - roleVals.NotBeforeDuration = 30 * time.Second - addTests(getNotBeforeCheck(roleVals)) - - roleVals.NotBeforeDuration = 0 - } - - // IP SAN tests - { - getIpCheck := func(expectedIp ...net.IP) logicaltest.TestCheckFunc { - return func(resp *logical.Response) error { - var certBundle certutil.CertBundle - err := mapstructure.Decode(resp.Data, &certBundle) - if err != nil { - return err - } - parsedCertBundle, err := certBundle.ToParsedCertBundle() - if err != nil { - return fmt.Errorf("error parsing cert bundle: %s", err) - } - cert := parsedCertBundle.Certificate - var expected []net.IP - expected = append(expected, expectedIp...) 
- if diff := deep.Equal(cert.IPAddresses, expected); len(diff) > 0 { - return fmt.Errorf("wrong SAN IPs, diff: %v", diff) - } - return nil - } - } - addIPSANTests := func(useCSRs, useCSRSANs, allowIPSANs, errorOk bool, ipSANs string, csrIPSANs []net.IP, check logicaltest.TestCheckFunc) { - if useCSRs { - csrTemplate := &x509.CertificateRequest{ - Subject: pkix.Name{ - CommonName: issueVals.CommonName, - }, - IPAddresses: csrIPSANs, - } - block, _ := getCsr(roleVals.KeyType, roleVals.KeyBits, csrTemplate) - issueVals.CSR = strings.TrimSpace(string(pem.EncodeToMemory(block))) - } - oldRoleVals, oldIssueVals, oldIssueTestStep := roleVals, issueVals, issueTestStep - roleVals.UseCSRSANs = useCSRSANs - roleVals.AllowIPSANs = allowIPSANs - issueVals.CommonName = "someone@example.com" - issueVals.IPSANs = ipSANs - issueTestStep.ErrorOk = errorOk - addTests(check) - roleVals, issueVals, issueTestStep = oldRoleVals, oldIssueVals, oldIssueTestStep - } - roleVals.AllowAnyName = true - roleVals.EnforceHostnames = true - roleVals.AllowLocalhost = true - roleVals.UseCSRCommonName = true - commonNames.Localhost = true - - netip1, netip2 := net.IP{127, 0, 0, 1}, net.IP{170, 171, 172, 173} - textip1, textip3 := "127.0.0.1", "::1" - - // IPSANs not allowed and not provided, should not be an error. - addIPSANTests(useCSRs, false, false, false, "", nil, getIpCheck()) - - // IPSANs not allowed, valid IPSANs provided, should be an error. - addIPSANTests(useCSRs, false, false, true, textip1+","+textip3, nil, nil) - - // IPSANs allowed, bogus IPSANs provided, should be an error. - addIPSANTests(useCSRs, false, true, true, "foobar", nil, nil) - - // Given IPSANs as API argument and useCSRSANs false, CSR arg ignored. - addIPSANTests(useCSRs, false, true, false, textip1, - []net.IP{netip2}, getIpCheck(netip1)) - - if useCSRs { - // IPSANs not allowed, valid IPSANs provided via CSR, should be an error. 
- addIPSANTests(useCSRs, true, false, true, "", []net.IP{netip1}, nil) - - // Given IPSANs as both API and CSR arguments and useCSRSANs=true, API arg ignored. - addIPSANTests(useCSRs, true, true, false, textip3, - []net.IP{netip1, netip2}, getIpCheck(netip1, netip2)) - } - } - - { - getOtherCheck := func(expectedOthers ...otherNameUtf8) logicaltest.TestCheckFunc { - return func(resp *logical.Response) error { - var certBundle certutil.CertBundle - err := mapstructure.Decode(resp.Data, &certBundle) - if err != nil { - return err - } - parsedCertBundle, err := certBundle.ToParsedCertBundle() - if err != nil { - return fmt.Errorf("error parsing cert bundle: %s", err) - } - cert := parsedCertBundle.Certificate - foundOthers, err := getOtherSANsFromX509Extensions(cert.Extensions) - if err != nil { - return err - } - var expected []otherNameUtf8 - expected = append(expected, expectedOthers...) - if diff := deep.Equal(foundOthers, expected); len(diff) > 0 { - return fmt.Errorf("wrong SAN IPs, diff: %v", diff) - } - return nil - } - } - - addOtherSANTests := func(useCSRs, useCSRSANs bool, allowedOtherSANs []string, errorOk bool, otherSANs []string, csrOtherSANs []otherNameUtf8, check logicaltest.TestCheckFunc) { - otherSansMap := func(os []otherNameUtf8) map[string][]string { - ret := make(map[string][]string) - for _, o := range os { - ret[o.oid] = append(ret[o.oid], o.value) - } - return ret - } - if useCSRs { - csrTemplate := &x509.CertificateRequest{ - Subject: pkix.Name{ - CommonName: issueVals.CommonName, - }, - } - if err := handleOtherCSRSANs(csrTemplate, otherSansMap(csrOtherSANs)); err != nil { - t.Fatal(err) - } - block, _ := getCsr(roleVals.KeyType, roleVals.KeyBits, csrTemplate) - issueVals.CSR = strings.TrimSpace(string(pem.EncodeToMemory(block))) - } - oldRoleVals, oldIssueVals, oldIssueTestStep := roleVals, issueVals, issueTestStep - roleVals.UseCSRSANs = useCSRSANs - roleVals.AllowedOtherSANs = allowedOtherSANs - issueVals.CommonName = 
"someone@example.com" - issueVals.OtherSANs = strings.Join(otherSANs, ",") - issueTestStep.ErrorOk = errorOk - addTests(check) - roleVals, issueVals, issueTestStep = oldRoleVals, oldIssueVals, oldIssueTestStep - } - roleVals.AllowAnyName = true - roleVals.EnforceHostnames = true - roleVals.AllowLocalhost = true - roleVals.UseCSRCommonName = true - commonNames.Localhost = true - - newOtherNameUtf8 := func(s string) (ret otherNameUtf8) { - pieces := strings.Split(s, ";") - if len(pieces) == 2 { - piecesRest := strings.Split(pieces[1], ":") - if len(piecesRest) == 2 { - switch strings.ToUpper(piecesRest[0]) { - case "UTF-8", "UTF8": - return otherNameUtf8{oid: pieces[0], value: piecesRest[1]} - } - } - } - t.Fatalf("error parsing otherName: %q", s) - return - } - oid1 := "1.3.6.1.4.1.311.20.2.3" - oth1str := oid1 + ";utf8:devops@nope.com" - oth1 := newOtherNameUtf8(oth1str) - oth2 := otherNameUtf8{oid1, "me@example.com"} - // allowNone, allowAll := []string{}, []string{oid1 + ";UTF-8:*"} - allowNone, allowAll := []string{}, []string{"*"} - - // OtherSANs not allowed and not provided, should not be an error. - addOtherSANTests(useCSRs, false, allowNone, false, nil, nil, getOtherCheck()) - - // OtherSANs not allowed, valid OtherSANs provided, should be an error. - addOtherSANTests(useCSRs, false, allowNone, true, []string{oth1str}, nil, nil) - - // OtherSANs allowed, bogus OtherSANs provided, should be an error. - addOtherSANTests(useCSRs, false, allowAll, true, []string{"foobar"}, nil, nil) - - // Given OtherSANs as API argument and useCSRSANs false, CSR arg ignored. - addOtherSANTests(useCSRs, false, allowAll, false, []string{oth1str}, - []otherNameUtf8{oth2}, getOtherCheck(oth1)) - - if useCSRs { - // OtherSANs not allowed, valid OtherSANs provided via CSR, should be an error. - addOtherSANTests(useCSRs, true, allowNone, true, nil, []otherNameUtf8{oth1}, nil) - - // Given OtherSANs as both API and CSR arguments and useCSRSANs=true, API arg ignored. 
- addOtherSANTests(useCSRs, false, allowAll, false, []string{oth2.String()}, - []otherNameUtf8{oth1}, getOtherCheck(oth2)) - } - } - - // Lease tests - { - roleTestStep.ErrorOk = true - roleVals.Lease = "" - roleVals.MaxTTL = 0 - addTests(nil) - - roleVals.Lease = "12h" - roleVals.MaxTTL = 6 * time.Hour - addTests(nil) - - roleTestStep.ErrorOk = false - roleVals.TTL = 0 - roleVals.MaxTTL = 12 * time.Hour - } - - // Listing test - ret = append(ret, logicaltest.TestStep{ - Operation: logical.ListOperation, - Path: "roles/", - Check: func(resp *logical.Response) error { - if resp.Data == nil { - return fmt.Errorf("nil data") - } - - keysRaw, ok := resp.Data["keys"] - if !ok { - return fmt.Errorf("no keys found") - } - - keys, ok := keysRaw.([]string) - if !ok { - return fmt.Errorf("could not convert keys to a string list") - } - - if len(keys) != 1 { - return fmt.Errorf("unexpected keys length of %d", len(keys)) - } - - if keys[0] != "test" { - return fmt.Errorf("unexpected key value of %s", keys[0]) - } - - return nil - }, - }) - - return ret -} - -func TestRolesAltIssuer(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // Create two issuers. - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root a - example.com", - "issuer_name": "root-a", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - rootAPem := resp.Data["certificate"].(string) - rootACert := parseCert(t, rootAPem) - - resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root b - example.com", - "issuer_name": "root-b", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - rootBPem := resp.Data["certificate"].(string) - rootBCert := parseCert(t, rootBPem) - - // Create three roles: one with no assignment, one with explicit root-a, - // one with explicit root-b. 
- _, err = CBWrite(b, s, "roles/use-default", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "key_type": "ec", - }) - require.NoError(t, err) - - _, err = CBWrite(b, s, "roles/use-root-a", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "key_type": "ec", - "issuer_ref": "root-a", - }) - require.NoError(t, err) - - _, err = CBWrite(b, s, "roles/use-root-b", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "issuer_ref": "root-b", - }) - require.NoError(t, err) - - // Now issue certs against these roles. - resp, err = CBWrite(b, s, "issue/use-default", map[string]interface{}{ - "common_name": "testing", - "ttl": "5s", - }) - require.NoError(t, err) - leafPem := resp.Data["certificate"].(string) - leafCert := parseCert(t, leafPem) - err = leafCert.CheckSignatureFrom(rootACert) - require.NoError(t, err, "should be signed by root-a but wasn't") - - resp, err = CBWrite(b, s, "issue/use-root-a", map[string]interface{}{ - "common_name": "testing", - "ttl": "5s", - }) - require.NoError(t, err) - leafPem = resp.Data["certificate"].(string) - leafCert = parseCert(t, leafPem) - err = leafCert.CheckSignatureFrom(rootACert) - require.NoError(t, err, "should be signed by root-a but wasn't") - - resp, err = CBWrite(b, s, "issue/use-root-b", map[string]interface{}{ - "common_name": "testing", - "ttl": "5s", - }) - require.NoError(t, err) - leafPem = resp.Data["certificate"].(string) - leafCert = parseCert(t, leafPem) - err = leafCert.CheckSignatureFrom(rootBCert) - require.NoError(t, err, "should be signed by root-b but wasn't") - - // Update the default issuer to be root B and make sure that the - // use-default role updates. 
- _, err = CBWrite(b, s, "config/issuers", map[string]interface{}{ - "default": "root-b", - }) - require.NoError(t, err) - - resp, err = CBWrite(b, s, "issue/use-default", map[string]interface{}{ - "common_name": "testing", - "ttl": "5s", - }) - require.NoError(t, err) - leafPem = resp.Data["certificate"].(string) - leafCert = parseCert(t, leafPem) - err = leafCert.CheckSignatureFrom(rootBCert) - require.NoError(t, err, "should be signed by root-b but wasn't") -} - -func TestBackend_PathFetchValidRaw(t *testing.T) { - t.Parallel() - b, storage := CreateBackendWithStorage(t) - - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/generate/internal", - Storage: storage, - Data: map[string]interface{}{ - "common_name": "test.com", - "ttl": "6h", - }, - MountPoint: "pki/", - }) - require.NoError(t, err) - if resp != nil && resp.IsError() { - t.Fatalf("failed to generate root, %#v", resp) - } - rootCaAsPem := resp.Data["certificate"].(string) - - // Chain should contain the root. - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.ReadOperation, - Path: "ca_chain", - Storage: storage, - Data: map[string]interface{}{}, - MountPoint: "pki/", - }) - require.NoError(t, err) - if resp != nil && resp.IsError() { - t.Fatalf("failed read ca_chain, %#v", resp) - } - if strings.Count(string(resp.Data[logical.HTTPRawBody].([]byte)), rootCaAsPem) != 1 { - t.Fatalf("expected raw chain to contain the root cert") - } - - // The ca/pem should return us the actual CA... 
- resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.ReadOperation, - Path: "ca/pem", - Storage: storage, - Data: map[string]interface{}{}, - MountPoint: "pki/", - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("ca/pem"), logical.ReadOperation), resp, true) - require.NoError(t, err) - if resp != nil && resp.IsError() { - t.Fatalf("failed read ca/pem, %#v", resp) - } - // check the raw cert matches the response body - if !bytes.Equal(resp.Data[logical.HTTPRawBody].([]byte), []byte(rootCaAsPem)) { - t.Fatalf("failed to get raw cert") - } - - _, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "roles/example", - Storage: storage, - Data: map[string]interface{}{ - "allowed_domains": "example.com", - "allow_subdomains": "true", - "max_ttl": "1h", - "no_store": "false", - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "error setting up pki role: %v", err) - - // Now issue a short-lived certificate from our pki-external. 
- resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "issue/example", - Storage: storage, - Data: map[string]interface{}{ - "common_name": "test.example.com", - "ttl": "5m", - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "error issuing certificate: %v", err) - require.NotNil(t, resp, "got nil response from issuing request") - - issueCrtAsPem := resp.Data["certificate"].(string) - issuedCrt := parseCert(t, issueCrtAsPem) - expectedSerial := serialFromCert(issuedCrt) - expectedCert := []byte(issueCrtAsPem) - - // get der cert - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.ReadOperation, - Path: fmt.Sprintf("cert/%s/raw", expectedSerial), - Storage: storage, - }) - if resp != nil && resp.IsError() { - t.Fatalf("failed to get raw cert, %#v", resp) - } - if err != nil { - t.Fatal(err) - } - - // check the raw cert matches the response body - rawBody := resp.Data[logical.HTTPRawBody].([]byte) - bodyAsPem := []byte(strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: rawBody})))) - if !bytes.Equal(bodyAsPem, expectedCert) { - t.Fatalf("failed to get raw cert for serial number: %s", expectedSerial) - } - if resp.Data[logical.HTTPContentType] != "application/pkix-cert" { - t.Fatalf("failed to get raw cert content-type") - } - - // get pem - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.ReadOperation, - Path: fmt.Sprintf("cert/%s/raw/pem", expectedSerial), - Storage: storage, - }) - if resp != nil && resp.IsError() { - t.Fatalf("failed to get raw, %#v", resp) - } - if err != nil { - t.Fatal(err) - } - - // check the pem cert matches the response body - if !bytes.Equal(resp.Data[logical.HTTPRawBody].([]byte), expectedCert) { - t.Fatalf("failed to get pem cert") - } - if resp.Data[logical.HTTPContentType] != "application/pem-certificate-chain" { - t.Fatalf("failed to get raw cert 
content-type") - } -} - -func TestBackend_PathFetchCertList(t *testing.T) { - t.Parallel() - // create the backend - b, storage := CreateBackendWithStorage(t) - - // generate root - rootData := map[string]interface{}{ - "common_name": "test.com", - "ttl": "6h", - } - - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/generate/internal", - Storage: storage, - Data: rootData, - MountPoint: "pki/", - }) - - if resp != nil && resp.IsError() { - t.Fatalf("failed to generate root, %#v", resp) - } - if err != nil { - t.Fatal(err) - } - - // config urls - urlsData := map[string]interface{}{ - "issuing_certificates": "http://127.0.0.1:8200/v1/pki/ca", - "crl_distribution_points": "http://127.0.0.1:8200/v1/pki/crl", - } - - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "config/urls", - Storage: storage, - Data: urlsData, - MountPoint: "pki/", - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/urls"), logical.UpdateOperation), resp, true) - - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.ReadOperation, - Path: "config/urls", - Storage: storage, - MountPoint: "pki/", - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/urls"), logical.ReadOperation), resp, true) - - if resp != nil && resp.IsError() { - t.Fatalf("failed to config urls, %#v", resp) - } - if err != nil { - t.Fatal(err) - } - - // create a role entry - roleData := map[string]interface{}{ - "allowed_domains": "test.com", - "allow_subdomains": "true", - "max_ttl": "4h", - } - - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "roles/test-example", - Storage: storage, - Data: roleData, - MountPoint: "pki/", - }) - if resp != nil && resp.IsError() { - t.Fatalf("failed to create a role, %#v", resp) - } - if err != nil { - 
t.Fatal(err) - } - - // issue some certs - i := 1 - for i < 10 { - certData := map[string]interface{}{ - "common_name": "example.test.com", - } - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "issue/test-example", - Storage: storage, - Data: certData, - MountPoint: "pki/", - }) - if resp != nil && resp.IsError() { - t.Fatalf("failed to issue a cert, %#v", resp) - } - if err != nil { - t.Fatal(err) - } - - i = i + 1 - } - - // list certs - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.ListOperation, - Path: "certs", - Storage: storage, - MountPoint: "pki/", - }) - if resp != nil && resp.IsError() { - t.Fatalf("failed to list certs, %#v", resp) - } - if err != nil { - t.Fatal(err) - } - // check that the root and 9 additional certs are all listed - if len(resp.Data["keys"].([]string)) != 10 { - t.Fatalf("failed to list all 10 certs") - } - - // list certs/ - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.ListOperation, - Path: "certs/", - Storage: storage, - MountPoint: "pki/", - }) - if resp != nil && resp.IsError() { - t.Fatalf("failed to list certs, %#v", resp) - } - if err != nil { - t.Fatal(err) - } - // check that the root and 9 additional certs are all listed - if len(resp.Data["keys"].([]string)) != 10 { - t.Fatalf("failed to list all 10 certs") - } -} - -func TestBackend_SignVerbatim(t *testing.T) { - t.Parallel() - testCases := []struct { - testName string - keyType string - }{ - {testName: "RSA", keyType: "rsa"}, - {testName: "ED25519", keyType: "ed25519"}, - {testName: "EC", keyType: "ec"}, - {testName: "Any", keyType: "any"}, - } - for _, tc := range testCases { - tc := tc - t.Run(tc.testName, func(t *testing.T) { - runTestSignVerbatim(t, tc.keyType) - }) - } -} - -func runTestSignVerbatim(t *testing.T, keyType string) { - // create the backend - b, storage := CreateBackendWithStorage(t) - - // generate 
root - rootData := map[string]interface{}{ - "common_name": "test.com", - "not_after": "9999-12-31T23:59:59Z", - } - - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/generate/internal", - Storage: storage, - Data: rootData, - MountPoint: "pki/", - }) - if resp != nil && resp.IsError() { - t.Fatalf("failed to generate root, %#v", *resp) - } - if err != nil { - t.Fatal(err) - } - - // create a CSR and key - key, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Fatal(err) - } - csrReq := &x509.CertificateRequest{ - Subject: pkix.Name{ - CommonName: "foo.bar.com", - }, - // Check that otherName extensions are not duplicated (see hashicorp/vault#16700). - // If these extensions are duplicated, sign-verbatim will fail when parsing the signed certificate on Go 1.19+ (see golang/go#50988). - // On older versions of Go this test will fail due to an explicit check for duplicate otherNames later in this test. - ExtraExtensions: []pkix.Extension{ - { - Id: oidExtensionSubjectAltName, - Critical: false, - Value: []byte{0x30, 0x26, 0xA0, 0x24, 0x06, 0x0A, 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x14, 0x02, 0x03, 0xA0, 0x16, 0x0C, 0x14, 0x75, 0x73, 0x65, 0x72, 0x6E, 0x61, 0x6D, 0x65, 0x40, 0x65, 0x78, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x2E, 0x63, 0x6F, 0x6D}, - }, - }, - } - csr, err := x509.CreateCertificateRequest(rand.Reader, csrReq, key) - if err != nil { - t.Fatal(err) - } - if len(csr) == 0 { - t.Fatal("generated csr is empty") - } - pemCSR := strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE REQUEST", - Bytes: csr, - }))) - if len(pemCSR) == 0 { - t.Fatal("pem csr is empty") - } - - signVerbatimData := map[string]interface{}{ - "csr": pemCSR, - } - if keyType == "rsa" { - signVerbatimData["signature_bits"] = 512 - } - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "sign-verbatim", - Storage: 
storage, - Data: signVerbatimData, - MountPoint: "pki/", - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("sign-verbatim"), logical.UpdateOperation), resp, true) - - if resp != nil && resp.IsError() { - t.Fatalf("failed to sign-verbatim basic CSR: %#v", *resp) - } - if err != nil { - t.Fatal(err) - } - if resp.Secret != nil { - t.Fatal("secret is not nil") - } - - // create a role entry; we use this to check that sign-verbatim when used with a role is still honoring TTLs - roleData := map[string]interface{}{ - "ttl": "4h", - "max_ttl": "8h", - "key_type": keyType, - "not_before_duration": "2h", - } - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "roles/test", - Storage: storage, - Data: roleData, - MountPoint: "pki/", - }) - if resp != nil && resp.IsError() { - t.Fatalf("failed to create a role, %#v", *resp) - } - if err != nil { - t.Fatal(err) - } - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "sign-verbatim/test", - Storage: storage, - Data: map[string]interface{}{ - "csr": pemCSR, - "ttl": "5h", - }, - MountPoint: "pki/", - }) - if resp != nil && resp.IsError() { - t.Fatalf("failed to sign-verbatim ttl'd CSR: %#v", *resp) - } - if err != nil { - t.Fatal(err) - } - if resp.Secret != nil { - t.Fatal("got a lease when we should not have") - } - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "sign-verbatim/test", - Storage: storage, - Data: map[string]interface{}{ - "csr": pemCSR, - "ttl": "12h", - }, - MountPoint: "pki/", - }) - if err != nil { - t.Fatal(err) - } - if resp != nil && resp.IsError() { - t.Fatalf(resp.Error().Error()) - } - if resp.Data == nil || resp.Data["certificate"] == nil { - t.Fatal("did not get expected data") - } - certString := resp.Data["certificate"].(string) - block, _ := pem.Decode([]byte(certString)) - if block == 
nil { - t.Fatal("nil pem block") - } - certs, err := x509.ParseCertificates(block.Bytes) - if err != nil { - t.Fatal(err) - } - if len(certs) != 1 { - t.Fatalf("expected a single cert, got %d", len(certs)) - } - cert := certs[0] - if math.Abs(float64(time.Now().Add(12*time.Hour).Unix()-cert.NotAfter.Unix())) < 10 { - t.Fatalf("sign-verbatim did not properly cap validity period (notAfter) on signed CSR: was %v vs requested %v but should've been %v", cert.NotAfter, time.Now().Add(12*time.Hour), time.Now().Add(8*time.Hour)) - } - if math.Abs(float64(time.Now().Add(-2*time.Hour).Unix()-cert.NotBefore.Unix())) > 10 { - t.Fatalf("sign-verbatim did not properly cap validity period (notBefore) on signed CSR: was %v vs expected %v", cert.NotBefore, time.Now().Add(-2*time.Hour)) - } - - // Now check signing a certificate using the not_after input using the Y10K value - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "sign-verbatim/test", - Storage: storage, - Data: map[string]interface{}{ - "csr": pemCSR, - "not_after": "9999-12-31T23:59:59Z", - }, - MountPoint: "pki/", - }) - if err != nil { - t.Fatal(err) - } - if resp != nil && resp.IsError() { - t.Fatalf(resp.Error().Error()) - } - if resp.Data == nil || resp.Data["certificate"] == nil { - t.Fatal("did not get expected data") - } - certString = resp.Data["certificate"].(string) - block, _ = pem.Decode([]byte(certString)) - if block == nil { - t.Fatal("nil pem block") - } - certs, err = x509.ParseCertificates(block.Bytes) - if err != nil { - t.Fatal(err) - } - if len(certs) != 1 { - t.Fatalf("expected a single cert, got %d", len(certs)) - } - cert = certs[0] - - // Fallback check for duplicate otherName, necessary on Go versions before 1.19. - // We assume that there is only one SAN in the original CSR and that it is an otherName. 
- san_count := 0 - for _, ext := range cert.Extensions { - if ext.Id.Equal(oidExtensionSubjectAltName) { - san_count += 1 - } - } - if san_count != 1 { - t.Fatalf("expected one SAN extension, got %d", san_count) - } - - notAfter := cert.NotAfter.Format(time.RFC3339) - if notAfter != "9999-12-31T23:59:59Z" { - t.Fatal(fmt.Errorf("not after from certificate is not matching with input parameter")) - } - - // now check that if we set generate-lease it takes it from the role and the TTLs match - roleData = map[string]interface{}{ - "ttl": "4h", - "max_ttl": "8h", - "generate_lease": true, - "key_type": keyType, - } - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "roles/test", - Storage: storage, - Data: roleData, - MountPoint: "pki/", - }) - if resp != nil && resp.IsError() { - t.Fatalf("failed to create a role, %#v", *resp) - } - if err != nil { - t.Fatal(err) - } - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "sign-verbatim/test", - Storage: storage, - Data: map[string]interface{}{ - "csr": pemCSR, - "ttl": "5h", - }, - MountPoint: "pki/", - }) - if resp != nil && resp.IsError() { - t.Fatalf("failed to sign-verbatim role-leased CSR: %#v", *resp) - } - if err != nil { - t.Fatal(err) - } - if resp.Secret == nil { - t.Fatalf("secret is nil, response is %#v", *resp) - } - if math.Abs(float64(resp.Secret.TTL-(5*time.Hour))) > float64(5*time.Hour) { - t.Fatalf("ttl not default; wanted %v, got %v", b.System().DefaultLeaseTTL(), resp.Secret.TTL) - } -} - -func TestBackend_Root_Idempotency(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // This is a change within 1.11, we are no longer idempotent across generate/internal calls. 
- resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "myvault.com", - }) - require.NoError(t, err) - require.NotNil(t, resp, "expected ca info") - keyId1 := resp.Data["key_id"] - issuerId1 := resp.Data["issuer_id"] - cert := parseCert(t, resp.Data["certificate"].(string)) - certSkid := certutil.GetHexFormatted(cert.SubjectKeyId, ":") - - // -> Validate the SKID matches between the root cert and the key - resp, err = CBRead(b, s, "key/"+keyId1.(keyID).String()) - require.NoError(t, err) - require.NotNil(t, resp, "expected a response") - require.Equal(t, resp.Data["subject_key_id"], certSkid) - - resp, err = CBRead(b, s, "cert/ca_chain") - require.NoError(t, err, "error reading ca_chain: %v", err) - - r1Data := resp.Data - - // Calling generate/internal should generate a new CA as well. - resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "myvault.com", - }) - require.NoError(t, err) - require.NotNil(t, resp, "expected ca info") - keyId2 := resp.Data["key_id"] - issuerId2 := resp.Data["issuer_id"] - cert = parseCert(t, resp.Data["certificate"].(string)) - certSkid = certutil.GetHexFormatted(cert.SubjectKeyId, ":") - - // -> Validate the SKID matches between the root cert and the key - resp, err = CBRead(b, s, "key/"+keyId2.(keyID).String()) - require.NoError(t, err) - require.NotNil(t, resp, "expected a response") - require.Equal(t, resp.Data["subject_key_id"], certSkid) - - // Make sure that we actually generated different issuer and key values - require.NotEqual(t, keyId1, keyId2) - require.NotEqual(t, issuerId1, issuerId2) - - // Now because the issued CA's have no links, the call to ca_chain should return the same data (ca chain from default) - resp, err = CBRead(b, s, "cert/ca_chain") - require.NoError(t, err, "error reading ca_chain: %v", err) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("cert/ca_chain"), logical.ReadOperation), resp, true) - - r2Data := 
resp.Data - if !reflect.DeepEqual(r1Data, r2Data) { - t.Fatal("got different ca certs") - } - - // Now let's validate that the import bundle is idempotent. - pemBundleRootCA := rootCACertPEM + "\n" + rootCAKeyPEM - resp, err = CBWrite(b, s, "config/ca", map[string]interface{}{ - "pem_bundle": pemBundleRootCA, - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/ca"), logical.UpdateOperation), resp, true) - - require.NoError(t, err) - require.NotNil(t, resp, "expected ca info") - firstMapping := resp.Data["mapping"].(map[string]string) - firstImportedKeys := resp.Data["imported_keys"].([]string) - firstImportedIssuers := resp.Data["imported_issuers"].([]string) - firstExistingKeys := resp.Data["existing_keys"].([]string) - firstExistingIssuers := resp.Data["existing_issuers"].([]string) - - require.NotContains(t, firstImportedKeys, keyId1) - require.NotContains(t, firstImportedKeys, keyId2) - require.NotContains(t, firstImportedIssuers, issuerId1) - require.NotContains(t, firstImportedIssuers, issuerId2) - require.Empty(t, firstExistingKeys) - require.Empty(t, firstExistingIssuers) - require.NotEmpty(t, firstMapping) - require.Equal(t, 1, len(firstMapping)) - - var issuerId3 string - var keyId3 string - for i, k := range firstMapping { - issuerId3 = i - keyId3 = k - } - - // Performing this again should result in no key/issuer ids being imported/generated. 
- resp, err = CBWrite(b, s, "config/ca", map[string]interface{}{ - "pem_bundle": pemBundleRootCA, - }) - require.NoError(t, err) - require.NotNil(t, resp, "expected ca info") - secondMapping := resp.Data["mapping"].(map[string]string) - secondImportedKeys := resp.Data["imported_keys"] - secondImportedIssuers := resp.Data["imported_issuers"] - secondExistingKeys := resp.Data["existing_keys"] - secondExistingIssuers := resp.Data["existing_issuers"] - - require.Empty(t, secondImportedKeys) - require.Empty(t, secondImportedIssuers) - require.Contains(t, secondExistingKeys, keyId3) - require.Contains(t, secondExistingIssuers, issuerId3) - require.Equal(t, 1, len(secondMapping)) - - resp, err = CBDelete(b, s, "root") - require.NoError(t, err) - require.NotNil(t, resp) - require.Equal(t, 1, len(resp.Warnings)) - - // Make sure we can delete twice... - resp, err = CBDelete(b, s, "root") - require.NoError(t, err) - require.NotNil(t, resp) - require.Equal(t, 1, len(resp.Warnings)) - - _, err = CBRead(b, s, "cert/ca_chain") - require.Error(t, err, "expected an error fetching deleted ca_chain") - - // We should be able to import the same ca bundle as before and get a different key/issuer ids - resp, err = CBWrite(b, s, "config/ca", map[string]interface{}{ - "pem_bundle": pemBundleRootCA, - }) - require.NoError(t, err) - require.NotNil(t, resp, "expected ca info") - postDeleteImportedKeys := resp.Data["imported_keys"] - postDeleteImportedIssuers := resp.Data["imported_issuers"] - - // Make sure that we actually generated different issuer and key values, then the previous import - require.NotNil(t, postDeleteImportedKeys) - require.NotNil(t, postDeleteImportedIssuers) - require.NotEqual(t, postDeleteImportedKeys, firstImportedKeys) - require.NotEqual(t, postDeleteImportedIssuers, firstImportedIssuers) - - resp, err = CBRead(b, s, "cert/ca_chain") - require.NoError(t, err) - - caChainPostDelete := resp.Data - if reflect.DeepEqual(r1Data, caChainPostDelete) { - t.Fatal("ca certs 
from ca_chain were the same post delete, should have changed.") - } -} - -func TestBackend_SignIntermediate_AllowedPastCAValidity(t *testing.T) { - t.Parallel() - b_root, s_root := CreateBackendWithStorage(t) - b_int, s_int := CreateBackendWithStorage(t) - var err error - - // Direct issuing from root - _, err = CBWrite(b_root, s_root, "root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "myvault.com", - }) - if err != nil { - t.Fatal(err) - } - - _, err = CBWrite(b_root, s_root, "roles/test", map[string]interface{}{ - "allow_bare_domains": true, - "allow_subdomains": true, - "allow_any_name": true, - }) - if err != nil { - t.Fatal(err) - } - - resp, err := CBWrite(b_int, s_int, "intermediate/generate/internal", map[string]interface{}{ - "common_name": "myint.com", - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b_root.Route("intermediate/generate/internal"), logical.UpdateOperation), resp, true) - require.Contains(t, resp.Data, "key_id") - intKeyId := resp.Data["key_id"].(keyID) - csr := resp.Data["csr"] - - resp, err = CBRead(b_int, s_int, "key/"+intKeyId.String()) - require.NoError(t, err) - require.NotNil(t, resp, "expected a response") - intSkid := resp.Data["subject_key_id"].(string) - - if err != nil { - t.Fatal(err) - } - - _, err = CBWrite(b_root, s_root, "sign/test", map[string]interface{}{ - "common_name": "myint.com", - "csr": csr, - "ttl": "60h", - }) - require.ErrorContains(t, err, "that is beyond the expiration of the CA certificate") - - _, err = CBWrite(b_root, s_root, "sign-verbatim/test", map[string]interface{}{ - "common_name": "myint.com", - "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", - "csr": csr, - "ttl": "60h", - }) - require.ErrorContains(t, err, "that is beyond the expiration of the CA certificate") - - resp, err = CBWrite(b_root, s_root, "root/sign-intermediate", map[string]interface{}{ - "common_name": "myint.com", - "other_sans": 
"1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", - "csr": csr, - "ttl": "60h", - }) - if err != nil { - t.Fatalf("got error: %v", err) - } - if resp == nil { - t.Fatal("got nil response") - } - if len(resp.Warnings) == 0 { - t.Fatalf("expected warnings, got %#v", *resp) - } - - cert := parseCert(t, resp.Data["certificate"].(string)) - certSkid := certutil.GetHexFormatted(cert.SubjectKeyId, ":") - require.Equal(t, intSkid, certSkid) -} - -func TestBackend_ConsulSignLeafWithLegacyRole(t *testing.T) { - t.Parallel() - // create the backend - b, s := CreateBackendWithStorage(t) - - // generate root - data, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "myvault.com", - }) - require.NoError(t, err, "failed generating internal root cert") - rootCaPem := data.Data["certificate"].(string) - - // Create a signing role like Consul did with the default args prior to Vault 1.10 - _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ - "allow_any_name": true, - "allowed_serial_numbers": []string{"MySerialNumber"}, - "key_type": "any", - "key_bits": "2048", - "signature_bits": "256", - }) - require.NoError(t, err, "failed creating legacy role") - - _, csrPem := generateTestCsr(t, certutil.ECPrivateKey, 256) - data, err = CBWrite(b, s, "sign/test", map[string]interface{}{ - "csr": csrPem, - }) - require.NoError(t, err, "failed signing csr") - certAsPem := data.Data["certificate"].(string) - - signedCert := parseCert(t, certAsPem) - rootCert := parseCert(t, rootCaPem) - requireSignedBy(t, signedCert, rootCert) -} - -func TestBackend_SignSelfIssued(t *testing.T) { - t.Parallel() - // create the backend - b, storage := CreateBackendWithStorage(t) - - // generate root - rootData := map[string]interface{}{ - "common_name": "test.com", - "ttl": "172800", - } - - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/generate/internal", - Storage: storage, 
- Data: rootData, - MountPoint: "pki/", - }) - if resp != nil && resp.IsError() { - t.Fatalf("failed to generate root, %#v", *resp) - } - if err != nil { - t.Fatal(err) - } - - key, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Fatal(err) - } - - template := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "foo.bar.com", - }, - SerialNumber: big.NewInt(1234), - IsCA: false, - BasicConstraintsValid: true, - } - - ss, _ := getSelfSigned(t, template, template, key) - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/sign-self-issued", - Storage: storage, - Data: map[string]interface{}{ - "certificate": ss, - }, - MountPoint: "pki/", - }) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("got nil response") - } - if !resp.IsError() { - t.Fatalf("expected error due to non-CA; got: %#v", *resp) - } - - // Set CA to true, but leave issuer alone - template.IsCA = true - - issuer := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "bar.foo.com", - }, - SerialNumber: big.NewInt(2345), - IsCA: true, - BasicConstraintsValid: true, - } - ss, ssCert := getSelfSigned(t, template, issuer, key) - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/sign-self-issued", - Storage: storage, - Data: map[string]interface{}{ - "certificate": ss, - }, - MountPoint: "pki/", - }) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("got nil response") - } - if !resp.IsError() { - t.Fatalf("expected error due to different issuer; cert info is\nIssuer\n%#v\nSubject\n%#v\n", ssCert.Issuer, ssCert.Subject) - } - - ss, _ = getSelfSigned(t, template, template, key) - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/sign-self-issued", - Storage: storage, - Data: map[string]interface{}{ - "certificate": ss, - }, - MountPoint: "pki/", - 
}) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("root/sign-self-issued"), logical.UpdateOperation), resp, true) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("got nil response") - } - if resp.IsError() { - t.Fatalf("error in response: %s", resp.Error().Error()) - } - - newCertString := resp.Data["certificate"].(string) - block, _ := pem.Decode([]byte(newCertString)) - newCert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - t.Fatal(err) - } - - sc := b.makeStorageContext(context.Background(), storage) - signingBundle, err := sc.fetchCAInfo(defaultRef, ReadOnlyUsage) - if err != nil { - t.Fatal(err) - } - if reflect.DeepEqual(newCert.Subject, newCert.Issuer) { - t.Fatal("expected different subject/issuer") - } - if !reflect.DeepEqual(newCert.Issuer, signingBundle.Certificate.Subject) { - t.Fatalf("expected matching issuer/CA subject\n\nIssuer:\n%#v\nSubject:\n%#v\n", newCert.Issuer, signingBundle.Certificate.Subject) - } - if bytes.Equal(newCert.AuthorityKeyId, newCert.SubjectKeyId) { - t.Fatal("expected different authority/subject") - } - if !bytes.Equal(newCert.AuthorityKeyId, signingBundle.Certificate.SubjectKeyId) { - t.Fatal("expected authority on new cert to be same as signing subject") - } - if newCert.Subject.CommonName != "foo.bar.com" { - t.Fatalf("unexpected common name on new cert: %s", newCert.Subject.CommonName) - } -} - -// TestBackend_SignSelfIssued_DifferentTypes tests the functionality of the -// require_matching_certificate_algorithms flag. 
-func TestBackend_SignSelfIssued_DifferentTypes(t *testing.T) { - t.Parallel() - // create the backend - b, storage := CreateBackendWithStorage(t) - - // generate root - rootData := map[string]interface{}{ - "common_name": "test.com", - "ttl": "172800", - "key_type": "ec", - "key_bits": "521", - } - - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/generate/internal", - Storage: storage, - Data: rootData, - MountPoint: "pki/", - }) - if resp != nil && resp.IsError() { - t.Fatalf("failed to generate root, %#v", *resp) - } - if err != nil { - t.Fatal(err) - } - - key, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - t.Fatal(err) - } - - template := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "foo.bar.com", - }, - SerialNumber: big.NewInt(1234), - IsCA: true, - BasicConstraintsValid: true, - } - - // Tests absent the flag - ss, _ := getSelfSigned(t, template, template, key) - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/sign-self-issued", - Storage: storage, - Data: map[string]interface{}{ - "certificate": ss, - }, - MountPoint: "pki/", - }) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("got nil response") - } - - // Set CA to true, but leave issuer alone - template.IsCA = true - - // Tests with flag present but false - ss, _ = getSelfSigned(t, template, template, key) - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/sign-self-issued", - Storage: storage, - Data: map[string]interface{}{ - "certificate": ss, - "require_matching_certificate_algorithms": false, - }, - MountPoint: "pki/", - }) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("got nil response") - } - - // Test with flag present and true - ss, _ = getSelfSigned(t, template, template, key) - _, err = 
b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "root/sign-self-issued", - Storage: storage, - Data: map[string]interface{}{ - "certificate": ss, - "require_matching_certificate_algorithms": true, - }, - MountPoint: "pki/", - }) - if err == nil { - t.Fatal("expected error due to mismatched algorithms") - } -} - -// This is a really tricky test because the Go stdlib asn1 package is incapable -// of doing the right thing with custom OID SANs (see comments in the package, -// it's readily admitted that it's too magic) but that means that any -// validation logic written for this test isn't being independently verified, -// as in, if cryptobytes is used to decode it to make the test work, that -// doesn't mean we're encoding and decoding correctly, only that we made the -// test pass. Instead, when run verbosely it will first perform a bunch of -// checks to verify that the OID SAN logic doesn't screw up other SANs, then -// will spit out the PEM. This can be validated independently. -// -// You want the hex dump of the octet string corresponding to the X509v3 -// Subject Alternative Name. There's a nice online utility at -// https://lapo.it/asn1js that can be used to view the structure of an -// openssl-generated other SAN at -// https://lapo.it/asn1js/#3022A020060A2B060104018237140203A0120C106465766F7073406C6F63616C686F7374 -// (openssl asn1parse can also be used with -strparse using an offset of the -// hex blob for the subject alternative names extension). -// -// The structure output from here should match that precisely (even if the OID -// itself doesn't) in the second test. -// -// The test that encodes two should have them be in separate elements in the -// top-level sequence; see -// https://lapo.it/asn1js/#3046A020060A2B060104018237140203A0120C106465766F7073406C6F63616C686F7374A022060A2B060104018237140204A0140C12322D6465766F7073406C6F63616C686F7374 for an openssl-generated example. 
-// -// The good news is that it's valid to simply copy and paste the PEM output from -// here into the form at that site as it will do the right thing so it's pretty -// easy to validate. -func TestBackend_OID_SANs(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - var err error - var resp *logical.Response - var certStr string - var block *pem.Block - var cert *x509.Certificate - - _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "myvault.com", - }) - if err != nil { - t.Fatal(err) - } - - _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ - "allowed_domains": []string{"foobar.com", "zipzap.com"}, - "allow_bare_domains": true, - "allow_subdomains": true, - "allow_ip_sans": true, - "allowed_other_sans": "1.3.6.1.4.1.311.20.2.3;UTF8:devops@*,1.3.6.1.4.1.311.20.2.4;utf8:d*e@foobar.com", - }) - if err != nil { - t.Fatal(err) - } - - // Get a baseline before adding OID SANs. In the next sections we'll verify - // that the SANs are all added even as the OID SAN inclusion forces other - // adding logic (custom rather than built-in Golang logic) - resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar.com", - "ip_sans": "1.2.3.4", - "alt_names": "foobar.com,foo.foobar.com,bar.foobar.com", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - certStr = resp.Data["certificate"].(string) - block, _ = pem.Decode([]byte(certStr)) - cert, err = x509.ParseCertificate(block.Bytes) - if err != nil { - t.Fatal(err) - } - if cert.IPAddresses[0].String() != "1.2.3.4" { - t.Fatalf("unexpected IP SAN %q", cert.IPAddresses[0].String()) - } - if len(cert.DNSNames) != 3 || - cert.DNSNames[0] != "bar.foobar.com" || - cert.DNSNames[1] != "foo.foobar.com" || - cert.DNSNames[2] != "foobar.com" { - t.Fatalf("unexpected DNS SANs %v", cert.DNSNames) - } - - // First test some bad stuff that shouldn't work - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - 
"common_name": "foobar.com", - "ip_sans": "1.2.3.4", - "alt_names": "foo.foobar.com,bar.foobar.com", - "ttl": "1h", - // Not a valid value for the first possibility - "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF8:devop@nope.com", - }) - if err == nil { - t.Fatal("expected error") - } - - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar.com", - "ip_sans": "1.2.3.4", - "alt_names": "foo.foobar.com,bar.foobar.com", - "ttl": "1h", - // Not a valid OID for the first possibility - "other_sans": "1.3.6.1.4.1.311.20.2.5;UTF8:devops@nope.com", - }) - if err == nil { - t.Fatal("expected error") - } - - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar.com", - "ip_sans": "1.2.3.4", - "alt_names": "foo.foobar.com,bar.foobar.com", - "ttl": "1h", - // Not a valid name for the second possibility - "other_sans": "1.3.6.1.4.1.311.20.2.4;UTF8:d34g@foobar.com", - }) - if err == nil { - t.Fatal("expected error") - } - - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar.com", - "ip_sans": "1.2.3.4", - "alt_names": "foo.foobar.com,bar.foobar.com", - "ttl": "1h", - // Not a valid OID for the second possibility - "other_sans": "1.3.6.1.4.1.311.20.2.5;UTF8:d34e@foobar.com", - }) - if err == nil { - t.Fatal("expected error") - } - - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar.com", - "ip_sans": "1.2.3.4", - "alt_names": "foo.foobar.com,bar.foobar.com", - "ttl": "1h", - // Not a valid type - "other_sans": "1.3.6.1.4.1.311.20.2.5;UTF2:d34e@foobar.com", - }) - if err == nil { - t.Fatal("expected error") - } - - // Valid for first possibility - resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar.com", - "ip_sans": "1.2.3.4", - "alt_names": "foo.foobar.com,bar.foobar.com", - "ttl": "1h", - "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:devops@nope.com", - }) - if err != nil { - t.Fatal(err) - } - certStr = 
resp.Data["certificate"].(string) - block, _ = pem.Decode([]byte(certStr)) - cert, err = x509.ParseCertificate(block.Bytes) - if err != nil { - t.Fatal(err) - } - if cert.IPAddresses[0].String() != "1.2.3.4" { - t.Fatalf("unexpected IP SAN %q", cert.IPAddresses[0].String()) - } - if len(cert.DNSNames) != 3 || - cert.DNSNames[0] != "bar.foobar.com" || - cert.DNSNames[1] != "foo.foobar.com" || - cert.DNSNames[2] != "foobar.com" { - t.Fatalf("unexpected DNS SANs %v", cert.DNSNames) - } - if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { - t.Logf("certificate 1 to check:\n%s", certStr) - } - - // Valid for second possibility - resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar.com", - "ip_sans": "1.2.3.4", - "alt_names": "foo.foobar.com,bar.foobar.com", - "ttl": "1h", - "other_sans": "1.3.6.1.4.1.311.20.2.4;UTF8:d234e@foobar.com", - }) - if err != nil { - t.Fatal(err) - } - certStr = resp.Data["certificate"].(string) - block, _ = pem.Decode([]byte(certStr)) - cert, err = x509.ParseCertificate(block.Bytes) - if err != nil { - t.Fatal(err) - } - if cert.IPAddresses[0].String() != "1.2.3.4" { - t.Fatalf("unexpected IP SAN %q", cert.IPAddresses[0].String()) - } - if len(cert.DNSNames) != 3 || - cert.DNSNames[0] != "bar.foobar.com" || - cert.DNSNames[1] != "foo.foobar.com" || - cert.DNSNames[2] != "foobar.com" { - t.Fatalf("unexpected DNS SANs %v", cert.DNSNames) - } - if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { - t.Logf("certificate 2 to check:\n%s", certStr) - } - - // Valid for both - oid1, type1, val1 := "1.3.6.1.4.1.311.20.2.3", "utf8", "devops@nope.com" - oid2, type2, val2 := "1.3.6.1.4.1.311.20.2.4", "utf-8", "d234e@foobar.com" - otherNames := []string{ - fmt.Sprintf("%s;%s:%s", oid1, type1, val1), - fmt.Sprintf("%s;%s:%s", oid2, type2, val2), - } - resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar.com", - "ip_sans": "1.2.3.4", - "alt_names": "foo.foobar.com,bar.foobar.com", - "ttl": 
"1h", - "other_sans": strings.Join(otherNames, ","), - }) - if err != nil { - t.Fatal(err) - } - certStr = resp.Data["certificate"].(string) - block, _ = pem.Decode([]byte(certStr)) - cert, err = x509.ParseCertificate(block.Bytes) - if err != nil { - t.Fatal(err) - } - if cert.IPAddresses[0].String() != "1.2.3.4" { - t.Fatalf("unexpected IP SAN %q", cert.IPAddresses[0].String()) - } - if len(cert.DNSNames) != 3 || - cert.DNSNames[0] != "bar.foobar.com" || - cert.DNSNames[1] != "foo.foobar.com" || - cert.DNSNames[2] != "foobar.com" { - t.Fatalf("unexpected DNS SANs %v", cert.DNSNames) - } - expectedOtherNames := []otherNameUtf8{{oid1, val1}, {oid2, val2}} - foundOtherNames, err := getOtherSANsFromX509Extensions(cert.Extensions) - if err != nil { - t.Fatal(err) - } - if diff := deep.Equal(expectedOtherNames, foundOtherNames); len(diff) != 0 { - t.Errorf("unexpected otherNames: %v", diff) - } - if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { - t.Logf("certificate 3 to check:\n%s", certStr) - } -} - -func TestBackend_AllowedSerialNumbers(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - var err error - var resp *logical.Response - var certStr string - var block *pem.Block - var cert *x509.Certificate - - _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "myvault.com", - }) - if err != nil { - t.Fatal(err) - } - - // First test that Serial Numbers are not allowed - _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - }) - if err != nil { - t.Fatal(err) - } - - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar", - "ttl": "1h", - }) - if err != nil { - t.Fatal(err) - } - - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar", - "ttl": "1h", - "serial_number": "foobar", - }) - if err == nil { - t.Fatal("expected error") - } - - // Update the role to allow 
serial numbers - _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "allowed_serial_numbers": "f00*,b4r*", - }) - if err != nil { - t.Fatal(err) - } - - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar", - "ttl": "1h", - // Not a valid serial number - "serial_number": "foobar", - }) - if err == nil { - t.Fatal("expected error") - } - - // Valid for first possibility - resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar", - "serial_number": "f00bar", - }) - if err != nil { - t.Fatal(err) - } - certStr = resp.Data["certificate"].(string) - block, _ = pem.Decode([]byte(certStr)) - cert, err = x509.ParseCertificate(block.Bytes) - if err != nil { - t.Fatal(err) - } - if cert.Subject.SerialNumber != "f00bar" { - t.Fatalf("unexpected Subject SerialNumber %s", cert.Subject.SerialNumber) - } - if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { - t.Logf("certificate 1 to check:\n%s", certStr) - } - - // Valid for second possibility - resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar", - "serial_number": "b4rf00", - }) - if err != nil { - t.Fatal(err) - } - certStr = resp.Data["certificate"].(string) - block, _ = pem.Decode([]byte(certStr)) - cert, err = x509.ParseCertificate(block.Bytes) - if err != nil { - t.Fatal(err) - } - if cert.Subject.SerialNumber != "b4rf00" { - t.Fatalf("unexpected Subject SerialNumber %s", cert.Subject.SerialNumber) - } - if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { - t.Logf("certificate 2 to check:\n%s", certStr) - } -} - -func TestBackend_URI_SANs(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - var err error - - _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "myvault.com", - }) - if err != nil { - t.Fatal(err) - } - - _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ - 
"allowed_domains": []string{"foobar.com", "zipzap.com"}, - "allow_bare_domains": true, - "allow_subdomains": true, - "allow_ip_sans": true, - "allowed_uri_sans": []string{"http://someuri/abc", "spiffe://host.com/*"}, - }) - if err != nil { - t.Fatal(err) - } - - // First test some bad stuff that shouldn't work - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar.com", - "ip_sans": "1.2.3.4", - "alt_names": "foo.foobar.com,bar.foobar.com", - "ttl": "1h", - "uri_sans": "http://www.mydomain.com/zxf", - }) - if err == nil { - t.Fatal("expected error") - } - - // Test valid single entry - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar.com", - "ip_sans": "1.2.3.4", - "alt_names": "foo.foobar.com,bar.foobar.com", - "ttl": "1h", - "uri_sans": "http://someuri/abc", - }) - if err != nil { - t.Fatal(err) - } - - // Test globed entry - _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar.com", - "ip_sans": "1.2.3.4", - "alt_names": "foo.foobar.com,bar.foobar.com", - "ttl": "1h", - "uri_sans": "spiffe://host.com/something", - }) - if err != nil { - t.Fatal(err) - } - - // Test multiple entries - resp, err := CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "foobar.com", - "ip_sans": "1.2.3.4", - "alt_names": "foo.foobar.com,bar.foobar.com", - "ttl": "1h", - "uri_sans": "spiffe://host.com/something,http://someuri/abc", - }) - if err != nil { - t.Fatal(err) - } - - certStr := resp.Data["certificate"].(string) - block, _ := pem.Decode([]byte(certStr)) - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - t.Fatal(err) - } - - URI0, _ := url.Parse("spiffe://host.com/something") - URI1, _ := url.Parse("http://someuri/abc") - - if len(cert.URIs) != 2 { - t.Fatalf("expected 2 valid URIs SANs %v", cert.URIs) - } - - if cert.URIs[0].String() != URI0.String() || cert.URIs[1].String() != URI1.String() { - t.Fatalf( - "expected URIs SANs %v to equal 
provided values spiffe://host.com/something, http://someuri/abc", - cert.URIs) - } -} - -func TestBackend_AllowedURISANsTemplate(t *testing.T) { - t.Parallel() - coreConfig := &vault.CoreConfig{ - CredentialBackends: map[string]logical.Factory{ - "userpass": userpass.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - client := cluster.Cores[0].Client - - // Write test policy for userpass auth method. - err := client.Sys().PutPolicy("test", ` - path "pki/*" { - capabilities = ["update"] - }`) - if err != nil { - t.Fatal(err) - } - - // Enable userpass auth method. - if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { - t.Fatal(err) - } - - // Configure test role for userpass. - if _, err := client.Logical().Write("auth/userpass/users/userpassname", map[string]interface{}{ - "password": "test", - "policies": "test", - }); err != nil { - t.Fatal(err) - } - - // Login userpass for test role and keep client token. - secret, err := client.Logical().Write("auth/userpass/login/userpassname", map[string]interface{}{ - "password": "test", - }) - if err != nil || secret == nil { - t.Fatal(err) - } - userpassToken := secret.Auth.ClientToken - - // Get auth accessor for identity template. - auths, err := client.Sys().ListAuth() - if err != nil { - t.Fatal(err) - } - userpassAccessor := auths["userpass/"].Accessor - - // Mount PKI. - err = client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "16h", - MaxLeaseTTL: "60h", - }, - }) - if err != nil { - t.Fatal(err) - } - - // Generate internal CA. - _, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "myvault.com", - }) - if err != nil { - t.Fatal(err) - } - - // Write role PKI. 
- _, err = client.Logical().Write("pki/roles/test", map[string]interface{}{ - "allowed_uri_sans": []string{ - "spiffe://domain/{{identity.entity.aliases." + userpassAccessor + ".name}}", - "spiffe://domain/{{identity.entity.aliases." + userpassAccessor + ".name}}/*", "spiffe://domain/foo", - }, - "allowed_uri_sans_template": true, - "require_cn": false, - }) - if err != nil { - t.Fatal(err) - } - - // Issue certificate with identity templating - client.SetToken(userpassToken) - _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"uri_sans": "spiffe://domain/userpassname, spiffe://domain/foo"}) - if err != nil { - t.Fatal(err) - } - - // Issue certificate with identity templating and glob - client.SetToken(userpassToken) - _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"uri_sans": "spiffe://domain/userpassname/bar"}) - if err != nil { - t.Fatal(err) - } - - // Issue certificate with non-matching identity template parameter - client.SetToken(userpassToken) - _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"uri_sans": "spiffe://domain/unknownuser"}) - if err == nil { - t.Fatal(err) - } - - // Set allowed_uri_sans_template to false. - _, err = client.Logical().Write("pki/roles/test", map[string]interface{}{ - "allowed_uri_sans_template": false, - }) - if err != nil { - t.Fatal(err) - } - - // Issue certificate with userpassToken. 
- _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"uri_sans": "spiffe://domain/users/userpassname"}) - if err == nil { - t.Fatal("expected error") - } -} - -func TestBackend_AllowedDomainsTemplate(t *testing.T) { - t.Parallel() - coreConfig := &vault.CoreConfig{ - CredentialBackends: map[string]logical.Factory{ - "userpass": userpass.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - client := cluster.Cores[0].Client - - // Write test policy for userpass auth method. - err := client.Sys().PutPolicy("test", ` - path "pki/*" { - capabilities = ["update"] - }`) - if err != nil { - t.Fatal(err) - } - - // Enable userpass auth method. - if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { - t.Fatal(err) - } - - // Configure test role for userpass. - if _, err := client.Logical().Write("auth/userpass/users/userpassname", map[string]interface{}{ - "password": "test", - "policies": "test", - }); err != nil { - t.Fatal(err) - } - - // Login userpass for test role and set client token - userpassAuth, err := auth.NewUserpassAuth("userpassname", &auth.Password{FromString: "test"}) - if err != nil { - t.Fatal(err) - } - - // Get auth accessor for identity template. - auths, err := client.Sys().ListAuth() - if err != nil { - t.Fatal(err) - } - userpassAccessor := auths["userpass/"].Accessor - - // Mount PKI. - err = client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "16h", - MaxLeaseTTL: "60h", - }, - }) - if err != nil { - t.Fatal(err) - } - - // Generate internal CA. - _, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "myvault.com", - }) - if err != nil { - t.Fatal(err) - } - - // Write role PKI. 
- _, err = client.Logical().Write("pki/roles/test", map[string]interface{}{ - "allowed_domains": []string{ - "foobar.com", "zipzap.com", "{{identity.entity.aliases." + userpassAccessor + ".name}}", - "foo.{{identity.entity.aliases." + userpassAccessor + ".name}}.example.com", - }, - "allowed_domains_template": true, - "allow_bare_domains": true, - }) - if err != nil { - t.Fatal(err) - } - - // Issue certificate with userpassToken. - secret, err := client.Auth().Login(context.TODO(), userpassAuth) - if err != nil { - t.Fatal(err) - } - if err != nil || secret == nil { - t.Fatal(err) - } - _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "userpassname"}) - if err != nil { - t.Fatal(err) - } - - // Issue certificate for foobar.com to verify allowed_domain_template doesn't break plain domains. - _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "foobar.com"}) - if err != nil { - t.Fatal(err) - } - - // Issue certificate for unknown userpassname. - _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "unknownuserpassname"}) - if err == nil { - t.Fatal("expected error") - } - - // Issue certificate for foo.userpassname.domain. - _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "foo.userpassname.example.com"}) - if err != nil { - t.Fatal("expected error") - } - - // Set allowed_domains_template to false. - _, err = client.Logical().Write("pki/roles/test", map[string]interface{}{ - "allowed_domains_template": false, - }) - if err != nil { - t.Fatal(err) - } - - // Issue certificate with userpassToken. 
- _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "userpassname"}) - if err == nil { - t.Fatal("expected error") - } -} - -func TestReadWriteDeleteRoles(t *testing.T) { - t.Parallel() - ctx := context.Background() - coreConfig := &vault.CoreConfig{ - CredentialBackends: map[string]logical.Factory{ - "userpass": userpass.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - client := cluster.Cores[0].Client - - // Mount PKI. - err := client.Sys().MountWithContext(ctx, "pki", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "16h", - MaxLeaseTTL: "60h", - }, - }) - if err != nil { - t.Fatal(err) - } - - resp, err := client.Logical().ReadWithContext(ctx, "pki/roles/test") - if err != nil { - t.Fatal(err) - } - - if resp != nil { - t.Fatalf("response should have been emtpy but was:\n%#v", resp) - } - - // Write role PKI. - _, err = client.Logical().WriteWithContext(ctx, "pki/roles/test", map[string]interface{}{}) - if err != nil { - t.Fatal(err) - } - - // Read the role. 
- resp, err = client.Logical().ReadWithContext(ctx, "pki/roles/test") - if err != nil { - t.Fatal(err) - } - - if resp.Data == nil { - t.Fatal("default data within response was nil when it should have contained data") - } - - // Validate that we have not changed any defaults unknowingly - expectedData := map[string]interface{}{ - "key_type": "rsa", - "use_csr_sans": true, - "client_flag": true, - "allowed_serial_numbers": []interface{}{}, - "generate_lease": false, - "signature_bits": json.Number("256"), - "use_pss": false, - "allowed_domains": []interface{}{}, - "allowed_uri_sans_template": false, - "enforce_hostnames": true, - "policy_identifiers": []interface{}{}, - "require_cn": true, - "allowed_domains_template": false, - "allow_token_displayname": false, - "country": []interface{}{}, - "not_after": "", - "postal_code": []interface{}{}, - "use_csr_common_name": true, - "allow_localhost": true, - "allow_subdomains": false, - "allow_wildcard_certificates": true, - "allowed_other_sans": []interface{}{}, - "allowed_uri_sans": []interface{}{}, - "basic_constraints_valid_for_non_ca": false, - "key_usage": []interface{}{"DigitalSignature", "KeyAgreement", "KeyEncipherment"}, - "not_before_duration": json.Number("30"), - "allow_glob_domains": false, - "ttl": json.Number("0"), - "ou": []interface{}{}, - "email_protection_flag": false, - "locality": []interface{}{}, - "server_flag": true, - "allow_bare_domains": false, - "allow_ip_sans": true, - "ext_key_usage_oids": []interface{}{}, - "allow_any_name": false, - "ext_key_usage": []interface{}{}, - "key_bits": json.Number("2048"), - "max_ttl": json.Number("0"), - "no_store": false, - "organization": []interface{}{}, - "province": []interface{}{}, - "street_address": []interface{}{}, - "code_signing_flag": false, - "issuer_ref": "default", - "cn_validations": []interface{}{"email", "hostname"}, - "allowed_user_ids": []interface{}{}, - } - - if diff := deep.Equal(expectedData, resp.Data); len(diff) > 0 { - t.Fatalf("pki 
role default values have changed, diff: %v", diff) - } - - _, err = client.Logical().DeleteWithContext(ctx, "pki/roles/test") - if err != nil { - t.Fatal(err) - } - - resp, err = client.Logical().ReadWithContext(ctx, "pki/roles/test") - if err != nil { - t.Fatal(err) - } - - if resp != nil { - t.Fatalf("response should have been empty but was:\n%#v", resp) - } -} - -func setCerts() { - cak, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - panic(err) - } - marshaledKey, err := x509.MarshalECPrivateKey(cak) - if err != nil { - panic(err) - } - keyPEMBlock := &pem.Block{ - Type: "EC PRIVATE KEY", - Bytes: marshaledKey, - } - ecCAKey = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) - if err != nil { - panic(err) - } - subjKeyID, err := certutil.GetSubjKeyID(cak) - if err != nil { - panic(err) - } - caCertTemplate := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "root.localhost", - }, - SubjectKeyId: subjKeyID, - DNSNames: []string{"root.localhost"}, - KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign), - SerialNumber: big.NewInt(mathrand.Int63()), - NotAfter: time.Now().Add(262980 * time.Hour), - BasicConstraintsValid: true, - IsCA: true, - } - caBytes, err := x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, cak.Public(), cak) - if err != nil { - panic(err) - } - caCertPEMBlock := &pem.Block{ - Type: "CERTIFICATE", - Bytes: caBytes, - } - ecCACert = strings.TrimSpace(string(pem.EncodeToMemory(caCertPEMBlock))) - - rak, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - panic(err) - } - marshaledKey = x509.MarshalPKCS1PrivateKey(rak) - keyPEMBlock = &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: marshaledKey, - } - rsaCAKey = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) - if err != nil { - panic(err) - } - _, err = certutil.GetSubjKeyID(rak) - if err != nil { - panic(err) - } - caBytes, err = x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, 
rak.Public(), rak) - if err != nil { - panic(err) - } - caCertPEMBlock = &pem.Block{ - Type: "CERTIFICATE", - Bytes: caBytes, - } - rsaCACert = strings.TrimSpace(string(pem.EncodeToMemory(caCertPEMBlock))) - - _, edk, err := ed25519.GenerateKey(rand.Reader) - if err != nil { - panic(err) - } - marshaledKey, err = x509.MarshalPKCS8PrivateKey(edk) - if err != nil { - panic(err) - } - keyPEMBlock = &pem.Block{ - Type: "PRIVATE KEY", - Bytes: marshaledKey, - } - edCAKey = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) - if err != nil { - panic(err) - } - _, err = certutil.GetSubjKeyID(edk) - if err != nil { - panic(err) - } - caBytes, err = x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, edk.Public(), edk) - if err != nil { - panic(err) - } - caCertPEMBlock = &pem.Block{ - Type: "CERTIFICATE", - Bytes: caBytes, - } - edCACert = strings.TrimSpace(string(pem.EncodeToMemory(caCertPEMBlock))) -} - -func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { - // Use a ridiculously long time to minimize the chance - // that we have to deal with more than one interval. - // InMemSink rounds down to an interval boundary rather than - // starting one at the time of initialization. - // - // This test is not parallelizable. 
- inmemSink := metrics.NewInmemSink( - 1000000*time.Hour, - 2000000*time.Hour) - - metricsConf := metrics.DefaultConfig("") - metricsConf.EnableHostname = false - metricsConf.EnableHostnameLabel = false - metricsConf.EnableServiceLabel = false - metricsConf.EnableTypePrefix = false - - _, err := metrics.NewGlobal(metricsConf, inmemSink) - if err != nil { - t.Fatal(err) - } - - // Enable PKI secret engine - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - cores := cluster.Cores - vault.TestWaitActive(t, cores[0].Core) - client := cores[0].Client - - // Mount /pki as a root CA - err = client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "16h", - MaxLeaseTTL: "32h", - }, - }) - if err != nil { - t.Fatal(err) - } - - // Set up Metric Configuration, then restart to enable it - _, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ - "maintain_stored_certificate_counts": true, - "publish_stored_certificate_count_metrics": true, - }) - _, err = client.Logical().Write("/sys/plugins/reload/backend", map[string]interface{}{ - "mounts": "pki/", - }) - - // Check the metrics initialized in order to calculate backendUUID for /pki - // BackendUUID not consistent during tests with UUID from /sys/mounts/pki - metricsSuffix := "total_certificates_stored" - backendUUID := "" - mostRecentInterval := inmemSink.Data()[len(inmemSink.Data())-1] - for _, existingGauge := range mostRecentInterval.Gauges { - if strings.HasSuffix(existingGauge.Name, metricsSuffix) { - expandedGaugeName := existingGauge.Name - backendUUID = strings.Split(expandedGaugeName, ".")[2] - break - } - } - if backendUUID == "" { - t.Fatalf("No Gauge Found ending with %s", metricsSuffix) - } - - // Set the cluster's 
certificate as the root CA in /pki - pemBundleRootCA := string(cluster.CACertPEM) + string(cluster.CAKeyPEM) - _, err = client.Logical().Write("pki/config/ca", map[string]interface{}{ - "pem_bundle": pemBundleRootCA, - }) - if err != nil { - t.Fatal(err) - } - - // Mount /pki2 to operate as an intermediate CA - err = client.Sys().Mount("pki2", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "16h", - MaxLeaseTTL: "32h", - }, - }) - if err != nil { - t.Fatal(err) - } - // Set up Metric Configuration, then restart to enable it - _, err = client.Logical().Write("pki2/config/auto-tidy", map[string]interface{}{ - "maintain_stored_certificate_counts": true, - "publish_stored_certificate_count_metrics": true, - }) - _, err = client.Logical().Write("/sys/plugins/reload/backend", map[string]interface{}{ - "mounts": "pki2/", - }) - - // Create a CSR for the intermediate CA - secret, err := client.Logical().Write("pki2/intermediate/generate/internal", nil) - if err != nil { - t.Fatal(err) - } - intermediateCSR := secret.Data["csr"].(string) - - // Sign the intermediate CSR using /pki - secret, err = client.Logical().Write("pki/root/sign-intermediate", map[string]interface{}{ - "permitted_dns_domains": ".myvault.com", - "csr": intermediateCSR, - "ttl": "10s", - }) - if err != nil { - t.Fatal(err) - } - intermediateCertSerial := secret.Data["serial_number"].(string) - intermediateCASerialColon := strings.ReplaceAll(strings.ToLower(intermediateCertSerial), ":", "-") - - // Get the intermediate cert after signing - secret, err = client.Logical().Read("pki/cert/" + intermediateCASerialColon) - if err != nil { - t.Fatal(err) - } - - if secret == nil || len(secret.Data) == 0 || len(secret.Data["certificate"].(string)) == 0 { - t.Fatal("expected certificate information from read operation") - } - - // Issue a revoke on on /pki - _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ - "serial_number": intermediateCertSerial, - }) - if 
err != nil { - t.Fatal(err) - } - - // Check the cert-count metrics - expectedCertCountGaugeMetrics := map[string]float32{ - "secrets.pki." + backendUUID + ".total_revoked_certificates_stored": 1, - "secrets.pki." + backendUUID + ".total_certificates_stored": 1, - } - mostRecentInterval = inmemSink.Data()[len(inmemSink.Data())-1] - for gauge, value := range expectedCertCountGaugeMetrics { - if _, ok := mostRecentInterval.Gauges[gauge]; !ok { - t.Fatalf("Expected metrics to include a value for gauge %s", gauge) - } - if value != mostRecentInterval.Gauges[gauge].Value { - t.Fatalf("Expected value metric %s to be %f but got %f", gauge, value, mostRecentInterval.Gauges[gauge].Value) - } - } - - // Revoke adds a fixed 2s buffer, so we sleep for a bit longer to ensure - // the revocation time is past the current time. - time.Sleep(3 * time.Second) - - // Issue a tidy on /pki - _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ - "tidy_cert_store": true, - "tidy_revoked_certs": true, - "safety_buffer": "1s", - }) - if err != nil { - t.Fatal(err) - } - - // Sleep a bit to make sure we're past the safety buffer - time.Sleep(2 * time.Second) - - // Get CRL and ensure the tidied cert is still in the list after the tidy - // operation since it's not past the NotAfter (ttl) value yet. 
- crl := getParsedCrl(t, client, "pki") - - revokedCerts := crl.TBSCertList.RevokedCertificates - if len(revokedCerts) == 0 { - t.Fatal("expected CRL to be non-empty") - } - - sn := certutil.GetHexFormatted(revokedCerts[0].SerialNumber.Bytes(), ":") - if sn != intermediateCertSerial { - t.Fatalf("expected: %v, got: %v", intermediateCertSerial, sn) - } - - // Wait for cert to expire - time.Sleep(10 * time.Second) - - // Issue a tidy on /pki - _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ - "tidy_cert_store": true, - "tidy_revoked_certs": true, - "safety_buffer": "1s", - }) - if err != nil { - t.Fatal(err) - } - - // Sleep a bit to make sure we're past the safety buffer - time.Sleep(2 * time.Second) - - // Issue a tidy-status on /pki - { - tidyStatus, err := client.Logical().Read("pki/tidy-status") - if err != nil { - t.Fatal(err) - } - expectedData := map[string]interface{}{ - "safety_buffer": json.Number("1"), - "issuer_safety_buffer": json.Number("31536000"), - "revocation_queue_safety_buffer": json.Number("172800"), - "tidy_cert_store": true, - "tidy_revoked_certs": true, - "tidy_revoked_cert_issuer_associations": false, - "tidy_expired_issuers": false, - "tidy_move_legacy_ca_bundle": false, - "tidy_revocation_queue": false, - "tidy_cross_cluster_revoked_certs": false, - "pause_duration": "0s", - "state": "Finished", - "error": nil, - "time_started": nil, - "time_finished": nil, - "last_auto_tidy_finished": nil, - "message": nil, - "cert_store_deleted_count": json.Number("1"), - "revoked_cert_deleted_count": json.Number("1"), - "missing_issuer_cert_count": json.Number("0"), - "current_cert_store_count": json.Number("0"), - "current_revoked_cert_count": json.Number("0"), - "revocation_queue_deleted_count": json.Number("0"), - "cross_revoked_cert_deleted_count": json.Number("0"), - "internal_backend_uuid": backendUUID, - "tidy_acme": false, - "acme_account_safety_buffer": json.Number("2592000"), - "acme_orders_deleted_count": json.Number("0"), 
- "acme_account_revoked_count": json.Number("0"), - "acme_account_deleted_count": json.Number("0"), - "total_acme_account_count": json.Number("0"), - } - // Let's copy the times from the response so that we can use deep.Equal() - timeStarted, ok := tidyStatus.Data["time_started"] - if !ok || timeStarted == "" { - t.Fatal("Expected tidy status response to include a value for time_started") - } - expectedData["time_started"] = timeStarted - timeFinished, ok := tidyStatus.Data["time_finished"] - if !ok || timeFinished == "" { - t.Fatal("Expected tidy status response to include a value for time_finished") - } - expectedData["time_finished"] = timeFinished - expectedData["last_auto_tidy_finished"] = tidyStatus.Data["last_auto_tidy_finished"] - - if diff := deep.Equal(expectedData, tidyStatus.Data); diff != nil { - t.Fatal(diff) - } - } - // Check the tidy metrics - { - // Map of gauges to expected value - expectedGauges := map[string]float32{ - "secrets.pki.tidy.cert_store_current_entry": 0, - "secrets.pki.tidy.cert_store_total_entries": 1, - "secrets.pki.tidy.revoked_cert_current_entry": 0, - "secrets.pki.tidy.revoked_cert_total_entries": 1, - "secrets.pki.tidy.start_time_epoch": 0, - "secrets.pki." + backendUUID + ".total_certificates_stored": 0, - "secrets.pki." 
+ backendUUID + ".total_revoked_certificates_stored": 0, - "secrets.pki.tidy.cert_store_total_entries_remaining": 0, - "secrets.pki.tidy.revoked_cert_total_entries_remaining": 0, - } - // Map of counters to the sum of the metrics for that counter - expectedCounters := map[string]float64{ - "secrets.pki.tidy.cert_store_deleted_count": 1, - "secrets.pki.tidy.revoked_cert_deleted_count": 1, - "secrets.pki.tidy.success": 2, - // Note that "secrets.pki.tidy.failure" won't be in the captured metrics - } - - // If the metrics span more than one interval, skip the checks - intervals := inmemSink.Data() - if len(intervals) == 1 { - interval := inmemSink.Data()[0] - - for gauge, value := range expectedGauges { - if _, ok := interval.Gauges[gauge]; !ok { - t.Fatalf("Expected metrics to include a value for gauge %s", gauge) - } - if value != interval.Gauges[gauge].Value { - t.Fatalf("Expected value metric %s to be %f but got %f", gauge, value, interval.Gauges[gauge].Value) - } - - } - for counter, value := range expectedCounters { - if _, ok := interval.Counters[counter]; !ok { - t.Fatalf("Expected metrics to include a value for couter %s", counter) - } - if value != interval.Counters[counter].Sum { - t.Fatalf("Expected the sum of metric %s to be %f but got %f", counter, value, interval.Counters[counter].Sum) - } - } - - tidyDuration, ok := interval.Samples["secrets.pki.tidy.duration"] - if !ok { - t.Fatal("Expected metrics to include a value for sample secrets.pki.tidy.duration") - } - if tidyDuration.Count <= 0 { - t.Fatalf("Expected metrics to have count > 0 for sample secrets.pki.tidy.duration, but got %d", tidyDuration.Count) - } - } - } - - crl = getParsedCrl(t, client, "pki") - - revokedCerts = crl.TBSCertList.RevokedCertificates - if len(revokedCerts) != 0 { - t.Fatal("expected CRL to be empty") - } -} - -func TestBackend_Root_FullCAChain(t *testing.T) { - t.Parallel() - testCases := []struct { - testName string - keyType string - }{ - {testName: "RSA", keyType: 
"rsa"}, - {testName: "ED25519", keyType: "ed25519"}, - {testName: "EC", keyType: "ec"}, - } - for _, tc := range testCases { - tc := tc - t.Run(tc.testName, func(t *testing.T) { - runFullCAChainTest(t, tc.keyType) - }) - } -} - -func runFullCAChainTest(t *testing.T, keyType string) { - // Generate a root CA at /pki-root - b_root, s_root := CreateBackendWithStorage(t) - - var err error - - resp, err := CBWrite(b_root, s_root, "root/generate/exported", map[string]interface{}{ - "common_name": "root myvault.com", - "key_type": keyType, - }) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("expected ca info") - } - rootData := resp.Data - rootCert := rootData["certificate"].(string) - - // Validate that root's /cert/ca-chain now contains the certificate. - resp, err = CBRead(b_root, s_root, "cert/ca_chain") - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("expected intermediate chain information") - } - - fullChain := resp.Data["ca_chain"].(string) - requireCertInCaChainString(t, fullChain, rootCert, "expected root cert within root cert/ca_chain") - - // Make sure when we issue a leaf certificate we get the full chain back. - _, err = CBWrite(b_root, s_root, "roles/example", map[string]interface{}{ - "allowed_domains": "example.com", - "allow_subdomains": "true", - "max_ttl": "1h", - }) - require.NoError(t, err, "error setting up pki root role: %v", err) - - resp, err = CBWrite(b_root, s_root, "issue/example", map[string]interface{}{ - "common_name": "test.example.com", - "ttl": "5m", - }) - require.NoError(t, err, "error issuing certificate from pki root: %v", err) - fullChainArray := resp.Data["ca_chain"].([]string) - requireCertInCaChainArray(t, fullChainArray, rootCert, "expected root cert within root issuance pki-root/issue/example") - - // Now generate an intermediate at /pki-intermediate, signed by the root. 
- b_int, s_int := CreateBackendWithStorage(t) - - resp, err = CBWrite(b_int, s_int, "intermediate/generate/exported", map[string]interface{}{ - "common_name": "intermediate myvault.com", - "key_type": keyType, - }) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("expected intermediate CSR info") - } - intermediateData := resp.Data - intermediateKey := intermediateData["private_key"].(string) - - resp, err = CBWrite(b_root, s_root, "root/sign-intermediate", map[string]interface{}{ - "csr": intermediateData["csr"], - "format": "pem", - }) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("expected signed intermediate info") - } - intermediateSignedData := resp.Data - intermediateCert := intermediateSignedData["certificate"].(string) - - rootCaCert := parseCert(t, rootCert) - intermediaryCaCert := parseCert(t, intermediateCert) - requireSignedBy(t, intermediaryCaCert, rootCaCert) - intermediateCaChain := intermediateSignedData["ca_chain"].([]string) - - require.Equal(t, parseCert(t, intermediateCaChain[0]), intermediaryCaCert, "intermediate signed cert should have been part of ca_chain") - require.Equal(t, parseCert(t, intermediateCaChain[1]), rootCaCert, "root cert should have been part of ca_chain") - - _, err = CBWrite(b_int, s_int, "intermediate/set-signed", map[string]interface{}{ - "certificate": intermediateCert + "\n" + rootCert + "\n", - }) - if err != nil { - t.Fatal(err) - } - - // Validate that intermediate's ca_chain field now includes the full - // chain. 
- resp, err = CBRead(b_int, s_int, "cert/ca_chain") - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("expected intermediate chain information") - } - - // Verify we have a proper CRL now - crl := getParsedCrlFromBackend(t, b_int, s_int, "crl") - require.Equal(t, 0, len(crl.TBSCertList.RevokedCertificates)) - - fullChain = resp.Data["ca_chain"].(string) - requireCertInCaChainString(t, fullChain, intermediateCert, "expected full chain to contain intermediate certificate from pki-intermediate/cert/ca_chain") - requireCertInCaChainString(t, fullChain, rootCert, "expected full chain to contain root certificate from pki-intermediate/cert/ca_chain") - - // Make sure when we issue a leaf certificate we get the full chain back. - _, err = CBWrite(b_int, s_int, "roles/example", map[string]interface{}{ - "allowed_domains": "example.com", - "allow_subdomains": "true", - "max_ttl": "1h", - }) - require.NoError(t, err, "error setting up pki intermediate role: %v", err) - - resp, err = CBWrite(b_int, s_int, "issue/example", map[string]interface{}{ - "common_name": "test.example.com", - "ttl": "5m", - }) - require.NoError(t, err, "error issuing certificate from pki intermediate: %v", err) - fullChainArray = resp.Data["ca_chain"].([]string) - requireCertInCaChainArray(t, fullChainArray, intermediateCert, "expected full chain to contain intermediate certificate from pki-intermediate/issue/example") - requireCertInCaChainArray(t, fullChainArray, rootCert, "expected full chain to contain root certificate from pki-intermediate/issue/example") - - // Finally, import this signing cert chain into a new mount to ensure - // "external" CAs behave as expected. - b_ext, s_ext := CreateBackendWithStorage(t) - - _, err = CBWrite(b_ext, s_ext, "config/ca", map[string]interface{}{ - "pem_bundle": intermediateKey + "\n" + intermediateCert + "\n" + rootCert + "\n", - }) - if err != nil { - t.Fatal(err) - } - - // Validate the external chain information was loaded correctly. 
- resp, err = CBRead(b_ext, s_ext, "cert/ca_chain") - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("expected intermediate chain information") - } - - fullChain = resp.Data["ca_chain"].(string) - if strings.Count(fullChain, intermediateCert) != 1 { - t.Fatalf("expected full chain to contain intermediate certificate; got %v occurrences", strings.Count(fullChain, intermediateCert)) - } - if strings.Count(fullChain, rootCert) != 1 { - t.Fatalf("expected full chain to contain root certificate; got %v occurrences", strings.Count(fullChain, rootCert)) - } - - // Now issue a short-lived certificate from our pki-external. - _, err = CBWrite(b_ext, s_ext, "roles/example", map[string]interface{}{ - "allowed_domains": "example.com", - "allow_subdomains": "true", - "max_ttl": "1h", - }) - require.NoError(t, err, "error setting up pki role: %v", err) - - resp, err = CBWrite(b_ext, s_ext, "issue/example", map[string]interface{}{ - "common_name": "test.example.com", - "ttl": "5m", - }) - require.NoError(t, err, "error issuing certificate: %v", err) - require.NotNil(t, resp, "got nil response from issuing request") - issueCrtAsPem := resp.Data["certificate"].(string) - issuedCrt := parseCert(t, issueCrtAsPem) - - // Verify that the certificates are signed by the intermediary CA key... 
- requireSignedBy(t, issuedCrt, intermediaryCaCert) - - // Test that we can request that the root ca certificate not appear in the ca_chain field - resp, err = CBWrite(b_ext, s_ext, "issue/example", map[string]interface{}{ - "common_name": "test.example.com", - "ttl": "5m", - "remove_roots_from_chain": "true", - }) - requireSuccessNonNilResponse(t, resp, err, "error issuing certificate when removing self signed") - fullChain = strings.Join(resp.Data["ca_chain"].([]string), "\n") - if strings.Count(fullChain, intermediateCert) != 1 { - t.Fatalf("expected full chain to contain intermediate certificate; got %v occurrences", strings.Count(fullChain, intermediateCert)) - } - if strings.Count(fullChain, rootCert) != 0 { - t.Fatalf("expected full chain to NOT contain root certificate; got %v occurrences", strings.Count(fullChain, rootCert)) - } -} - -func requireCertInCaChainArray(t *testing.T, chain []string, cert string, msgAndArgs ...interface{}) { - var fullChain string - for _, caCert := range chain { - fullChain = fullChain + "\n" + caCert - } - - requireCertInCaChainString(t, fullChain, cert, msgAndArgs) -} - -func requireCertInCaChainString(t *testing.T, chain string, cert string, msgAndArgs ...interface{}) { - count := strings.Count(chain, cert) - if count != 1 { - failMsg := fmt.Sprintf("Found %d occurrances of the cert in the provided chain", count) - require.FailNow(t, failMsg, msgAndArgs...) 
- } -} - -type MultiBool int - -const ( - MFalse MultiBool = iota - MTrue MultiBool = iota - MAny MultiBool = iota -) - -func (o MultiBool) ToValues() []bool { - if o == MTrue { - return []bool{true} - } - - if o == MFalse { - return []bool{false} - } - - if o == MAny { - return []bool{true, false} - } - - return []bool{} -} - -type IssuanceRegression struct { - AllowedDomains []string - AllowBareDomains MultiBool - AllowGlobDomains MultiBool - AllowSubdomains MultiBool - AllowLocalhost MultiBool - AllowWildcardCertificates MultiBool - CNValidations []string - CommonName string - Issued bool -} - -func RoleIssuanceRegressionHelper(t *testing.T, b *backend, s logical.Storage, index int, test IssuanceRegression) int { - tested := 0 - for _, AllowBareDomains := range test.AllowBareDomains.ToValues() { - for _, AllowGlobDomains := range test.AllowGlobDomains.ToValues() { - for _, AllowSubdomains := range test.AllowSubdomains.ToValues() { - for _, AllowLocalhost := range test.AllowLocalhost.ToValues() { - for _, AllowWildcardCertificates := range test.AllowWildcardCertificates.ToValues() { - role := fmt.Sprintf("issuance-regression-%d-bare-%v-glob-%v-subdomains-%v-localhost-%v-wildcard-%v", index, AllowBareDomains, AllowGlobDomains, AllowSubdomains, AllowLocalhost, AllowWildcardCertificates) - _, err := CBWrite(b, s, "roles/"+role, map[string]interface{}{ - "allowed_domains": test.AllowedDomains, - "allow_bare_domains": AllowBareDomains, - "allow_glob_domains": AllowGlobDomains, - "allow_subdomains": AllowSubdomains, - "allow_localhost": AllowLocalhost, - "allow_wildcard_certificates": AllowWildcardCertificates, - "cn_validations": test.CNValidations, - // TODO: test across this vector as well. Currently certain wildcard - // matching is broken with it enabled (such as x*x.foo). - "enforce_hostnames": false, - "key_type": "ec", - "key_bits": 256, - "no_store": true, - // With the CN Validations field, ensure we prevent CN from appearing - // in SANs. 
- }) - if err != nil { - t.Fatal(err) - } - - resp, err := CBWrite(b, s, "issue/"+role, map[string]interface{}{ - "common_name": test.CommonName, - "exclude_cn_from_sans": true, - }) - - haveErr := err != nil || resp == nil - expectErr := !test.Issued - - if haveErr != expectErr { - t.Fatalf("issuance regression test [%d] failed: haveErr: %v, expectErr: %v, err: %v, resp: %v, test case: %v, role: %v", index, haveErr, expectErr, err, resp, test, role) - } - - tested += 1 - } - } - } - } - } - - return tested -} - -func TestBackend_Roles_IssuanceRegression(t *testing.T) { - t.Parallel() - // Regression testing of role's issuance policy. - testCases := []IssuanceRegression{ - // allowed, bare, glob, subdomains, localhost, wildcards, cn, issued - - // === Globs not allowed but used === // - // Allowed contains globs, but globbing not allowed, resulting in all - // issuances failing. Note that tests against issuing a wildcard with - // a bare domain will be covered later. - /* 0 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "baz.fud.bar.foo", false}, - /* 1 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.fud.bar.foo", false}, - /* 2 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "fud.bar.foo", false}, - /* 3 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.bar.foo", false}, - /* 4 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "bar.foo", false}, - /* 5 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.foo", false}, - /* 6 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "foo", false}, - /* 7 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "baz.fud.bar.foo", false}, - /* 8 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.fud.bar.foo", false}, - /* 9 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "fud.bar.foo", false}, - /* 10 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.bar.foo", false}, - /* 11 */ 
{[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "bar.foo", false}, - /* 12 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "foo", false}, - - // === Localhost sanity === // - // Localhost forbidden, not matching allowed domains -> not issued - /* 13 */ {[]string{"*.*.foo"}, MAny, MAny, MAny, MFalse, MAny, nil, "localhost", false}, - // Localhost allowed, not matching allowed domains -> issued - /* 14 */ {[]string{"*.*.foo"}, MAny, MAny, MAny, MTrue, MAny, nil, "localhost", true}, - // Localhost allowed via allowed domains (and bare allowed), not by AllowLocalhost -> issued - /* 15 */ {[]string{"localhost"}, MTrue, MAny, MAny, MFalse, MAny, nil, "localhost", true}, - // Localhost allowed via allowed domains (and bare not allowed), not by AllowLocalhost -> not issued - /* 16 */ {[]string{"localhost"}, MFalse, MAny, MAny, MFalse, MAny, nil, "localhost", false}, - // Localhost allowed via allowed domains (but bare not allowed), and by AllowLocalhost -> issued - /* 17 */ {[]string{"localhost"}, MFalse, MAny, MAny, MTrue, MAny, nil, "localhost", true}, - - // === Bare wildcard issuance == // - // allowed_domains contains one or more wildcards and bare domains allowed, - // resulting in the cert being issued. - /* 18 */ {[]string{"*.foo"}, MTrue, MAny, MAny, MAny, MTrue, nil, "*.foo", true}, - /* 19 */ {[]string{"*.*.foo"}, MTrue, MAny, MAny, MAny, MAny, nil, "*.*.foo", false}, // Does not conform to RFC 6125 - - // === Double Leading Glob Testing === // - // Allowed contains globs, but glob allowed so certain matches work. - // The value of bare and localhost does not impact these results. 
- /* 20 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "baz.fud.bar.foo", true}, // glob domains allow infinite subdomains - /* 21 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.fud.bar.foo", true}, // glob domain allows wildcard of subdomains - /* 22 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "fud.bar.foo", true}, - /* 23 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.bar.foo", true}, // Regression fix: Vault#13530 - /* 24 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "bar.foo", false}, - /* 25 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "*.foo", false}, - /* 26 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "foo", false}, - - // Allowed contains globs, but glob and subdomain both work, so we expect - // wildcard issuance to work as well. The value of bare and localhost does - // not impact these results. - /* 27 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "baz.fud.bar.foo", true}, - /* 28 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.fud.bar.foo", true}, - /* 29 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "fud.bar.foo", true}, - /* 30 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.bar.foo", true}, // Regression fix: Vault#13530 - /* 31 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "bar.foo", false}, - /* 32 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "*.foo", false}, - /* 33 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "foo", false}, - - // === Single Leading Glob Testing === // - // Allowed contains globs, but glob allowed so certain matches work. - // The value of bare and localhost does not impact these results. 
- /* 34 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "baz.fud.bar.foo", true}, // glob domains allow infinite subdomains - /* 35 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.fud.bar.foo", true}, // glob domain allows wildcard of subdomains - /* 36 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "fud.bar.foo", true}, // glob domains allow infinite subdomains - /* 37 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.bar.foo", true}, // glob domain allows wildcards of subdomains - /* 38 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "bar.foo", true}, - /* 39 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "foo", false}, - - // Allowed contains globs, but glob and subdomain both work, so we expect - // wildcard issuance to work as well. The value of bare and localhost does - // not impact these results. - /* 40 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "baz.fud.bar.foo", true}, - /* 41 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.fud.bar.foo", true}, - /* 42 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "fud.bar.foo", true}, - /* 43 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.bar.foo", true}, - /* 44 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "bar.foo", true}, - /* 45 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "foo", false}, - - // === Only base domain name === // - // Allowed contains only domain components, but subdomains not allowed. This - // results in most issuances failing unless we allow bare domains, in which - // case only the final issuance for "foo" will succeed. 
- /* 46 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "baz.fud.bar.foo", false}, - /* 47 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "*.fud.bar.foo", false}, - /* 48 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "fud.bar.foo", false}, - /* 49 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "*.bar.foo", false}, - /* 50 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "bar.foo", false}, - /* 51 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "*.foo", false}, - /* 52 */ {[]string{"foo"}, MFalse, MAny, MFalse, MAny, MAny, nil, "foo", false}, - /* 53 */ {[]string{"foo"}, MTrue, MAny, MFalse, MAny, MAny, nil, "foo", true}, - - // Allowed contains only domain components, and subdomains are now allowed. - // This results in most issuances succeeding, with the exception of the - // base foo, which is still governed by base's value. - /* 54 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, nil, "baz.fud.bar.foo", true}, - /* 55 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*.fud.bar.foo", true}, - /* 56 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, nil, "fud.bar.foo", true}, - /* 57 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*.bar.foo", true}, - /* 58 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, nil, "bar.foo", true}, - /* 59 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*.foo", true}, - /* 60 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "x*x.foo", true}, // internal wildcards should be allowed per RFC 6125/6.4.3 - /* 61 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*x.foo", true}, // prefix wildcards should be allowed per RFC 6125/6.4.3 - /* 62 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "x*.foo", true}, // suffix wildcards should be allowed per RFC 6125/6.4.3 - /* 63 */ {[]string{"foo"}, MFalse, MAny, MTrue, MAny, MAny, nil, "foo", false}, - /* 64 */ {[]string{"foo"}, MTrue, MAny, MTrue, MAny, MAny, nil, "foo", 
true}, - - // === Internal Glob Matching === // - // Basic glob matching requirements - /* 65 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xerox.foo", true}, - /* 66 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xylophone.files.pyrex.foo", true}, // globs can match across subdomains - /* 67 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xercex.bar.foo", false}, // x.foo isn't matched - /* 68 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "bar.foo", false}, // x*x isn't matched. - /* 69 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.foo", false}, // unrelated wildcard - /* 70 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.x*x.foo", false}, // Does not conform to RFC 6125 - /* 71 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.xyx.foo", false}, // Globs and Subdomains do not layer per docs. - - // Various requirements around x*x.foo wildcard matching. - /* 72 */ {[]string{"x*x.foo"}, MFalse, MFalse, MAny, MAny, MAny, nil, "x*x.foo", false}, // base disabled, shouldn't match wildcard - /* 73 */ {[]string{"x*x.foo"}, MFalse, MTrue, MAny, MAny, MTrue, nil, "x*x.foo", true}, // base disallowed, but globbing allowed and should match - /* 74 */ {[]string{"x*x.foo"}, MTrue, MAny, MAny, MAny, MTrue, nil, "x*x.foo", true}, // base allowed, should match wildcard - - // Basic glob matching requirements with internal dots. 
- /* 75 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xerox.foo", false}, // missing dots - /* 76 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "x.ero.x.foo", true}, - /* 77 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xylophone.files.pyrex.foo", false}, // missing dots - /* 78 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "x.ylophone.files.pyre.x.foo", true}, // globs can match across subdomains - /* 79 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xercex.bar.foo", false}, // x.foo isn't matched - /* 80 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "bar.foo", false}, // x.*.x isn't matched. - /* 81 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.foo", false}, // unrelated wildcard - /* 82 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.x.*.x.foo", false}, // Does not conform to RFC 6125 - /* 83 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.x.y.x.foo", false}, // Globs and Subdomains do not layer per docs. 
- - // === Wildcard restriction testing === // - /* 84 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MFalse, nil, "*.fud.bar.foo", false}, // glob domain allows wildcard of subdomains - /* 85 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MFalse, nil, "*.bar.foo", false}, // glob domain allows wildcards of subdomains - /* 86 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*.fud.bar.foo", false}, - /* 87 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*.bar.foo", false}, - /* 88 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*.foo", false}, - /* 89 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "x*x.foo", false}, - /* 90 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*x.foo", false}, - /* 91 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "x*.foo", false}, - /* 92 */ {[]string{"x*x.foo"}, MTrue, MAny, MAny, MAny, MFalse, nil, "x*x.foo", false}, - /* 93 */ {[]string{"*.foo"}, MFalse, MFalse, MAny, MAny, MAny, nil, "*.foo", false}, // Bare and globs forbidden despite (potentially) allowing wildcards. 
- /* 94 */ {[]string{"x.*.x.foo"}, MAny, MAny, MAny, MAny, MAny, nil, "x.*.x.foo", false}, // Does not conform to RFC 6125 - - // === CN validation allowances === // - /* 95 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "*.fud.bar.foo", true}, - /* 96 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "*.fud.*.foo", true}, - /* 97 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "*.bar.*.bar", true}, - /* 98 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "foo@foo", true}, - /* 99 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "foo@foo@foo", true}, - /* 100 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "bar@bar@bar", true}, - /* 101 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar@bar@bar", false}, - /* 102 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar@bar", false}, - /* 103 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar@foo", true}, - /* 104 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar@foo", false}, - /* 105 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar@bar", false}, - /* 106 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar.foo", true}, - /* 107 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar.bar", false}, - /* 108 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar.foo", false}, - /* 109 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar.bar", false}, - } - - if len(testCases) != 110 { - t.Fatalf("misnumbered test case entries will make it hard to find bugs: %v", len(testCases)) - } - - b, s := CreateBackendWithStorage(t) - - // We need a RSA key so all signature sizes are valid with it. 
- resp, err := CBWrite(b, s, "root/generate/exported", map[string]interface{}{ - "common_name": "myvault.com", - "ttl": "128h", - "key_type": "rsa", - "key_bits": 2048, - }) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("expected ca info") - } - - tested := 0 - for index, test := range testCases { - tested += RoleIssuanceRegressionHelper(t, b, s, index, test) - } - - t.Logf("Issuance regression expanded matrix test scenarios: %d", tested) -} - -type KeySizeRegression struct { - // Values reused for both Role and CA configuration. - RoleKeyType string - RoleKeyBits []int - - // Signature Bits presently is only specified on the role. - RoleSignatureBits []int - RoleUsePSS bool - - // These are tuples; must be of the same length. - TestKeyTypes []string - TestKeyBits []int - - // All of the above key types/sizes must pass or fail together. - ExpectError bool -} - -func (k KeySizeRegression) KeyTypeValues() []string { - if k.RoleKeyType == "any" { - return []string{"rsa", "ec", "ed25519"} - } - - return []string{k.RoleKeyType} -} - -func RoleKeySizeRegressionHelper(t *testing.T, b *backend, s logical.Storage, index int, test KeySizeRegression) int { - tested := 0 - - for _, caKeyType := range test.KeyTypeValues() { - for _, caKeyBits := range test.RoleKeyBits { - // Generate a new CA key. 
- resp, err := CBWrite(b, s, "root/generate/exported", map[string]interface{}{ - "common_name": "myvault.com", - "ttl": "128h", - "key_type": caKeyType, - "key_bits": caKeyBits, - }) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("expected ca info") - } - - for _, roleKeyBits := range test.RoleKeyBits { - for _, roleSignatureBits := range test.RoleSignatureBits { - role := fmt.Sprintf("key-size-regression-%d-keytype-%v-keybits-%d-signature-bits-%d", index, test.RoleKeyType, roleKeyBits, roleSignatureBits) - _, err := CBWrite(b, s, "roles/"+role, map[string]interface{}{ - "key_type": test.RoleKeyType, - "key_bits": roleKeyBits, - "signature_bits": roleSignatureBits, - "use_pss": test.RoleUsePSS, - }) - if err != nil { - t.Fatal(err) - } - - for index, keyType := range test.TestKeyTypes { - keyBits := test.TestKeyBits[index] - - _, _, csrPem := generateCSR(t, &x509.CertificateRequest{ - Subject: pkix.Name{ - CommonName: "localhost", - }, - }, keyType, keyBits) - - resp, err = CBWrite(b, s, "sign/"+role, map[string]interface{}{ - "common_name": "localhost", - "csr": csrPem, - }) - - haveErr := err != nil || resp == nil - - if haveErr != test.ExpectError { - t.Fatalf("key size regression test [%d] failed: haveErr: %v, expectErr: %v, err: %v, resp: %v, test case: %v, caKeyType: %v, caKeyBits: %v, role: %v, keyType: %v, keyBits: %v", index, haveErr, test.ExpectError, err, resp, test, caKeyType, caKeyBits, role, keyType, keyBits) - } - - if resp != nil && test.RoleUsePSS && caKeyType == "rsa" { - leafCert := parseCert(t, resp.Data["certificate"].(string)) - switch leafCert.SignatureAlgorithm { - case x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS: - default: - t.Fatalf("key size regression test [%d] failed on role %v: unexpected signature algorithm; expected RSA-type CA to sign a leaf cert with PSS algorithm; got %v", index, role, leafCert.SignatureAlgorithm.String()) - } - } - - tested += 1 - } - } - } - - _, err = CBDelete(b, s, 
"root") - if err != nil { - t.Fatal(err) - } - } - } - - return tested -} - -func TestBackend_Roles_KeySizeRegression(t *testing.T) { - t.Parallel() - // Regression testing of role's issuance policy. - testCases := []KeySizeRegression{ - // RSA with default parameters should fail to issue smaller RSA keys - // and any size ECDSA/Ed25519 keys. - /* 0 */ {"rsa", []int{0, 2048}, []int{0, 256, 384, 512}, false, []string{"rsa", "ec", "ec", "ec", "ec", "ed25519"}, []int{1024, 224, 256, 384, 521, 0}, true}, - // But it should work to issue larger RSA keys. - /* 1 */ {"rsa", []int{0, 2048}, []int{0, 256, 384, 512}, false, []string{"rsa", "rsa"}, []int{2048, 3072}, false}, - - // EC with default parameters should fail to issue smaller EC keys - // and any size RSA/Ed25519 keys. - /* 2 */ {"ec", []int{0}, []int{0}, false, []string{"rsa", "ec", "ed25519"}, []int{2048, 224, 0}, true}, - // But it should work to issue larger EC keys. Note that we should be - // independent of signature bits as that's computed from the issuer - // type (for EC based issuers). - /* 3 */ {"ec", []int{224}, []int{0, 256, 384, 521}, false, []string{"ec", "ec", "ec", "ec"}, []int{224, 256, 384, 521}, false}, - /* 4 */ {"ec", []int{0, 256}, []int{0, 256, 384, 521}, false, []string{"ec", "ec", "ec"}, []int{256, 384, 521}, false}, - /* 5 */ {"ec", []int{384}, []int{0, 256, 384, 521}, false, []string{"ec", "ec"}, []int{384, 521}, false}, - /* 6 */ {"ec", []int{521}, []int{0, 256, 384, 512}, false, []string{"ec"}, []int{521}, false}, - - // Ed25519 should reject RSA and EC keys. - /* 7 */ {"ed25519", []int{0}, []int{0}, false, []string{"rsa", "ec", "ec"}, []int{2048, 256, 521}, true}, - // But it should work to issue Ed25519 keys. - /* 8 */ {"ed25519", []int{0}, []int{0}, false, []string{"ed25519"}, []int{0}, false}, - - // Any key type should reject insecure RSA key sizes. 
- /* 9 */ {"any", []int{0}, []int{0, 256, 384, 512}, false, []string{"rsa", "rsa"}, []int{512, 1024}, true}, - // But work for everything else. - /* 10 */ {"any", []int{0}, []int{0, 256, 384, 512}, false, []string{"rsa", "rsa", "ec", "ec", "ec", "ec", "ed25519"}, []int{2048, 3072, 224, 256, 384, 521, 0}, false}, - - // RSA with larger than default key size should reject smaller ones. - /* 11 */ {"rsa", []int{3072}, []int{0, 256, 384, 512}, false, []string{"rsa"}, []int{2048}, true}, - - // We should be able to sign with PSS with any CA key type. - /* 12 */ {"rsa", []int{0}, []int{0, 256, 384, 512}, true, []string{"rsa"}, []int{2048}, false}, - /* 13 */ {"ec", []int{0}, []int{0}, true, []string{"ec"}, []int{256}, false}, - /* 14 */ {"ed25519", []int{0}, []int{0}, true, []string{"ed25519"}, []int{0}, false}, - } - - if len(testCases) != 15 { - t.Fatalf("misnumbered test case entries will make it hard to find bugs: %v", len(testCases)) - } - - b, s := CreateBackendWithStorage(t) - - tested := 0 - for index, test := range testCases { - tested += RoleKeySizeRegressionHelper(t, b, s, index, test) - } - - t.Logf("Key size regression expanded matrix test scenarios: %d", tested) -} - -func TestRootWithExistingKey(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - var err error - - // Fail requests if type is existing, and we specify the key_type param - _, err = CBWrite(b, s, "root/generate/existing", map[string]interface{}{ - "common_name": "root myvault.com", - "key_type": "rsa", - }) - require.Error(t, err) - require.Contains(t, err.Error(), "key_type nor key_bits arguments can be set in this mode") - - // Fail requests if type is existing, and we specify the key_bits param - _, err = CBWrite(b, s, "root/generate/existing", map[string]interface{}{ - "common_name": "root myvault.com", - "key_bits": "2048", - }) - require.Error(t, err) - require.Contains(t, err.Error(), "key_type nor key_bits arguments can be set in this mode") - - // Fail if the 
specified key does not exist. - _, err = CBWrite(b, s, "issuers/generate/root/existing", map[string]interface{}{ - "common_name": "root myvault.com", - "issuer_name": "my-issuer1", - "key_ref": "my-key1", - }) - require.Error(t, err) - require.Contains(t, err.Error(), "unable to find PKI key for reference: my-key1") - - // Fail if the specified key name is default. - _, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{ - "common_name": "root myvault.com", - "issuer_name": "my-issuer1", - "key_name": "Default", - }) - require.Error(t, err) - require.Contains(t, err.Error(), "reserved keyword 'default' can not be used as key name") - - // Fail if the specified issuer name is default. - _, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{ - "common_name": "root myvault.com", - "issuer_name": "DEFAULT", - }) - require.Error(t, err) - require.Contains(t, err.Error(), "reserved keyword 'default' can not be used as issuer name") - - // Create the first CA - resp, err := CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{ - "common_name": "root myvault.com", - "key_type": "rsa", - "issuer_name": "my-issuer1", - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuers/generate/root/internal"), logical.UpdateOperation), resp, true) - require.NoError(t, err) - require.NotNil(t, resp.Data["certificate"]) - myIssuerId1 := resp.Data["issuer_id"] - myKeyId1 := resp.Data["key_id"] - require.NotEmpty(t, myIssuerId1) - require.NotEmpty(t, myKeyId1) - - // Fetch the parsed CRL; it should be empty as we've not revoked anything - parsedCrl := getParsedCrlFromBackend(t, b, s, "issuer/my-issuer1/crl/der") - require.Equal(t, len(parsedCrl.TBSCertList.RevokedCertificates), 0, "should have no revoked certificates") - - // Fail if the specified issuer name is re-used. 
- _, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{ - "common_name": "root myvault.com", - "issuer_name": "my-issuer1", - }) - require.Error(t, err) - require.Contains(t, err.Error(), "issuer name already in use") - - // Create the second CA - resp, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{ - "common_name": "root myvault.com", - "key_type": "rsa", - "issuer_name": "my-issuer2", - "key_name": "root-key2", - }) - require.NoError(t, err) - require.NotNil(t, resp.Data["certificate"]) - myIssuerId2 := resp.Data["issuer_id"] - myKeyId2 := resp.Data["key_id"] - require.NotEmpty(t, myIssuerId2) - require.NotEmpty(t, myKeyId2) - - // Fetch the parsed CRL; it should be empty as we've not revoked anything - parsedCrl = getParsedCrlFromBackend(t, b, s, "issuer/my-issuer2/crl/der") - require.Equal(t, len(parsedCrl.TBSCertList.RevokedCertificates), 0, "should have no revoked certificates") - - // Fail if the specified key name is re-used. - _, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{ - "common_name": "root myvault.com", - "issuer_name": "my-issuer3", - "key_name": "root-key2", - }) - require.Error(t, err) - require.Contains(t, err.Error(), "key name already in use") - - // Create a third CA re-using key from CA 1 - resp, err = CBWrite(b, s, "issuers/generate/root/existing", map[string]interface{}{ - "common_name": "root myvault.com", - "issuer_name": "my-issuer3", - "key_ref": myKeyId1, - }) - require.NoError(t, err) - require.NotNil(t, resp.Data["certificate"]) - myIssuerId3 := resp.Data["issuer_id"] - myKeyId3 := resp.Data["key_id"] - require.NotEmpty(t, myIssuerId3) - require.NotEmpty(t, myKeyId3) - - // Fetch the parsed CRL; it should be empty as we've not revoking anything. 
- parsedCrl = getParsedCrlFromBackend(t, b, s, "issuer/my-issuer3/crl/der") - require.Equal(t, len(parsedCrl.TBSCertList.RevokedCertificates), 0, "should have no revoked certificates") - // Signatures should be the same since this is just a reissued cert. We - // use signature as a proxy for "these two CRLs are equal". - firstCrl := getParsedCrlFromBackend(t, b, s, "issuer/my-issuer1/crl/der") - require.Equal(t, parsedCrl.SignatureValue, firstCrl.SignatureValue) - - require.NotEqual(t, myIssuerId1, myIssuerId2) - require.NotEqual(t, myIssuerId1, myIssuerId3) - require.NotEqual(t, myKeyId1, myKeyId2) - require.Equal(t, myKeyId1, myKeyId3) - - resp, err = CBList(b, s, "issuers") - require.NoError(t, err) - require.Equal(t, 3, len(resp.Data["keys"].([]string))) - require.Contains(t, resp.Data["keys"], string(myIssuerId1.(issuerID))) - require.Contains(t, resp.Data["keys"], string(myIssuerId2.(issuerID))) - require.Contains(t, resp.Data["keys"], string(myIssuerId3.(issuerID))) -} - -func TestIntermediateWithExistingKey(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - var err error - - // Fail requests if type is existing, and we specify the key_type param - _, err = CBWrite(b, s, "intermediate/generate/existing", map[string]interface{}{ - "common_name": "root myvault.com", - "key_type": "rsa", - }) - require.Error(t, err) - require.Contains(t, err.Error(), "key_type nor key_bits arguments can be set in this mode") - - // Fail requests if type is existing, and we specify the key_bits param - _, err = CBWrite(b, s, "intermediate/generate/existing", map[string]interface{}{ - "common_name": "root myvault.com", - "key_bits": "2048", - }) - require.Error(t, err) - require.Contains(t, err.Error(), "key_type nor key_bits arguments can be set in this mode") - - // Fail if the specified key does not exist. 
- _, err = CBWrite(b, s, "issuers/generate/intermediate/existing", map[string]interface{}{ - "common_name": "root myvault.com", - "key_ref": "my-key1", - }) - require.Error(t, err) - require.Contains(t, err.Error(), "unable to find PKI key for reference: my-key1") - - // Create the first intermediate CA - resp, err := CBWrite(b, s, "issuers/generate/intermediate/internal", map[string]interface{}{ - "common_name": "root myvault.com", - "key_type": "rsa", - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuers/generate/intermediate/internal"), logical.UpdateOperation), resp, true) - require.NoError(t, err) - // csr1 := resp.Data["csr"] - myKeyId1 := resp.Data["key_id"] - require.NotEmpty(t, myKeyId1) - - // Create the second intermediate CA - resp, err = CBWrite(b, s, "issuers/generate/intermediate/internal", map[string]interface{}{ - "common_name": "root myvault.com", - "key_type": "rsa", - "key_name": "interkey1", - }) - require.NoError(t, err) - // csr2 := resp.Data["csr"] - myKeyId2 := resp.Data["key_id"] - require.NotEmpty(t, myKeyId2) - - // Create a third intermediate CA re-using key from intermediate CA 1 - resp, err = CBWrite(b, s, "issuers/generate/intermediate/existing", map[string]interface{}{ - "common_name": "root myvault.com", - "key_ref": myKeyId1, - }) - require.NoError(t, err) - // csr3 := resp.Data["csr"] - myKeyId3 := resp.Data["key_id"] - require.NotEmpty(t, myKeyId3) - - require.NotEqual(t, myKeyId1, myKeyId2) - require.Equal(t, myKeyId1, myKeyId3, "our new ca did not seem to reuse the key as we expected.") -} - -func TestIssuanceTTLs(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root example.com", - "issuer_name": "root", - "ttl": "10s", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - rootCert := parseCert(t, resp.Data["certificate"].(string)) - - _, err = CBWrite(b, s, 
"roles/local-testing", map[string]interface{}{ - "allow_any_name": true, - "enforce_hostnames": false, - "key_type": "ec", - }) - require.NoError(t, err) - - _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ - "common_name": "testing", - "ttl": "1s", - }) - require.NoError(t, err, "expected issuance to succeed due to shorter ttl than cert ttl") - - _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ - "common_name": "testing", - }) - require.Error(t, err, "expected issuance to fail due to longer default ttl than cert ttl") - - resp, err = CBPatch(b, s, "issuer/root", map[string]interface{}{ - "leaf_not_after_behavior": "permit", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.Equal(t, resp.Data["leaf_not_after_behavior"], "permit") - - _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ - "common_name": "testing", - }) - require.NoError(t, err, "expected issuance to succeed due to permitted longer TTL") - - resp, err = CBWrite(b, s, "issuer/root", map[string]interface{}{ - "issuer_name": "root", - "leaf_not_after_behavior": "truncate", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.Equal(t, resp.Data["leaf_not_after_behavior"], "truncate") - - _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ - "common_name": "testing", - }) - require.NoError(t, err, "expected issuance to succeed due to truncated ttl") - - // Sleep until the parent cert expires and the clock rolls over - // to the next second. - time.Sleep(time.Until(rootCert.NotAfter) + (1500 * time.Millisecond)) - - resp, err = CBWrite(b, s, "issuer/root", map[string]interface{}{ - "issuer_name": "root", - "leaf_not_after_behavior": "err", - }) - require.NoError(t, err) - require.NotNil(t, resp) - - // Even 1s ttl should now fail. 
- _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ - "common_name": "testing", - "ttl": "1s", - }) - require.Error(t, err, "expected issuance to fail due to longer default ttl than cert ttl") -} - -func TestSealWrappedStorageConfigured(t *testing.T) { - t.Parallel() - b, _ := CreateBackendWithStorage(t) - wrappedEntries := b.Backend.PathsSpecial.SealWrapStorage - - // Make sure our legacy bundle is within the list - // NOTE: do not convert these test values to constants, we should always have these paths within seal wrap config - require.Contains(t, wrappedEntries, "config/ca_bundle", "Legacy bundle missing from seal wrap") - // The trailing / is important as it treats the entire folder requiring seal wrapping, not just config/key - require.Contains(t, wrappedEntries, "config/key/", "key prefix with trailing / missing from seal wrap.") -} - -func TestBackend_ConfigCA_WithECParams(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // Generated key with OpenSSL: - // $ openssl ecparam -out p256.key -name prime256v1 -genkey - // - // Regression test for https://github.com/hashicorp/vault/issues/16667 - resp, err := CBWrite(b, s, "config/ca", map[string]interface{}{ - "pem_bundle": ` ------BEGIN EC PARAMETERS----- -BggqhkjOPQMBBw== ------END EC PARAMETERS----- ------BEGIN EC PRIVATE KEY----- -MHcCAQEEINzXthCZdhyV7+wIEBl/ty+ctNsUS99ykTeax6EbYZtvoAoGCCqGSM49 -AwEHoUQDQgAE57NX8bR/nDoW8yRgLswoXBQcjHrdyfuHS0gPwki6BNnfunUzryVb -8f22/JWj6fsEF6AOADZlrswKIbR2Es9e/w== ------END EC PRIVATE KEY----- - `, - }) - require.NoError(t, err) - require.NotNil(t, resp, "expected ca info") - importedKeys := resp.Data["imported_keys"].([]string) - importedIssuers := resp.Data["imported_issuers"].([]string) - - require.Equal(t, len(importedKeys), 1) - require.Equal(t, len(importedIssuers), 0) -} - -func TestPerIssuerAIA(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // Generating a root without anything should not have AIAs. 
- resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "root example.com", - "issuer_name": "root", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - rootCert := parseCert(t, resp.Data["certificate"].(string)) - require.Empty(t, rootCert.OCSPServer) - require.Empty(t, rootCert.IssuingCertificateURL) - require.Empty(t, rootCert.CRLDistributionPoints) - - // Set some local URLs on the issuer. - resp, err = CBWrite(b, s, "issuer/default", map[string]interface{}{ - "issuing_certificates": []string{"https://google.com"}, - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuer/default"), logical.UpdateOperation), resp, true) - - require.NoError(t, err) - - _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allow_any_name": true, - "ttl": "85s", - "key_type": "ec", - }) - require.NoError(t, err) - - // Issue something with this re-configured issuer. - resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{ - "common_name": "localhost.com", - }) - require.NoError(t, err) - require.NotNil(t, resp) - leafCert := parseCert(t, resp.Data["certificate"].(string)) - require.Empty(t, leafCert.OCSPServer) - require.Equal(t, leafCert.IssuingCertificateURL, []string{"https://google.com"}) - require.Empty(t, leafCert.CRLDistributionPoints) - - // Set global URLs and ensure they don't appear on this issuer's leaf. 
- _, err = CBWrite(b, s, "config/urls", map[string]interface{}{ - "issuing_certificates": []string{"https://example.com/ca", "https://backup.example.com/ca"}, - "crl_distribution_points": []string{"https://example.com/crl", "https://backup.example.com/crl"}, - "ocsp_servers": []string{"https://example.com/ocsp", "https://backup.example.com/ocsp"}, - }) - require.NoError(t, err) - resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{ - "common_name": "localhost.com", - }) - require.NoError(t, err) - require.NotNil(t, resp) - leafCert = parseCert(t, resp.Data["certificate"].(string)) - require.Empty(t, leafCert.OCSPServer) - require.Equal(t, leafCert.IssuingCertificateURL, []string{"https://google.com"}) - require.Empty(t, leafCert.CRLDistributionPoints) - - // Now come back and remove the local modifications and ensure we get - // the defaults again. - _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "issuing_certificates": []string{}, - }) - require.NoError(t, err) - resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{ - "common_name": "localhost.com", - }) - require.NoError(t, err) - require.NotNil(t, resp) - leafCert = parseCert(t, resp.Data["certificate"].(string)) - require.Equal(t, leafCert.IssuingCertificateURL, []string{"https://example.com/ca", "https://backup.example.com/ca"}) - require.Equal(t, leafCert.OCSPServer, []string{"https://example.com/ocsp", "https://backup.example.com/ocsp"}) - require.Equal(t, leafCert.CRLDistributionPoints, []string{"https://example.com/crl", "https://backup.example.com/crl"}) - - // Validate that we can set an issuer name and remove it. 
- _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "issuer_name": "my-issuer", - }) - require.NoError(t, err) - _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "issuer_name": "", - }) - require.NoError(t, err) -} - -func TestIssuersWithoutCRLBits(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // Importing a root without CRL signing bits should work fine. - customBundleWithoutCRLBits := ` ------BEGIN CERTIFICATE----- -MIIDGTCCAgGgAwIBAgIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDDAhyb290 -LW5ldzAeFw0yMjA4MjQxMjEzNTVaFw0yMzA5MDMxMjEzNTVaMBMxETAPBgNVBAMM -CHJvb3QtbmV3MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAojTA/Mx7 -LVW/Zgn/N4BqZbaF82MrTIBFug3ob7mqycNRlWp4/PH8v37+jYn8e691HUsKjden -rDTrO06kiQKiJinAzmlLJvgcazE3aXoh7wSzVG9lFHYvljEmVj+yDbkeaqaCktup -skuNjxCoN9BLmKzZIwVCHn92ZHlhN6LI7CNaU3SDJdu7VftWF9Ugzt9FIvI+6Gcn -/WNE9FWvZ9o7035rZ+1vvTn7/tgxrj2k3XvD51Kq4tsSbqjnSf3QieXT6E6uvtUE -TbPp3xjBElgBCKmeogR1l28rs1aujqqwzZ0B/zOeF8ptaH0aZOIBsVDJR8yTwHzq -s34hNdNfKLHzOwIDAQABo3gwdjAdBgNVHQ4EFgQUF4djNmx+1+uJINhZ82pN+7jz -H8EwHwYDVR0jBBgwFoAUF4djNmx+1+uJINhZ82pN+7jzH8EwDwYDVR0TAQH/BAUw -AwEB/zAOBgNVHQ8BAf8EBAMCAoQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZI -hvcNAQELBQADggEBAICQovBz4KLWlLmXeZ2Vf6WfQYyGNgGyJa10XNXtWQ5dM2NU -OLAit4x1c2dz+aFocc8ZsX/ikYi/bruT2rsGWqMAGC4at3U4GuaYGO5a6XzMKIDC -nxIlbiO+Pn6Xum7fAqUri7+ZNf/Cygmc5sByi3MAAIkszeObUDZFTJL7gEOuXIMT -rKIXCINq/U+qc7m9AQ8vKhF1Ddj+dLGLzNQ5j3cKfilPs/wRaYqbMQvnmarX+5Cs -k1UL6kWSQsiP3+UWaBlcWkmD6oZ3fIG7c0aMxf7RISq1eTAM9XjH3vMxWQJlS5q3 -2weJ2LYoPe/DwX5CijR0IezapBCrin1BscJMLFQ= ------END CERTIFICATE----- ------BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCiNMD8zHstVb9m -Cf83gGpltoXzYytMgEW6DehvuarJw1GVanj88fy/fv6Nifx7r3UdSwqN16esNOs7 -TqSJAqImKcDOaUsm+BxrMTdpeiHvBLNUb2UUdi+WMSZWP7INuR5qpoKS26myS42P -EKg30EuYrNkjBUIef3ZkeWE3osjsI1pTdIMl27tV+1YX1SDO30Ui8j7oZyf9Y0T0 -Va9n2jvTfmtn7W+9Ofv+2DGuPaTde8PnUqri2xJuqOdJ/dCJ5dPoTq6+1QRNs+nf 
-GMESWAEIqZ6iBHWXbyuzVq6OqrDNnQH/M54Xym1ofRpk4gGxUMlHzJPAfOqzfiE1 -018osfM7AgMBAAECggEAAVd6kZZaN69IZITIc1vHRYa2rlZpKS2JP7c8Vd3Z/4Fz -ZZvnJ7LgVAmUYg5WPZ2sOqBNLfKVN/oke5Q0dALgdxYl7dWQIhPjHeRFbZFtjqEV -OXZGBniamMO/HSKGWGrqFf7BM/H7AhClUwQgjnzVSz+B+LJJidM+SVys3n1xuDmC -EP+iOda+bAHqHv/7oCELQKhLmCvPc9v2fDy+180ttdo8EHuxwVnKiyR/ryKFhSyx -K1wgAPQ9jO+V+GESL90rqpX/r501REsIOOpm4orueelHTD4+dnHxvUPqJ++9aYGX -79qBNPPUhxrQI1yoHxwW0cTxW5EqkZ9bT2lSd5rjcQKBgQDNyPBpidkHPrYemQDT -RldtS6FiW/jc1It/CRbjU4A6Gi7s3Cda43pEUObKNLeXMyLQaMf4GbDPDX+eh7B8 -RkUq0Q/N0H4bn1hbxYSUdgv0j/6czpMo6rLcJHGwOTSpHGsNsxSLL7xlpgzuzqrG -FzEgjMA1aD3w8B9+/77AoSLoMQKBgQDJyYMw82+euLYRbR5Wc/SbrWfh2n1Mr2BG -pp1ZNYorXE5CL4ScdLcgH1q/b8r5XGwmhMcpeA+geAAaKmk1CGG+gPLoq20c9Q1Y -Ykq9tUVJasIkelvbb/SPxyjkJdBwylzcPP14IJBsqQM0be+yVqLJJVHSaoKhXZcl -IW2xgCpjKwKBgFpeX5U5P+F6nKebMU2WmlYY3GpBUWxIummzKCX0SV86mFjT5UR4 -mPzfOjqaI/V2M1eqbAZ74bVLjDumAs7QXReMb5BGetrOgxLqDmrT3DQt9/YMkXtq -ddlO984XkRSisjB18BOfhvBsl0lX4I7VKHHO3amWeX0RNgOjc7VMDfRBAoGAWAQH -r1BfvZHACLXZ58fISCdJCqCsysgsbGS8eW77B5LJp+DmLQBT6DUE9j+i/0Wq/ton -rRTrbAkrsj4RicpQKDJCwe4UN+9DlOu6wijRQgbJC/Q7IOoieJxcX7eGxcve2UnZ -HY7GsD7AYRwa02UquCYJHIjM1enmxZFhMW1AD+UCgYEAm4jdNz5e4QjA4AkNF+cB -ZenrAZ0q3NbTyiSsJEAtRe/c5fNFpmXo3mqgCannarREQYYDF0+jpSoTUY8XAc4q -wL7EZNzwxITLqBnnHQbdLdAvYxB43kvWTy+JRK8qY9LAMCCFeDoYwXkWV4Wkx/b0 -TgM7RZnmEjNdeaa4M52o7VY= ------END PRIVATE KEY----- - ` - resp, err := CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": customBundleWithoutCRLBits, - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuers/import/bundle"), logical.UpdateOperation), resp, true) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data) - require.NotEmpty(t, resp.Data["imported_issuers"]) - require.NotEmpty(t, resp.Data["imported_keys"]) - require.NotEmpty(t, resp.Data["mapping"]) - - // Shouldn't have crl-signing on the newly imported issuer's usage. 
- resp, err = CBRead(b, s, "issuer/default") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data) - require.NotEmpty(t, resp.Data["usage"]) - require.NotContains(t, resp.Data["usage"], "crl-signing") - - // Modifying to set CRL should fail. - resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "usage": "issuing-certificates,crl-signing", - }) - require.Error(t, err) - require.True(t, resp.IsError()) - - // Modifying to set issuing-certificates and ocsp-signing should succeed. - resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "usage": "issuing-certificates,ocsp-signing", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotEmpty(t, resp.Data) - require.NotEmpty(t, resp.Data["usage"]) - require.NotContains(t, resp.Data["usage"], "crl-signing") -} - -func TestBackend_IfModifiedSinceHeaders(t *testing.T) { - t.Parallel() - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - RequestResponseCallback: schema.ResponseValidatingCallback(t), - }) - cluster.Start() - defer cluster.Cleanup() - client := cluster.Cores[0].Client - - // Mount PKI. - err := client.Sys().Mount("pki", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "16h", - MaxLeaseTTL: "60h", - // Required to allow the header to be passed through. - PassthroughRequestHeaders: []string{"if-modified-since"}, - AllowedResponseHeaders: []string{"Last-Modified"}, - }, - }) - require.NoError(t, err) - - // Get a time before CA generation. Subtract two seconds to ensure - // the value in the seconds field is different than the time the CA - // is actually generated at. - beforeOldCAGeneration := time.Now().Add(-2 * time.Second) - - // Generate an internal CA. This one is the default. 
- resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "Root X1", - "key_type": "ec", - "issuer_name": "old-root", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["certificate"]) - - // CA is generated, but give a grace window. - afterOldCAGeneration := time.Now().Add(2 * time.Second) - - // When you _save_ headers, client returns a copy. But when you go to - // reset them, it doesn't create a new copy (and instead directly - // assigns). This means we have to continually refresh our view of the - // last headers, otherwise the headers added after the last set operation - // leak into this copy... Yuck! - lastHeaders := client.Headers() - for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/old-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta"} { - t.Logf("path: %v", path) - field := "certificate" - if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { - field = "crl" - } - - // Reading the CA should work, without a header. - resp, err := client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - - // Ensure that the CA is returned correctly if we give it the old time. - client.AddHeader("If-Modified-Since", beforeOldCAGeneration.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - - // Ensure that the CA is elided if we give it the present time (plus a - // grace window). 
- client.AddHeader("If-Modified-Since", afterOldCAGeneration.Format(time.RFC1123)) - t.Logf("headers: %v", client.Headers()) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - // Wait three seconds. This ensures we have adequate grace period - // to distinguish the two cases, even with grace periods. - time.Sleep(3 * time.Second) - - // Generating a second root. This one isn't the default. - beforeNewCAGeneration := time.Now().Add(-2 * time.Second) - - // Generate an internal CA. This one is the default. - _, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "Root X1", - "key_type": "ec", - "issuer_name": "new-root", - }) - require.NoError(t, err) - - // As above. - afterNewCAGeneration := time.Now().Add(2 * time.Second) - - // New root isn't the default, so it has fewer paths. - for _, path := range []string{"pki/issuer/new-root/json", "pki/issuer/new-root/crl", "pki/issuer/new-root/crl/delta"} { - t.Logf("path: %v", path) - field := "certificate" - if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { - field = "crl" - } - - // Reading the CA should work, without a header. - resp, err := client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - - // Ensure that the CA is returned correctly if we give it the old time. - client.AddHeader("If-Modified-Since", beforeNewCAGeneration.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - - // Ensure that the CA is elided if we give it the present time (plus a - // grace window). 
- client.AddHeader("If-Modified-Since", afterNewCAGeneration.Format(time.RFC1123)) - t.Logf("headers: %v", client.Headers()) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - // Wait three seconds. This ensures we have adequate grace period - // to distinguish the two cases, even with grace periods. - time.Sleep(3 * time.Second) - - // Now swap the default issuers around. - _, err = client.Logical().Write("pki/config/issuers", map[string]interface{}{ - "default": "new-root", - }) - require.NoError(t, err) - - // Reading both with the last modified date should return new values. - for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { - t.Logf("path: %v", path) - field := "certificate" - if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { - field = "crl" - } - - // Ensure that the CA is returned correctly if we give it the old time. - client.AddHeader("If-Modified-Since", afterOldCAGeneration.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - - // Ensure that the CA is returned correctly if we give it the old time. 
- client.AddHeader("If-Modified-Since", afterNewCAGeneration.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - // Wait for things to settle, record the present time, and wait for the - // clock to definitely tick over again. - time.Sleep(2 * time.Second) - preRevocationTimestamp := time.Now() - time.Sleep(2 * time.Second) - - // The above tests should say everything is cached. - for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { - t.Logf("path: %v", path) - - // Ensure that the CA is returned correctly if we give it the new time. - client.AddHeader("If-Modified-Since", preRevocationTimestamp.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - // We could generate some leaves and verify the revocation updates the - // CRL. But, revoking the issuer behaves the same, so let's do that - // instead. - _, err = client.Logical().Write("pki/issuer/old-root/revoke", map[string]interface{}{}) - require.NoError(t, err) - - // CA should still be valid. - for _, path := range []string{"pki/cert/ca", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json"} { - t.Logf("path: %v", path) - - // Ensure that the CA is returned correctly if we give it the old time. 
- client.AddHeader("If-Modified-Since", preRevocationTimestamp.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - // CRL should be invalidated - for _, path := range []string{"pki/cert/crl", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { - t.Logf("path: %v", path) - field := "certificate" - if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { - field = "crl" - } - - client.AddHeader("If-Modified-Since", preRevocationTimestamp.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - // If we send some time in the future, everything should be cached again! - futureTime := time.Now().Add(30 * time.Second) - for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { - t.Logf("path: %v", path) - - // Ensure that the CA is returned correctly if we give it the new time. - client.AddHeader("If-Modified-Since", futureTime.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - beforeThreeWaySwap := time.Now().Add(-2 * time.Second) - - // Now, do a three-way swap of names (old->tmp; new->old; tmp->new). This - // should result in all names/CRLs being invalidated. 
- _, err = client.Logical().JSONMergePatch(ctx, "pki/issuer/old-root", map[string]interface{}{ - "issuer_name": "tmp-root", - }) - require.NoError(t, err) - _, err = client.Logical().JSONMergePatch(ctx, "pki/issuer/new-root", map[string]interface{}{ - "issuer_name": "old-root", - }) - require.NoError(t, err) - _, err = client.Logical().JSONMergePatch(ctx, "pki/issuer/tmp-root", map[string]interface{}{ - "issuer_name": "new-root", - }) - require.NoError(t, err) - - afterThreeWaySwap := time.Now().Add(2 * time.Second) - - for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { - t.Logf("path: %v", path) - field := "certificate" - if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { - field = "crl" - } - - // Ensure that the CA is returned if we give it the pre-update time. - client.AddHeader("If-Modified-Since", beforeThreeWaySwap.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - - // Ensure that the CA is elided correctly if we give it the after time. - client.AddHeader("If-Modified-Since", afterThreeWaySwap.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - - // Finally, rebuild the delta CRL and ensure that only that is - // invalidated. We first need to enable it though, and wait for - // all CRLs to rebuild. 
- _, err = client.Logical().Write("pki/config/crl", map[string]interface{}{ - "auto_rebuild": true, - "enable_delta": true, - }) - require.NoError(t, err) - time.Sleep(4 * time.Second) - beforeDeltaRotation := time.Now().Add(-2 * time.Second) - - resp, err = client.Logical().Read("pki/crl/rotate-delta") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.Equal(t, resp.Data["success"], true) - - afterDeltaRotation := time.Now().Add(2 * time.Second) - - for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl"} { - t.Logf("path: %v", path) - - for _, when := range []time.Time{beforeDeltaRotation, afterDeltaRotation} { - client.AddHeader("If-Modified-Since", when.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } - } - - for _, path := range []string{"pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { - t.Logf("path: %v", path) - field := "certificate" - if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { - field = "crl" - } - - // Ensure that the CRL is present if we give it the pre-update time. 
- client.AddHeader("If-Modified-Since", beforeDeltaRotation.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data[field]) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - - client.AddHeader("If-Modified-Since", afterDeltaRotation.Format(time.RFC1123)) - resp, err = client.Logical().Read(path) - require.NoError(t, err) - require.Nil(t, resp) - client.SetHeaders(lastHeaders) - lastHeaders = client.Headers() - } -} - -func TestBackend_InitializeCertificateCounts(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - ctx := context.Background() - - // Set up an Issuer and Role - // We need a root certificate to write/revoke certificates with - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "myvault.com", - }) - if err != nil { - t.Fatal(err) - } - if resp == nil { - t.Fatal("expected ca info") - } - - // Create a role - _, err = CBWrite(b, s, "roles/example", map[string]interface{}{ - "allowed_domains": "myvault.com", - "allow_bare_domains": true, - "allow_subdomains": true, - "max_ttl": "2h", - }) - if err != nil { - t.Fatal(err) - } - - // Put certificates A, B, C, D, E in backend - var certificates []string = []string{"a", "b", "c", "d", "e"} - serials := make([]string, 5) - for i, cn := range certificates { - resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ - "common_name": cn + ".myvault.com", - }) - if err != nil { - t.Fatal(err) - } - serials[i] = resp.Data["serial_number"].(string) - } - - // Turn on certificate counting: - CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ - "maintain_stored_certificate_counts": true, - "publish_stored_certificate_count_metrics": false, - }) - // Assert initialize from clean is correct: - b.initializeStoredCertificateCounts(ctx) - - // Revoke certificates A + B - revocations := serials[0:2] - for _, 
key := range revocations { - resp, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": key, - }) - if err != nil { - t.Fatal(err) - } - } - - if b.certCount.Load() != 6 { - t.Fatalf("Failed to count six certificates root,A,B,C,D,E, instead counted %d certs", b.certCount.Load()) - } - if b.revokedCertCount.Load() != 2 { - t.Fatalf("Failed to count two revoked certificates A+B, instead counted %d certs", b.revokedCertCount.Load()) - } - - // Simulates listing while initialize in progress, by "restarting it" - b.certCount.Store(0) - b.revokedCertCount.Store(0) - b.certsCounted.Store(false) - - // Revoke certificates C, D - dirtyRevocations := serials[2:4] - for _, key := range dirtyRevocations { - resp, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": key, - }) - if err != nil { - t.Fatal(err) - } - } - - // Put certificates F, G in the backend - dirtyCertificates := []string{"f", "g"} - for _, cn := range dirtyCertificates { - resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ - "common_name": cn + ".myvault.com", - }) - if err != nil { - t.Fatal(err) - } - } - - // Run initialize - b.initializeStoredCertificateCounts(ctx) - - // Test certificate count - if b.certCount.Load() != 8 { - t.Fatalf("Failed to initialize count of certificates root, A,B,C,D,E,F,G counted %d certs", b.certCount.Load()) - } - - if b.revokedCertCount.Load() != 4 { - t.Fatalf("Failed to count revoked certificates A,B,C,D counted %d certs", b.revokedCertCount.Load()) - } - - return -} - -// Verify that our default values are consistent when creating an issuer and when we do an -// empty POST update to it. This will hopefully identify if we have different default values -// for fields across the two APIs. 
-func TestBackend_VerifyIssuerUpdateDefaultsMatchCreation(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "myvault.com", - }) - requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer") - - resp, err = CBRead(b, s, "issuer/default") - requireSuccessNonNilResponse(t, resp, err, "failed reading default issuer") - preUpdateValues := resp.Data - - // This field gets reset during issuer update to the empty string - // (meaning Go will auto-detect the rev-sig-algo). - preUpdateValues["revocation_signature_algorithm"] = "" - - resp, err = CBWrite(b, s, "issuer/default", map[string]interface{}{}) - requireSuccessNonNilResponse(t, resp, err, "failed updating default issuer with no values") - - resp, err = CBRead(b, s, "issuer/default") - requireSuccessNonNilResponse(t, resp, err, "failed reading default issuer") - postUpdateValues := resp.Data - - require.Equal(t, preUpdateValues, postUpdateValues, - "A value was updated based on the empty update of an issuer, "+ - "most likely we have a different set of field parameters across create and update of issuers.") -} - -func TestBackend_VerifyPSSKeysIssuersFailImport(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // PKCS8 parsing fails on this key due to rsaPSS OID - rsaOIDKey := ` ------BEGIN PRIVATE KEY----- -MIIEugIBADALBgkqhkiG9w0BAQoEggSmMIIEogIBAAKCAQEAtN0/NPuJHLuyEdBr -tUikXoXOV741XZcNvLAIVBIqDA0ege2gXt9A15FGUI4X3u6kT16Fl6MRdtUZ/qNS -Vs15nK9A1PI/AVekMgTVFTnoCzs550CKN8iRk9Om+lwHimpyXxKkFW69v8fsXwKE -Bsz69jjT7HV9VZQ7fQhmE79brAMuwKP1fUQKdHq5OBKtQ7Cl3Gmipp0izCsVuQIE -kBHvT3UUgyaSp2n+FONpOiyuBoYUH5tVEv9sZzBqSsrYBJYF+GvfnFy9AcTdqRe2 -VX2SjjWjDF84T30OBA798gIFIPwu9R4OjWOlPeh2bo2kGeo3AITjwFZ28m7kS7kc -OtvHpwIDAQABAoIBAFQxmjbj0RQbG+3HBBzD0CBgUYnu9ZC3vKFVoMriGci6YrVB -FSKU8u5mpkDhpKMWnE6GRdItCvgyg4NSLAZUaIRT4O5ARqwtTDYsobTb2/U+gNnx 
-5WXKbFpQcK6jIK+ClfNEDjYb8yDPxG0GEsfHrBvqoFy25L1t37N4sWwH7HjJyZIe -Hbqx4NVDur9qgqaUwkfSeufn4ycHqFtkzKNzCUarDkST9cxE6/1AKfhl09PPuMEa -lAY2JLiEplQL5sh9cxG5FObJbutJo5EIhR2OdM0VcPf0MTD9LXKRoGR3SNlG7IlS -llJzBjlh4J1ByMX32btKMHzEvlhyrMI90E1SEGECgYEAx1yDQWe4/b1MBqCxA3d0 -20dDmUHSRQFhkd/Mzkl5dPzRkG42W3ryNbMKdeuL0ZgK9AhfaLCjcj1i+44O7dHb -qBTVwfRrer2uoQVCqqJ6z8PGxPJJxTaqh9QuJxkoQ0i43ZNPcjc2M2sWLn+lkkdE -MaGMiyrmjIQEC6tmgCtZ1VUCgYEA6D9xoT9VuAnQjDvW2tO5N2U2H/8ZyRd1pC3z -H1CzjwShhxsP4YOUaVdw59K95JL4SMxSmpRrhthlW3cRaiT/exBcXLEvz0Qu0OhW -a6155ZFjK3UaLDKlwvmtuoAsuAFqX084LO0B1oxvUJESgyPncQ36fv2lZGV7A66z -Uo+BKQsCgYB2yGBMMAjA5nDN4iCV+C7gF+3m+pjWFKSVzcqxfoWndptGeuRYTUDT -TgIFkHqWPwkHrZVrQxOflYPMbi/m8wr1crSKA5+mWi4aMpAuKvERqYxc/B+IKbIh -jAKTuSGMNWAwZP0JCGx65mso+VUleuDe0Wpz4PPM9TuT2GQSKcI0oQKBgHAHcouC -npmo+lU65DgoWzaydrpWdpy+2Tt6AsW/Su4ZIMWoMy/oJaXuzQK2cG0ay/NpxArW -v0uLhNDrDZZzBF3blYIM4nALhr205UMJqjwntnuXACoDwFvdzoShIXEdFa+l6gYZ -yYIxudxWLmTd491wDb5GIgrcvMsY8V1I5dfjAoGAM9g2LtdqgPgK33dCDtZpBm8m -y4ri9PqHxnpps9WJ1dO6MW/YbW+a7vbsmNczdJ6XNLEfy2NWho1dw3xe7ztFVDjF -cWNUzs1+/6aFsi41UX7EFn3zAFhQUPxT59hXspuWuKbRAWc5fMnxbCfI/Cr8wTLJ -E/0kiZ4swUMyI4tYSbM= ------END PRIVATE KEY----- -` - _, err := CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": rsaOIDKey, - }) - require.Error(t, err, "expected error importing PKCS8 rsaPSS OID key") - - _, err = CBWrite(b, s, "keys/import", map[string]interface{}{ - "key": rsaOIDKey, - }) - require.Error(t, err, "expected error importing PKCS8 rsaPSS OID key") - - // Importing a cert with rsaPSS OID should also fail - rsaOIDCert := ` ------BEGIN CERTIFICATE----- -MIIDfjCCAjGgAwIBAgIBATBCBgkqhkiG9w0BAQowNaAPMA0GCWCGSAFlAwQCAQUA -oRwwGgYJKoZIhvcNAQEIMA0GCWCGSAFlAwQCAQUAogQCAgDeMBMxETAPBgNVBAMM -CHJvb3Qtb2xkMB4XDTIyMDkxNjE0MDEwM1oXDTIzMDkyNjE0MDEwM1owEzERMA8G -A1UEAwwIcm9vdC1vbGQwggEgMAsGCSqGSIb3DQEBCgOCAQ8AMIIBCgKCAQEAtN0/ -NPuJHLuyEdBrtUikXoXOV741XZcNvLAIVBIqDA0ege2gXt9A15FGUI4X3u6kT16F 
-l6MRdtUZ/qNSVs15nK9A1PI/AVekMgTVFTnoCzs550CKN8iRk9Om+lwHimpyXxKk -FW69v8fsXwKEBsz69jjT7HV9VZQ7fQhmE79brAMuwKP1fUQKdHq5OBKtQ7Cl3Gmi -pp0izCsVuQIEkBHvT3UUgyaSp2n+FONpOiyuBoYUH5tVEv9sZzBqSsrYBJYF+Gvf -nFy9AcTdqRe2VX2SjjWjDF84T30OBA798gIFIPwu9R4OjWOlPeh2bo2kGeo3AITj -wFZ28m7kS7kcOtvHpwIDAQABo3UwczAdBgNVHQ4EFgQUVGkTAUJ8inxIVGBlfxf4 -cDhRSnowHwYDVR0jBBgwFoAUVGkTAUJ8inxIVGBlfxf4cDhRSnowDAYDVR0TBAUw -AwEB/zAOBgNVHQ8BAf8EBAMCAYYwEwYDVR0lBAwwCgYIKwYBBQUHAwEwQgYJKoZI -hvcNAQEKMDWgDzANBglghkgBZQMEAgEFAKEcMBoGCSqGSIb3DQEBCDANBglghkgB -ZQMEAgEFAKIEAgIA3gOCAQEAQZ3iQ3NjvS4FYJ5WG41huZI0dkvNFNan+ZYWlYHJ -MIQhbFogb/UQB0rlsuldG0+HF1RDXoYNuThfzt5hiBWYEtMBNurezvnOn4DF0hrl -Uk3sBVnvTalVXg+UVjqh9hBGB75JYJl6a5Oa2Zrq++4qGNwjd0FqgnoXzqS5UGuB -TJL8nlnXPuOIK3VHoXEy7l9GtvEzKcys0xa7g1PYpaJ5D2kpbBJmuQGmU6CDcbP+ -m0hI4QDfVfHtnBp2VMCvhj0yzowtwF4BFIhv4EXZBU10mzxVj0zyKKft9++X8auH -nebuK22ZwzbPe4NhOvAdfNDElkrrtGvTnzkDB7ezPYjelA== ------END CERTIFICATE----- -` - _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": rsaOIDCert, - }) - require.Error(t, err, "expected error importing PKCS8 rsaPSS OID cert") - - _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": rsaOIDKey + "\n" + rsaOIDCert, - }) - require.Error(t, err, "expected error importing PKCS8 rsaPSS OID key+cert") - - _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": rsaOIDCert + "\n" + rsaOIDKey, - }) - require.Error(t, err, "expected error importing PKCS8 rsaPSS OID cert+key") - - // After all these errors, we should have zero issuers and keys. - resp, err := CBList(b, s, "issuers") - require.NoError(t, err) - require.Equal(t, nil, resp.Data["keys"]) - - resp, err = CBList(b, s, "keys") - require.NoError(t, err) - require.Equal(t, nil, resp.Data["keys"]) - - // If we create a new PSS root, we should be able to issue an intermediate - // under it. 
- resp, err = CBWrite(b, s, "root/generate/exported", map[string]interface{}{ - "use_pss": "true", - "common_name": "root x1 - pss", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["certificate"]) - require.NotEmpty(t, resp.Data["private_key"]) - - resp, err = CBWrite(b, s, "intermediate/generate/exported", map[string]interface{}{ - "use_pss": "true", - "common_name": "int x1 - pss", - "key_type": "ec", - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["csr"]) - require.NotEmpty(t, resp.Data["private_key"]) - - resp, err = CBWrite(b, s, "issuer/default/sign-intermediate", map[string]interface{}{ - "use_pss": "true", - "common_name": "int x1 - pss", - "csr": resp.Data["csr"].(string), - }) - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.NotEmpty(t, resp.Data["certificate"]) - - resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ - "pem_bundle": resp.Data["certificate"].(string), - }) - require.NoError(t, err) - - // Finally, if we were to take an rsaPSS OID'd CSR and use it against this - // mount, it will fail. - _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allow_any_name": true, - "ttl": "85s", - "key_type": "any", - }) - require.NoError(t, err) - - // Issuing a leaf from a CSR with rsaPSS OID should fail... 
- rsaOIDCSR := `-----BEGIN CERTIFICATE REQUEST----- -MIICkTCCAUQCAQAwGTEXMBUGA1UEAwwOcmFuY2hlci5teS5vcmcwggEgMAsGCSqG -SIb3DQEBCgOCAQ8AMIIBCgKCAQEAtzHuGEUK55lXI08yp9DXoye9yCZbkJZO+Hej -1TWGEkbX4hzauRJeNp2+wn8xU5y8ITjWSIXEVDHeezosLCSy0Y2QT7/V45zWPUYY -ld0oUnPiwsb9CPFlBRFnX3dO9SS5MONIrNCJGKXmLdF3lgSl8zPT6J/hWM+JBjHO -hBzK6L8IYwmcEujrQfnOnOztzgMEBJtWG8rnI8roz1adpczTddDKGymh2QevjhlL -X9CLeYSSQZInOMsgaDYl98Hn00K5x0CBp8ADzzXtaPSQ9nsnihN8VvZ/wHw6YbBS -BSHa6OD+MrYnw3Sao6/YgBRNT2glIX85uro4ARW9zGB9/748dwIDAQABoAAwQgYJ -KoZIhvcNAQEKMDWgDzANBglghkgBZQMEAgEFAKEcMBoGCSqGSIb3DQEBCDANBglg -hkgBZQMEAgEFAKIEAgIA3gOCAQEARGAa0HiwzWCpvAdLOVc4/srEyOYFZPLbtv+Y -ezZIaUBNaWhOvkunqpa48avmcbGlji7r6fxJ5sT28lHt7ODWcJfn1XPAnqesXErm -EBuOIhCv6WiwVyGeTVynuHYkHyw3rIL/zU7N8+zIFV2G2M1UAv5D/eyh/74cr9Of -+nvm9jAbkHix8UwOBCFY2LLNl6bXvbIeJEdDOEtA9UmDXs8QGBg4lngyqcE2Z7rz -+5N/x4guMk2FqblbFGiCc5fLB0Gp6lFFOqhX9Q8nLJ6HteV42xGJUUtsFpppNCRm -82dGIH2PTbXZ0k7iAAwLaPjzOv1v58Wq90o35d4iEsOfJ8v98Q== ------END CERTIFICATE REQUEST-----` - - _, err = CBWrite(b, s, "issuer/default/sign/testing", map[string]interface{}{ - "common_name": "example.com", - "csr": rsaOIDCSR, - }) - require.Error(t, err) - - _, err = CBWrite(b, s, "issuer/default/sign-verbatim", map[string]interface{}{ - "common_name": "example.com", - "use_pss": true, - "csr": rsaOIDCSR, - }) - require.Error(t, err) - - _, err = CBWrite(b, s, "issuer/default/sign-intermediate", map[string]interface{}{ - "common_name": "faulty x1 - pss", - "use_pss": true, - "csr": rsaOIDCSR, - }) - require.Error(t, err) - - // Vault has a weird API for signing self-signed certificates. Ensure - // that doesn't accept rsaPSS OID'd certificates either. - _, err = CBWrite(b, s, "issuer/default/sign-self-issued", map[string]interface{}{ - "use_pss": true, - "certificate": rsaOIDCert, - }) - require.Error(t, err) - - // Issuing a regular leaf should succeed. 
- _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allow_any_name": true, - "ttl": "85s", - "key_type": "rsa", - "use_pss": "true", - }) - require.NoError(t, err) - - resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{ - "common_name": "example.com", - "use_pss": "true", - }) - requireSuccessNonNilResponse(t, resp, err, "failed to issue PSS leaf") -} - -func TestPKI_EmptyCRLConfigUpgraded(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // Write an empty CRLConfig into storage. - crlConfigEntry, err := logical.StorageEntryJSON("config/crl", &crlConfig{}) - require.NoError(t, err) - err = s.Put(ctx, crlConfigEntry) - require.NoError(t, err) - - resp, err := CBRead(b, s, "config/crl") - require.NoError(t, err) - require.NotNil(t, resp) - require.NotNil(t, resp.Data) - require.Equal(t, resp.Data["expiry"], defaultCrlConfig.Expiry) - require.Equal(t, resp.Data["disable"], defaultCrlConfig.Disable) - require.Equal(t, resp.Data["ocsp_disable"], defaultCrlConfig.OcspDisable) - require.Equal(t, resp.Data["auto_rebuild"], defaultCrlConfig.AutoRebuild) - require.Equal(t, resp.Data["auto_rebuild_grace_period"], defaultCrlConfig.AutoRebuildGracePeriod) - require.Equal(t, resp.Data["enable_delta"], defaultCrlConfig.EnableDelta) - require.Equal(t, resp.Data["delta_rebuild_interval"], defaultCrlConfig.DeltaRebuildInterval) -} - -func TestPKI_ListRevokedCerts(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // Test empty cluster - resp, err := CBList(b, s, "certs/revoked") - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("certs/revoked"), logical.ListOperation), resp, true) - requireSuccessNonNilResponse(t, resp, err, "failed listing empty cluster") - require.Empty(t, resp.Data, "response map contained data that we did not expect") - - // Set up a mount that we can revoke under (We will create 3 leaf certs, 2 of which will be revoked) - resp, err = CBWrite(b, s, 
"root/generate/internal", map[string]interface{}{ - "common_name": "test.com", - "key_type": "ec", - }) - requireSuccessNonNilResponse(t, resp, err, "error generating root CA") - requireFieldsSetInResp(t, resp, "serial_number") - issuerSerial := resp.Data["serial_number"] - - resp, err = CBWrite(b, s, "roles/test", map[string]interface{}{ - "allowed_domains": "test.com", - "allow_subdomains": "true", - "max_ttl": "1h", - }) - requireSuccessNonNilResponse(t, resp, err, "error setting up pki role") - - resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "test1.test.com", - }) - requireSuccessNonNilResponse(t, resp, err, "error issuing cert 1") - requireFieldsSetInResp(t, resp, "serial_number") - serial1 := resp.Data["serial_number"] - - resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "test2.test.com", - }) - requireSuccessNonNilResponse(t, resp, err, "error issuing cert 2") - requireFieldsSetInResp(t, resp, "serial_number") - serial2 := resp.Data["serial_number"] - - resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ - "common_name": "test3.test.com", - }) - requireSuccessNonNilResponse(t, resp, err, "error issuing cert 2") - requireFieldsSetInResp(t, resp, "serial_number") - serial3 := resp.Data["serial_number"] - - resp, err = CBWrite(b, s, "revoke", map[string]interface{}{"serial_number": serial1}) - requireSuccessNonNilResponse(t, resp, err, "error revoking cert 1") - - resp, err = CBWrite(b, s, "revoke", map[string]interface{}{"serial_number": serial2}) - requireSuccessNonNilResponse(t, resp, err, "error revoking cert 2") - - // Test that we get back the expected revoked serial numbers. 
- resp, err = CBList(b, s, "certs/revoked") - requireSuccessNonNilResponse(t, resp, err, "failed listing revoked certs") - requireFieldsSetInResp(t, resp, "keys") - revokedKeys := resp.Data["keys"].([]string) - - require.Contains(t, revokedKeys, serial1) - require.Contains(t, revokedKeys, serial2) - require.Equal(t, 2, len(revokedKeys), "Expected 2 revoked entries got %d: %v", len(revokedKeys), revokedKeys) - - // Test that listing our certs returns a different response - resp, err = CBList(b, s, "certs") - requireSuccessNonNilResponse(t, resp, err, "failed listing written certs") - requireFieldsSetInResp(t, resp, "keys") - certKeys := resp.Data["keys"].([]string) - - require.Contains(t, certKeys, serial1) - require.Contains(t, certKeys, serial2) - require.Contains(t, certKeys, serial3) - require.Contains(t, certKeys, issuerSerial) - require.Equal(t, 4, len(certKeys), "Expected 4 cert entries got %d: %v", len(certKeys), certKeys) -} - -func TestPKI_TemplatedAIAs(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // Setting templated AIAs should succeed. 
- resp, err := CBWrite(b, s, "config/cluster", map[string]interface{}{ - "path": "http://localhost:8200/v1/pki", - "aia_path": "http://localhost:8200/cdn/pki", - }) - require.NoError(t, err) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/cluster"), logical.UpdateOperation), resp, true) - - resp, err = CBRead(b, s, "config/cluster") - require.NoError(t, err) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/cluster"), logical.ReadOperation), resp, true) - - aiaData := map[string]interface{}{ - "crl_distribution_points": "{{cluster_path}}/issuer/{{issuer_id}}/crl/der", - "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", - "ocsp_servers": "{{cluster_path}}/ocsp", - "enable_templating": true, - } - _, err = CBWrite(b, s, "config/urls", aiaData) - require.NoError(t, err) - - // Root generation should succeed, but without AIA info. - rootData := map[string]interface{}{ - "common_name": "Long-Lived Root X1", - "issuer_name": "long-root-x1", - "key_type": "ec", - } - resp, err = CBWrite(b, s, "root/generate/internal", rootData) - require.NoError(t, err) - _, err = CBDelete(b, s, "root") - require.NoError(t, err) - - // Clearing the config and regenerating the root should still succeed. - _, err = CBWrite(b, s, "config/urls", map[string]interface{}{ - "crl_distribution_points": "{{cluster_path}}/issuer/my-root-id/crl/der", - "issuing_certificates": "{{cluster_aia_path}}/issuer/my-root-id/der", - "ocsp_servers": "{{cluster_path}}/ocsp", - "enable_templating": true, - }) - require.NoError(t, err) - resp, err = CBWrite(b, s, "root/generate/internal", rootData) - requireSuccessNonNilResponse(t, resp, err) - issuerId := string(resp.Data["issuer_id"].(issuerID)) - - // Now write the original AIA config and sign a leaf. 
- _, err = CBWrite(b, s, "config/urls", aiaData) - require.NoError(t, err) - _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allow_any_name": "true", - "key_type": "ec", - "ttl": "50m", - }) - require.NoError(t, err) - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "example.com", - }) - requireSuccessNonNilResponse(t, resp, err) - - // Validate the AIA info is correctly templated. - cert := parseCert(t, resp.Data["certificate"].(string)) - require.Equal(t, cert.OCSPServer, []string{"http://localhost:8200/v1/pki/ocsp"}) - require.Equal(t, cert.IssuingCertificateURL, []string{"http://localhost:8200/cdn/pki/issuer/" + issuerId + "/der"}) - require.Equal(t, cert.CRLDistributionPoints, []string{"http://localhost:8200/v1/pki/issuer/" + issuerId + "/crl/der"}) - - // Modify our issuer to set custom AIAs: these URLs are bad. - _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "enable_aia_url_templating": "false", - "crl_distribution_points": "a", - "issuing_certificates": "b", - "ocsp_servers": "c", - }) - require.Error(t, err) - - // These URLs are good. - _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "enable_aia_url_templating": "false", - "crl_distribution_points": "http://localhost/a", - "issuing_certificates": "http://localhost/b", - "ocsp_servers": "http://localhost/c", - }) - - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "example.com", - }) - requireSuccessNonNilResponse(t, resp, err) - - // Validate the AIA info is correctly templated. - cert = parseCert(t, resp.Data["certificate"].(string)) - require.Equal(t, cert.OCSPServer, []string{"http://localhost/c"}) - require.Equal(t, cert.IssuingCertificateURL, []string{"http://localhost/b"}) - require.Equal(t, cert.CRLDistributionPoints, []string{"http://localhost/a"}) - - // These URLs are bad, but will fail at issuance time due to AIA templating. 
- resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "enable_aia_url_templating": "true", - "crl_distribution_points": "a", - "issuing_certificates": "b", - "ocsp_servers": "c", - }) - requireSuccessNonNilResponse(t, resp, err) - require.NotEmpty(t, resp.Warnings) - _, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "example.com", - }) - require.Error(t, err) -} - -func requireSubjectUserIDAttr(t *testing.T, cert string, target string) { - xCert := parseCert(t, cert) - - for _, attr := range xCert.Subject.Names { - var userID string - if attr.Type.Equal(certutil.SubjectPilotUserIDAttributeOID) { - if target == "" { - t.Fatalf("expected no UserID (OID: %v) subject attributes in cert:\n%v", certutil.SubjectPilotUserIDAttributeOID, cert) - } - - switch aValue := attr.Value.(type) { - case string: - userID = aValue - case []byte: - userID = string(aValue) - default: - t.Fatalf("unknown type for UserID attribute: %v\nCert: %v", attr, cert) - } - - if userID == target { - return - } - } - } - - if target != "" { - t.Fatalf("failed to find UserID (OID: %v) matching %v in cert:\n%v", certutil.SubjectPilotUserIDAttributeOID, target, cert) - } -} - -func TestUserIDsInLeafCerts(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - // 1. Setup root issuer. - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "Vault Root CA", - "key_type": "ec", - "ttl": "7200h", - }) - requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer") - - // 2. Allow no user IDs. - resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allowed_user_ids": "", - "key_type": "ec", - }) - requireSuccessNonNilResponse(t, resp, err, "failed setting up role") - - // - Issue cert without user IDs should work. 
- resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") - - // - Issue cert with user ID should fail. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid", - }) - require.Error(t, err) - require.True(t, resp.IsError()) - - // 3. Allow any user IDs. - resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allowed_user_ids": "*", - "key_type": "ec", - }) - requireSuccessNonNilResponse(t, resp, err, "failed setting up role") - - // - Issue cert without user IDs. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") - - // - Issue cert with one user ID. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") - - // - Issue cert with two user IDs. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid,robot", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") - - // 4. Allow one specific user ID. - resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allowed_user_ids": "humanoid", - "key_type": "ec", - }) - requireSuccessNonNilResponse(t, resp, err, "failed setting up role") - - // - Issue cert without user IDs. 
- resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") - - // - Issue cert with approved ID. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") - - // - Issue cert with non-approved user ID should fail. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "robot", - }) - require.Error(t, err) - require.True(t, resp.IsError()) - - // - Issue cert with one approved and one non-approved should also fail. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid,robot", - }) - require.Error(t, err) - require.True(t, resp.IsError()) - - // 5. Allow two specific user IDs. - resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allowed_user_ids": "humanoid,robot", - "key_type": "ec", - }) - requireSuccessNonNilResponse(t, resp, err, "failed setting up role") - - // - Issue cert without user IDs. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") - - // - Issue cert with one approved ID. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") - - // - Issue cert with other user ID. 
- resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "robot", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") - - // - Issue cert with unknown user ID will fail. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "robot2", - }) - require.Error(t, err) - require.True(t, resp.IsError()) - - // - Issue cert with both should succeed. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid,robot", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") - - // 6. Use a glob. - resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ - "allowed_user_ids": "human*", - "key_type": "ec", - "use_csr_sans": true, // setup for further testing. - }) - requireSuccessNonNilResponse(t, resp, err, "failed setting up role") - - // - Issue cert without user IDs. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") - - // - Issue cert with approved ID. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "humanoid", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") - - // - Issue cert with another approved ID. 
- resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "human", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "human") - - // - Issue cert with literal glob. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "human*", - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "human*") - - // - Still no robotic certs are allowed; will fail. - resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ - "common_name": "localhost", - "user_ids": "robot", - }) - require.Error(t, err) - require.True(t, resp.IsError()) - - // Create a CSR and validate it works with both sign/ and sign-verbatim. - csrTemplate := x509.CertificateRequest{ - Subject: pkix.Name{ - CommonName: "localhost", - ExtraNames: []pkix.AttributeTypeAndValue{ - { - Type: certutil.SubjectPilotUserIDAttributeOID, - Value: "humanoid", - }, - }, - }, - } - _, _, csrPem := generateCSR(t, &csrTemplate, "ec", 256) - - // Should work with role-based signing. - resp, err = CBWrite(b, s, "sign/testing", map[string]interface{}{ - "csr": csrPem, - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("sign/testing"), logical.UpdateOperation), resp, true) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") - - // - Definitely will work with sign-verbatim. 
- resp, err = CBWrite(b, s, "sign-verbatim", map[string]interface{}{ - "csr": csrPem, - }) - requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") - requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") -} - -// TestStandby_Operations test proper forwarding for PKI requests from a standby node to the -// active node within a cluster. -func TestStandby_Operations(t *testing.T) { - conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - }, nil, teststorage.InmemBackendSetup) - cluster := vault.NewTestCluster(t, conf, opts) - cluster.Start() - defer cluster.Cleanup() - - testhelpers.WaitForActiveNodeAndStandbys(t, cluster) - standbyCores := testhelpers.DeriveStandbyCores(t, cluster) - require.Greater(t, len(standbyCores), 0, "Need at least one standby core.") - client := standbyCores[0].Client - - mountPKIEndpoint(t, client, "pki") - - _, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "root-ca.com", - "ttl": "600h", - }) - require.NoError(t, err, "error setting up pki role: %v", err) - - _, err = client.Logical().Write("pki/roles/example", map[string]interface{}{ - "allowed_domains": "example.com", - "allow_subdomains": "true", - "no_store": "false", // make sure we store this cert - "ttl": "5h", - "key_type": "ec", - }) - require.NoError(t, err, "error setting up pki role: %v", err) - - resp, err := client.Logical().Write("pki/issue/example", map[string]interface{}{ - "common_name": "test.example.com", - }) - require.NoError(t, err, "error issuing certificate: %v", err) - require.NotNil(t, resp, "got nil response from issuing request") - serialOfCert := resp.Data["serial_number"].(string) - - resp, err = client.Logical().Write("pki/revoke", map[string]interface{}{ - "serial_number": serialOfCert, - }) - require.NoError(t, err, "error revoking certificate: %v", err) - require.NotNil(t, 
resp, "got nil response from revoke request") -} - -type pathAuthCheckerFunc func(t *testing.T, client *api.Client, path string, token string) - -func isPermDenied(err error) bool { - return err != nil && strings.Contains(err.Error(), "permission denied") -} - -func isUnsupportedPathOperation(err error) bool { - return err != nil && (strings.Contains(err.Error(), "unsupported path") || strings.Contains(err.Error(), "unsupported operation")) -} - -func isDeniedOp(err error) bool { - return isPermDenied(err) || isUnsupportedPathOperation(err) -} - -func pathShouldBeAuthed(t *testing.T, client *api.Client, path string, token string) { - client.SetToken("") - resp, err := client.Logical().ReadWithContext(ctx, path) - if err == nil || !isPermDenied(err) { - t.Fatalf("expected failure to read %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().ListWithContext(ctx, path) - if err == nil || !isPermDenied(err) { - t.Fatalf("expected failure to list %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) - if err == nil || !isPermDenied(err) { - t.Fatalf("expected failure to write %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().DeleteWithContext(ctx, path) - if err == nil || !isPermDenied(err) { - t.Fatalf("expected failure to delete %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) - if err == nil || !isPermDenied(err) { - t.Fatalf("expected failure to patch %v while unauthed: %v / %v", path, err, resp) - } -} - -func pathShouldBeUnauthedReadList(t *testing.T, client *api.Client, path string, token string) { - // Should be able to read both with and without a token. 
- client.SetToken("") - resp, err := client.Logical().ReadWithContext(ctx, path) - if err != nil && isPermDenied(err) { - // Read will sometimes return permission denied, when the handler - // does not support the given operation. Retry with the token. - client.SetToken(token) - resp2, err2 := client.Logical().ReadWithContext(ctx, path) - if err2 != nil && !isUnsupportedPathOperation(err2) { - t.Fatalf("unexpected failure to read %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) - } - client.SetToken("") - } - resp, err = client.Logical().ListWithContext(ctx, path) - if err != nil && isPermDenied(err) { - // List will sometimes return permission denied, when the handler - // does not support the given operation. Retry with the token. - client.SetToken(token) - resp2, err2 := client.Logical().ListWithContext(ctx, path) - if err2 != nil && !isUnsupportedPathOperation(err2) { - t.Fatalf("unexpected failure to list %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) - } - client.SetToken("") - } - - // These should all be denied. - resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) - if err == nil || !isDeniedOp(err) { - if !strings.Contains(path, "ocsp") || !strings.Contains(err.Error(), "Code: 40") { - t.Fatalf("unexpected failure during write on read-only path %v while unauthed: %v / %v", path, err, resp) - } - } - resp, err = client.Logical().DeleteWithContext(ctx, path) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during delete on read-only path %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during patch on read-only path %v while unauthed: %v / %v", path, err, resp) - } - - // Retrying with token should allow read/list, but not modification still. 
- client.SetToken(token) - resp, err = client.Logical().ReadWithContext(ctx, path) - if err != nil && isPermDenied(err) { - t.Fatalf("unexpected failure to read %v while authed: %v / %v", path, err, resp) - } - resp, err = client.Logical().ListWithContext(ctx, path) - if err != nil && isPermDenied(err) { - t.Fatalf("unexpected failure to list %v while authed: %v / %v", path, err, resp) - } - - // Should all be denied. - resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) - if err == nil || !isDeniedOp(err) { - if !strings.Contains(path, "ocsp") || !strings.Contains(err.Error(), "Code: 40") { - t.Fatalf("unexpected failure during write on read-only path %v while authed: %v / %v", path, err, resp) - } - } - resp, err = client.Logical().DeleteWithContext(ctx, path) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during delete on read-only path %v while authed: %v / %v", path, err, resp) - } - resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) - if err == nil || !isDeniedOp(err) { - t.Fatalf("unexpected failure during patch on read-only path %v while authed: %v / %v", path, err, resp) - } -} - -func pathShouldBeUnauthedWriteOnly(t *testing.T, client *api.Client, path string, token string) { - client.SetToken("") - resp, err := client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) - if err != nil && isPermDenied(err) { - t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp) - } - - // These should all be denied. However, on OSS, we might end up with - // a regular 404, which looks like err == resp == nil; hence we only - // fail when there's a non-nil response and/or a non-nil err. 
- resp, err = client.Logical().ReadWithContext(ctx, path) - if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { - t.Fatalf("unexpected failure during read on write-only path %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().ListWithContext(ctx, path) - if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { - t.Fatalf("unexpected failure during list on write-only path %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().DeleteWithContext(ctx, path) - if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { - t.Fatalf("unexpected failure during delete on write-only path %v while unauthed: %v / %v", path, err, resp) - } - resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) - if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { - t.Fatalf("unexpected failure during patch on write-only path %v while unauthed: %v / %v", path, err, resp) - } - - // Retrying with token should allow writing, but nothing else. - client.SetToken(token) - resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) - if err != nil && isPermDenied(err) { - t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp) - } - - // These should all be denied. 
- resp, err = client.Logical().ReadWithContext(ctx, path) - if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { - t.Fatalf("unexpected failure during read on write-only path %v while authed: %v / %v", path, err, resp) - } - resp, err = client.Logical().ListWithContext(ctx, path) - if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { - if resp != nil || err != nil { - t.Fatalf("unexpected failure during list on write-only path %v while authed: %v / %v", path, err, resp) - } - } - resp, err = client.Logical().DeleteWithContext(ctx, path) - if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { - t.Fatalf("unexpected failure during delete on write-only path %v while authed: %v / %v", path, err, resp) - } - resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) - if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { - t.Fatalf("unexpected failure during patch on write-only path %v while authed: %v / %v", path, err, resp) - } -} - -type pathAuthChecker int - -const ( - shouldBeAuthed pathAuthChecker = iota - shouldBeUnauthedReadList - shouldBeUnauthedWriteOnly -) - -var pathAuthChckerMap = map[pathAuthChecker]pathAuthCheckerFunc{ - shouldBeAuthed: pathShouldBeAuthed, - shouldBeUnauthedReadList: pathShouldBeUnauthedReadList, - shouldBeUnauthedWriteOnly: pathShouldBeUnauthedWriteOnly, -} - -func TestProperAuthing(t *testing.T) { - t.Parallel() - ctx := context.Background() - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - client := cluster.Cores[0].Client - token := client.Token() - - // Mount PKI. 
- err := client.Sys().MountWithContext(ctx, "pki", &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "16h", - MaxLeaseTTL: "60h", - }, - }) - if err != nil { - t.Fatal(err) - } - - // Setup basic configuration. - _, err = client.Logical().WriteWithContext(ctx, "pki/root/generate/internal", map[string]interface{}{ - "ttl": "40h", - "common_name": "myvault.com", - }) - if err != nil { - t.Fatal(err) - } - - _, err = client.Logical().WriteWithContext(ctx, "pki/roles/test", map[string]interface{}{ - "allow_localhost": true, - }) - if err != nil { - t.Fatal(err) - } - - resp, err := client.Logical().WriteWithContext(ctx, "pki/issue/test", map[string]interface{}{ - "common_name": "localhost", - }) - if err != nil || resp == nil { - t.Fatal(err) - } - serial := resp.Data["serial_number"].(string) - eabKid := "13b80844-e60d-42d2-b7e9-152a8e834b90" - paths := map[string]pathAuthChecker{ - "ca_chain": shouldBeUnauthedReadList, - "cert/ca_chain": shouldBeUnauthedReadList, - "ca": shouldBeUnauthedReadList, - "ca/pem": shouldBeUnauthedReadList, - "cert/" + serial: shouldBeUnauthedReadList, - "cert/" + serial + "/raw": shouldBeUnauthedReadList, - "cert/" + serial + "/raw/pem": shouldBeUnauthedReadList, - "cert/crl": shouldBeUnauthedReadList, - "cert/crl/raw": shouldBeUnauthedReadList, - "cert/crl/raw/pem": shouldBeUnauthedReadList, - "cert/delta-crl": shouldBeUnauthedReadList, - "cert/delta-crl/raw": shouldBeUnauthedReadList, - "cert/delta-crl/raw/pem": shouldBeUnauthedReadList, - "cert/unified-crl": shouldBeUnauthedReadList, - "cert/unified-crl/raw": shouldBeUnauthedReadList, - "cert/unified-crl/raw/pem": shouldBeUnauthedReadList, - "cert/unified-delta-crl": shouldBeUnauthedReadList, - "cert/unified-delta-crl/raw": shouldBeUnauthedReadList, - "cert/unified-delta-crl/raw/pem": shouldBeUnauthedReadList, - "certs/": shouldBeAuthed, - "certs/revoked/": shouldBeAuthed, - "certs/revocation-queue/": shouldBeAuthed, - "certs/unified-revoked/": 
shouldBeAuthed, - "config/acme": shouldBeAuthed, - "config/auto-tidy": shouldBeAuthed, - "config/ca": shouldBeAuthed, - "config/cluster": shouldBeAuthed, - "config/crl": shouldBeAuthed, - "config/issuers": shouldBeAuthed, - "config/keys": shouldBeAuthed, - "config/urls": shouldBeAuthed, - "crl": shouldBeUnauthedReadList, - "crl/pem": shouldBeUnauthedReadList, - "crl/delta": shouldBeUnauthedReadList, - "crl/delta/pem": shouldBeUnauthedReadList, - "crl/rotate": shouldBeAuthed, - "crl/rotate-delta": shouldBeAuthed, - "intermediate/cross-sign": shouldBeAuthed, - "intermediate/generate/exported": shouldBeAuthed, - "intermediate/generate/internal": shouldBeAuthed, - "intermediate/generate/existing": shouldBeAuthed, - "intermediate/generate/kms": shouldBeAuthed, - "intermediate/set-signed": shouldBeAuthed, - "issue/test": shouldBeAuthed, - "issuer/default": shouldBeAuthed, - "issuer/default/der": shouldBeUnauthedReadList, - "issuer/default/json": shouldBeUnauthedReadList, - "issuer/default/pem": shouldBeUnauthedReadList, - "issuer/default/crl": shouldBeUnauthedReadList, - "issuer/default/crl/pem": shouldBeUnauthedReadList, - "issuer/default/crl/der": shouldBeUnauthedReadList, - "issuer/default/crl/delta": shouldBeUnauthedReadList, - "issuer/default/crl/delta/der": shouldBeUnauthedReadList, - "issuer/default/crl/delta/pem": shouldBeUnauthedReadList, - "issuer/default/unified-crl": shouldBeUnauthedReadList, - "issuer/default/unified-crl/pem": shouldBeUnauthedReadList, - "issuer/default/unified-crl/der": shouldBeUnauthedReadList, - "issuer/default/unified-crl/delta": shouldBeUnauthedReadList, - "issuer/default/unified-crl/delta/der": shouldBeUnauthedReadList, - "issuer/default/unified-crl/delta/pem": shouldBeUnauthedReadList, - "issuer/default/issue/test": shouldBeAuthed, - "issuer/default/resign-crls": shouldBeAuthed, - "issuer/default/revoke": shouldBeAuthed, - "issuer/default/sign-intermediate": shouldBeAuthed, - "issuer/default/sign-revocation-list": shouldBeAuthed, - 
"issuer/default/sign-self-issued": shouldBeAuthed, - "issuer/default/sign-verbatim": shouldBeAuthed, - "issuer/default/sign-verbatim/test": shouldBeAuthed, - "issuer/default/sign/test": shouldBeAuthed, - "issuers/": shouldBeUnauthedReadList, - "issuers/generate/intermediate/exported": shouldBeAuthed, - "issuers/generate/intermediate/internal": shouldBeAuthed, - "issuers/generate/intermediate/existing": shouldBeAuthed, - "issuers/generate/intermediate/kms": shouldBeAuthed, - "issuers/generate/root/exported": shouldBeAuthed, - "issuers/generate/root/internal": shouldBeAuthed, - "issuers/generate/root/existing": shouldBeAuthed, - "issuers/generate/root/kms": shouldBeAuthed, - "issuers/import/cert": shouldBeAuthed, - "issuers/import/bundle": shouldBeAuthed, - "key/default": shouldBeAuthed, - "keys/": shouldBeAuthed, - "keys/generate/internal": shouldBeAuthed, - "keys/generate/exported": shouldBeAuthed, - "keys/generate/kms": shouldBeAuthed, - "keys/import": shouldBeAuthed, - "ocsp": shouldBeUnauthedWriteOnly, - "ocsp/dGVzdAo=": shouldBeUnauthedReadList, - "revoke": shouldBeAuthed, - "revoke-with-key": shouldBeAuthed, - "roles/test": shouldBeAuthed, - "roles/": shouldBeAuthed, - "root": shouldBeAuthed, - "root/generate/exported": shouldBeAuthed, - "root/generate/internal": shouldBeAuthed, - "root/generate/existing": shouldBeAuthed, - "root/generate/kms": shouldBeAuthed, - "root/replace": shouldBeAuthed, - "root/rotate/internal": shouldBeAuthed, - "root/rotate/exported": shouldBeAuthed, - "root/rotate/existing": shouldBeAuthed, - "root/rotate/kms": shouldBeAuthed, - "root/sign-intermediate": shouldBeAuthed, - "root/sign-self-issued": shouldBeAuthed, - "sign-verbatim": shouldBeAuthed, - "sign-verbatim/test": shouldBeAuthed, - "sign/test": shouldBeAuthed, - "tidy": shouldBeAuthed, - "tidy-cancel": shouldBeAuthed, - "tidy-status": shouldBeAuthed, - "unified-crl": shouldBeUnauthedReadList, - "unified-crl/pem": shouldBeUnauthedReadList, - "unified-crl/delta": 
shouldBeUnauthedReadList, - "unified-crl/delta/pem": shouldBeUnauthedReadList, - "unified-ocsp": shouldBeUnauthedWriteOnly, - "unified-ocsp/dGVzdAo=": shouldBeUnauthedReadList, - "eab/": shouldBeAuthed, - "eab/" + eabKid: shouldBeAuthed, - } - - entPaths := getEntProperAuthingPaths(serial) - maps.Copy(paths, entPaths) - - // Add ACME based paths to the test suite - ossAcmePrefixes := []string{"acme/", "issuer/default/acme/", "roles/test/acme/", "issuer/default/roles/test/acme/"} - entAcmePrefixes := getEntAcmePrefixes() - for _, acmePrefix := range append(ossAcmePrefixes, entAcmePrefixes...) { - paths[acmePrefix+"directory"] = shouldBeUnauthedReadList - paths[acmePrefix+"new-nonce"] = shouldBeUnauthedReadList - paths[acmePrefix+"new-account"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"revoke-cert"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"new-order"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"orders"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"account/hrKmDYTvicHoHGVN2-3uzZV_BPGdE0W_dNaqYTtYqeo="] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"authorization/29da8c38-7a09-465e-b9a6-3d76802b1afd"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"challenge/29da8c38-7a09-465e-b9a6-3d76802b1afd/http-01"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"order/13b80844-e60d-42d2-b7e9-152a8e834b90"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"order/13b80844-e60d-42d2-b7e9-152a8e834b90/finalize"] = shouldBeUnauthedWriteOnly - paths[acmePrefix+"order/13b80844-e60d-42d2-b7e9-152a8e834b90/cert"] = shouldBeUnauthedWriteOnly - - // Make sure this new-eab path is auth'd - paths[acmePrefix+"new-eab"] = shouldBeAuthed - } - - for path, checkerType := range paths { - checker := pathAuthChckerMap[checkerType] - checker(t, client, "pki/"+path, token) - } - - client.SetToken(token) - openAPIResp, err := client.Logical().ReadWithContext(ctx, "sys/internal/specs/openapi") - if err != nil { - t.Fatalf("failed to get openapi data: %v", err) - } - - validatedPath := 
false - for openapi_path, raw_data := range openAPIResp.Data["paths"].(map[string]interface{}) { - if !strings.HasPrefix(openapi_path, "/pki/") { - t.Logf("Skipping path: %v", openapi_path) - continue - } - - t.Logf("Validating path: %v", openapi_path) - validatedPath = true - // Substitute values in from our testing map. - raw_path := openapi_path[5:] - if strings.Contains(raw_path, "roles/") && strings.Contains(raw_path, "{name}") { - raw_path = strings.ReplaceAll(raw_path, "{name}", "test") - } - if strings.Contains(raw_path, "{role}") { - raw_path = strings.ReplaceAll(raw_path, "{role}", "test") - } - if strings.Contains(raw_path, "ocsp/") && strings.Contains(raw_path, "{req}") { - raw_path = strings.ReplaceAll(raw_path, "{req}", "dGVzdAo=") - } - if strings.Contains(raw_path, "{issuer_ref}") { - raw_path = strings.ReplaceAll(raw_path, "{issuer_ref}", "default") - } - if strings.Contains(raw_path, "{key_ref}") { - raw_path = strings.ReplaceAll(raw_path, "{key_ref}", "default") - } - if strings.Contains(raw_path, "{exported}") { - raw_path = strings.ReplaceAll(raw_path, "{exported}", "internal") - } - if strings.Contains(raw_path, "{serial}") { - raw_path = strings.ReplaceAll(raw_path, "{serial}", serial) - } - if strings.Contains(raw_path, "acme/account/") && strings.Contains(raw_path, "{kid}") { - raw_path = strings.ReplaceAll(raw_path, "{kid}", "hrKmDYTvicHoHGVN2-3uzZV_BPGdE0W_dNaqYTtYqeo=") - } - if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{auth_id}") { - raw_path = strings.ReplaceAll(raw_path, "{auth_id}", "29da8c38-7a09-465e-b9a6-3d76802b1afd") - } - if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{challenge_type}") { - raw_path = strings.ReplaceAll(raw_path, "{challenge_type}", "http-01") - } - if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{order_id}") { - raw_path = strings.ReplaceAll(raw_path, "{order_id}", "13b80844-e60d-42d2-b7e9-152a8e834b90") - } - if strings.Contains(raw_path, 
"eab") && strings.Contains(raw_path, "{key_id}") { - raw_path = strings.ReplaceAll(raw_path, "{key_id}", eabKid) - } - if strings.Contains(raw_path, "external-policy/") && strings.Contains(raw_path, "{policy}") { - raw_path = strings.ReplaceAll(raw_path, "{policy}", "a-policy") - } - - raw_path = entProperAuthingPathReplacer(raw_path) - - handler, present := paths[raw_path] - if !present { - t.Fatalf("OpenAPI reports PKI mount contains %v -> %v but was not tested to be authed or not authed.", - openapi_path, raw_path) - } - - openapi_data := raw_data.(map[string]interface{}) - hasList := false - rawGetData, hasGet := openapi_data["get"] - if hasGet { - getData := rawGetData.(map[string]interface{}) - getParams, paramsPresent := getData["parameters"].(map[string]interface{}) - if getParams != nil && paramsPresent { - if _, hasList = getParams["list"]; hasList { - // LIST is exclusive from GET on the same endpoint usually. - hasGet = false - } - } - } - _, hasPost := openapi_data["post"] - _, hasDelete := openapi_data["delete"] - - if handler == shouldBeUnauthedReadList { - if hasPost || hasDelete { - t.Fatalf("Unauthed read-only endpoints should not have POST/DELETE capabilities: %v->%v", openapi_path, raw_path) - } - } else if handler == shouldBeUnauthedWriteOnly { - if hasGet || hasList { - t.Fatalf("Unauthed write-only endpoints should not have GET/LIST capabilities: %v->%v", openapi_path, raw_path) - } - } - } - - if !validatedPath { - t.Fatalf("Expected to have validated at least one path.") - } -} - -func TestPatchIssuer(t *testing.T) { - t.Parallel() - - type TestCase struct { - Field string - Before interface{} - Patched interface{} - } - testCases := []TestCase{ - { - Field: "issuer_name", - Before: "root", - Patched: "root-new", - }, - { - Field: "leaf_not_after_behavior", - Before: "err", - Patched: "permit", - }, - { - Field: "usage", - Before: "crl-signing,issuing-certificates,ocsp-signing,read-only", - Patched: "issuing-certificates,read-only", - }, - 
{ - Field: "revocation_signature_algorithm", - Before: "ECDSAWithSHA256", - Patched: "ECDSAWithSHA384", - }, - { - Field: "issuing_certificates", - Before: []string{"http://localhost/v1/pki-1/ca"}, - Patched: []string{"http://localhost/v1/pki/ca"}, - }, - { - Field: "crl_distribution_points", - Before: []string{"http://localhost/v1/pki-1/crl"}, - Patched: []string{"http://localhost/v1/pki/crl"}, - }, - { - Field: "ocsp_servers", - Before: []string{"http://localhost/v1/pki-1/ocsp"}, - Patched: []string{"http://localhost/v1/pki/ocsp"}, - }, - { - Field: "enable_aia_url_templating", - Before: false, - Patched: true, - }, - { - Field: "manual_chain", - Before: []string(nil), - Patched: []string{"self"}, - }, - } - - for index, testCase := range testCases { - t.Logf("index: %v / tc: %v", index, testCase) - - b, s := CreateBackendWithStorage(t) - - // 1. Setup root issuer. - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "common_name": "Vault Root CA", - "key_type": "ec", - "ttl": "7200h", - "issuer_name": "root", - }) - requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer") - id := string(resp.Data["issuer_id"].(issuerID)) - - // 2. Enable Cluster paths - resp, err = CBWrite(b, s, "config/urls", map[string]interface{}{ - "path": "https://localhost/v1/pki", - "aia_path": "http://localhost/v1/pki", - }) - requireSuccessNonNilResponse(t, resp, err, "failed updating AIA config") - - // 3. Add AIA information - resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - "issuing_certificates": "http://localhost/v1/pki-1/ca", - "crl_distribution_points": "http://localhost/v1/pki-1/crl", - "ocsp_servers": "http://localhost/v1/pki-1/ocsp", - }) - requireSuccessNonNilResponse(t, resp, err, "failed setting up issuer") - - // 4. Read the issuer before. 
- resp, err = CBRead(b, s, "issuer/default") - requireSuccessNonNilResponse(t, resp, err, "failed reading root issuer before") - require.Equal(t, testCase.Before, resp.Data[testCase.Field], "bad expectations") - - // 5. Perform modification. - resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ - testCase.Field: testCase.Patched, - }) - requireSuccessNonNilResponse(t, resp, err, "failed patching root issuer") - - if testCase.Field != "manual_chain" { - require.Equal(t, testCase.Patched, resp.Data[testCase.Field], "failed persisting value") - } else { - // self->id - require.Equal(t, []string{id}, resp.Data[testCase.Field], "failed persisting value") - } - - // 6. Ensure it stuck - resp, err = CBRead(b, s, "issuer/default") - requireSuccessNonNilResponse(t, resp, err, "failed reading root issuer after") - - if testCase.Field != "manual_chain" { - require.Equal(t, testCase.Patched, resp.Data[testCase.Field]) - } else { - // self->id - require.Equal(t, []string{id}, resp.Data[testCase.Field], "failed persisting value") - } - } -} - -func TestGenerateRootCAWithAIA(t *testing.T) { - // Generate a root CA at /pki-root - b_root, s_root := CreateBackendWithStorage(t) - - // Setup templated AIA information - _, err := CBWrite(b_root, s_root, "config/cluster", map[string]interface{}{ - "path": "https://localhost:8200", - "aia_path": "https://localhost:8200", - }) - require.NoError(t, err, "failed to write AIA settings") - - _, err = CBWrite(b_root, s_root, "config/urls", map[string]interface{}{ - "crl_distribution_points": "{{cluster_path}}/issuer/{{issuer_id}}/crl/der", - "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", - "ocsp_servers": "{{cluster_path}}/ocsp", - "enable_templating": true, - }) - require.NoError(t, err, "failed to write AIA settings") - - // Write a root issuer, this should succeed. 
- resp, err := CBWrite(b_root, s_root, "root/generate/exported", map[string]interface{}{ - "common_name": "root myvault.com", - "key_type": "ec", - }) - requireSuccessNonNilResponse(t, resp, err, "expected root generation to succeed") -} - -var ( - initTest sync.Once - rsaCAKey string - rsaCACert string - ecCAKey string - ecCACert string - edCAKey string - edCACert string -) +```release-note:bug +ui: When Kv v2 secret is an object, fix so details view defaults to readOnly JSON editor. +``` \ No newline at end of file diff --git a/builtin/logical/pki/fields.go b/builtin/logical/pki/fields.go index e637ac27fa90..784e2e38f4c7 100644 --- a/builtin/logical/pki/fields.go +++ b/builtin/logical/pki/fields.go @@ -1,645 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package pki - -import ( - "time" - - "github.com/hashicorp/vault/sdk/framework" -) - -const ( - issuerRefParam = "issuer_ref" - keyNameParam = "key_name" - keyRefParam = "key_ref" - keyIdParam = "key_id" - keyTypeParam = "key_type" - keyBitsParam = "key_bits" - skidParam = "subject_key_id" -) - -// addIssueAndSignCommonFields adds fields common to both CA and non-CA issuing -// and signing -func addIssueAndSignCommonFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields["exclude_cn_from_sans"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Default: false, - Description: `If true, the Common Name will not be -included in DNS or Email Subject Alternate Names. -Defaults to false (CN is included).`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "Exclude Common Name from Subject Alternative Names (SANs)", - }, - } - - fields["format"] = &framework.FieldSchema{ - Type: framework.TypeString, - Default: "pem", - Description: `Format for returned data. Can be "pem", "der", -or "pem_bundle". If "pem_bundle", any private -key and issuing cert will be appended to the -certificate pem. If "der", the value will be -base64 encoded. 
Defaults to "pem".`, - AllowedValues: []interface{}{"pem", "der", "pem_bundle"}, - DisplayAttrs: &framework.DisplayAttributes{ - Value: "pem", - }, - } - - fields["private_key_format"] = &framework.FieldSchema{ - Type: framework.TypeString, - Default: "der", - Description: `Format for the returned private key. -Generally the default will be controlled by the "format" -parameter as either base64-encoded DER or PEM-encoded DER. -However, this can be set to "pkcs8" to have the returned -private key contain base64-encoded pkcs8 or PEM-encoded -pkcs8 instead. Defaults to "der".`, - AllowedValues: []interface{}{"", "der", "pem", "pkcs8"}, - DisplayAttrs: &framework.DisplayAttributes{ - Value: "der", - }, - } - - fields["ip_sans"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `The requested IP SANs, if any, in a -comma-delimited list`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "IP Subject Alternative Names (SANs)", - }, - } - - fields["uri_sans"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `The requested URI SANs, if any, in a -comma-delimited list.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "URI Subject Alternative Names (SANs)", - }, - } - - fields["other_sans"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `Requested other SANs, in an array with the format -;UTF8: for each entry.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "Other SANs", - }, - } - - return fields -} - -// addNonCACommonFields adds fields with help text specific to non-CA -// certificate issuing and signing -func addNonCACommonFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields = addIssueAndSignCommonFields(fields) - - fields["role"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `The desired role with configuration for this -request`, - } - - fields["common_name"] = &framework.FieldSchema{ - 
Type: framework.TypeString, - Description: `The requested common name; if you want more than -one, specify the alternative names in the -alt_names map. If email protection is enabled -in the role, this may be an email address.`, - } - - fields["alt_names"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `The requested Subject Alternative Names, if any, -in a comma-delimited list. If email protection -is enabled for the role, this may contain -email addresses.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "DNS/Email Subject Alternative Names (SANs)", - }, - } - - fields["serial_number"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `The Subject's requested serial number, if any. -See RFC 4519 Section 2.31 'serialNumber' for a description of this field. -If you want more than one, specify alternative names in the alt_names -map using OID 2.5.4.5. This has no impact on the final certificate's -Serial Number field.`, - } - - fields["ttl"] = &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `The requested Time To Live for the certificate; -sets the expiration date. If not specified -the role default, backend default, or system -default TTL is used, in that order. Cannot -be larger than the role max TTL.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "TTL", - }, - } - - fields["not_after"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `Set the not after field of the certificate with specified date value. 
-The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ`, - } - - fields["remove_roots_from_chain"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Default: false, - Description: `Whether or not to remove self-signed CA certificates in the output -of the ca_chain field.`, - } - - fields["user_ids"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `The requested user_ids value to place in the subject, -if any, in a comma-delimited list. Restricted by allowed_user_ids. -Any values are added with OID 0.9.2342.19200300.100.1.1.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "User ID(s)", - }, - } - - fields = addIssuerRefField(fields) - - return fields -} - -// addCACommonFields adds fields with help text specific to CA -// certificate issuing and signing -func addCACommonFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields = addIssueAndSignCommonFields(fields) - - fields["alt_names"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `The requested Subject Alternative Names, if any, -in a comma-delimited list. May contain both -DNS names and email addresses.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "DNS/Email Subject Alternative Names (SANs)", - }, - } - - fields["common_name"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `The requested common name; if you want more than -one, specify the alternative names in the alt_names -map. If not specified when signing, the common -name will be taken from the CSR; other names -must still be specified in alt_names or ip_sans.`, - } - - fields["ttl"] = &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `The requested Time To Live for the certificate; -sets the expiration date. If not specified -the role default, backend default, or system -default TTL is used, in that order. Cannot -be larger than the mount max TTL. 
Note: -this only has an effect when generating -a CA cert or signing a CA cert, not when -generating a CSR for an intermediate CA.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "TTL", - }, - } - - fields["ou"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `If set, OU (OrganizationalUnit) will be set to -this value.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "OU (Organizational Unit)", - }, - } - - fields["organization"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `If set, O (Organization) will be set to -this value.`, - } - - fields["country"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `If set, Country will be set to -this value.`, - } - - fields["locality"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `If set, Locality will be set to -this value.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "Locality/City", - }, - } - - fields["province"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `If set, Province will be set to -this value.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "Province/State", - }, - } - - fields["street_address"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `If set, Street Address will be set to -this value.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "Street Address", - }, - } - - fields["postal_code"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `If set, Postal Code will be set to -this value.`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "Postal Code", - }, - } - - fields["serial_number"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `The Subject's requested serial number, if any. -See RFC 4519 Section 2.31 'serialNumber' for a description of this field. 
-If you want more than one, specify alternative names in the alt_names -map using OID 2.5.4.5. This has no impact on the final certificate's -Serial Number field.`, - } - - fields["not_after"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `Set the not after field of the certificate with specified date value. -The value format should be given in UTC format YYYY-MM-ddTHH:MM:SSZ`, - } - - fields["not_before_duration"] = &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Default: 30, - Description: `The duration before now which the certificate needs to be backdated by.`, - DisplayAttrs: &framework.DisplayAttributes{ - Value: 30, - }, - } - - return fields -} - -// addCAKeyGenerationFields adds fields with help text specific to CA key -// generation and exporting -func addCAKeyGenerationFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields["exported"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `Must be "internal", "exported" or "kms". If set to -"exported", the generated private key will be -returned. This is your *only* chance to retrieve -the private key!`, - AllowedValues: []interface{}{"internal", "external", "kms"}, - } - - fields["managed_key_name"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `The name of the managed key to use when the exported -type is kms. When kms type is the key type, this field or managed_key_id -is required. Ignored for other types.`, - } - - fields["managed_key_id"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `The name of the managed key to use when the exported -type is kms. When kms type is the key type, this field or managed_key_name -is required. Ignored for other types.`, - } - - fields["key_bits"] = &framework.FieldSchema{ - Type: framework.TypeInt, - Default: 0, - Description: `The number of bits to use. 
Allowed values are -0 (universal default); with rsa key_type: 2048 (default), 3072, or -4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with -ed25519.`, - DisplayAttrs: &framework.DisplayAttributes{ - Value: 0, - }, - } - - fields["signature_bits"] = &framework.FieldSchema{ - Type: framework.TypeInt, - Default: 0, - Description: `The number of bits to use in the signature -algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for -SHA-2-512. Defaults to 0 to automatically detect based on key length -(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, - DisplayAttrs: &framework.DisplayAttributes{ - Value: 0, - }, - } - - fields["use_pss"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Default: false, - Description: `Whether or not to use PSS signatures when using a -RSA key-type issuer. Defaults to false.`, - } - - fields["key_type"] = &framework.FieldSchema{ - Type: framework.TypeString, - Default: "rsa", - Description: `The type of key to use; defaults to RSA. "rsa" -"ec" and "ed25519" are the only valid values.`, - AllowedValues: []interface{}{"rsa", "ec", "ed25519"}, - DisplayAttrs: &framework.DisplayAttributes{ - Value: "rsa", - }, - } - - fields = addKeyRefNameFields(fields) - - return fields -} - -// addCAIssueFields adds fields common to CA issuing, e.g. when returning -// an actual certificate -func addCAIssueFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields["max_path_length"] = &framework.FieldSchema{ - Type: framework.TypeInt, - Default: -1, - Description: "The maximum allowable path length", - } - - fields["permitted_dns_domains"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `Domains for which this certificate is allowed to sign or issue child certificates. 
If set, all DNS names (subject and alt) on child certs must be exact matches or subsets of the given domains (see https://tools.ietf.org/html/rfc5280#section-4.2.1.10).`, - DisplayAttrs: &framework.DisplayAttributes{ - Name: "Permitted DNS Domains", - }, - } - - fields = addIssuerNameField(fields) - - return fields -} - -func addIssuerRefNameFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields = addIssuerNameField(fields) - fields = addIssuerRefField(fields) - return fields -} - -func addIssuerNameField(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields["issuer_name"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `Provide a name to the generated or existing issuer, the name -must be unique across all issuers and not be the reserved value 'default'`, - } - return fields -} - -func addIssuerRefField(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields[issuerRefParam] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `Reference to a existing issuer; either "default" -for the configured default issuer, an identifier or the name assigned -to the issuer.`, - Default: defaultRef, - } - return fields -} - -func addKeyRefNameFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields = addKeyNameField(fields) - fields = addKeyRefField(fields) - return fields -} - -func addKeyNameField(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields[keyNameParam] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `Provide a name to the generated or existing key, the name -must be unique across all keys and not be the reserved value 'default'`, - } - - return fields -} - -func addKeyRefField(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields[keyRefParam] = &framework.FieldSchema{ - Type: framework.TypeString, - 
Description: `Reference to a existing key; either "default" -for the configured default key, an identifier or the name assigned -to the key.`, - Default: defaultRef, - } - return fields -} - -func addTidyFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields["tidy_cert_store"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to enable tidying up -the certificate store`, - } - - fields["tidy_revocation_list"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Deprecated; synonym for 'tidy_revoked_certs`, - } - - fields["tidy_revoked_certs"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to expire all revoked -and expired certificates, removing them both from the CRL and from storage. The -CRL will be rotated if this causes any values to be removed.`, - } - - fields["tidy_revoked_cert_issuer_associations"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to validate issuer associations -on revocation entries. This helps increase the performance of CRL building -and OCSP responses.`, - } - - fields["tidy_expired_issuers"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to automatically remove expired issuers -past the issuer_safety_buffer. No keys will be removed as part of this -operation.`, - } - - fields["tidy_move_legacy_ca_bundle"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to move the legacy ca_bundle from -/config/ca_bundle to /config/ca_bundle.bak. This prevents downgrades -to pre-Vault 1.11 versions (as older PKI engines do not know about -the new multi-issuer storage layout), but improves the performance -on seal wrapped PKI mounts. This will only occur if at least -issuer_safety_buffer time has occurred after the initial storage -migration. - -This backup is saved in case of an issue in future migrations. 
-Operators may consider removing it via sys/raw if they desire. -The backup will be removed via a DELETE /root call, but note that -this removes ALL issuers within the mount (and is thus not desirable -in most operational scenarios).`, - } - - fields["tidy_acme"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to enable tidying ACME accounts, -orders and authorizations. ACME orders are tidied (deleted) -safety_buffer after the certificate associated with them expires, -or after the order and relevant authorizations have expired if no -certificate was produced. Authorizations are tidied with the -corresponding order. - -When a valid ACME Account is at least acme_account_safety_buffer -old, and has no remaining orders associated with it, the account is -marked as revoked. After another acme_account_safety_buffer has -passed from the revocation or deactivation date, a revoked or -deactivated ACME account is deleted.`, - Default: false, - } - - fields["safety_buffer"] = &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `The amount of extra time that must have passed -beyond certificate expiration before it is removed -from the backend storage and/or revocation list. -Defaults to 72 hours.`, - Default: int(defaultTidyConfig.SafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int - } - - fields["issuer_safety_buffer"] = &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `The amount of extra time that must have passed -beyond issuer's expiration before it is removed -from the backend storage. 
-Defaults to 8760 hours (1 year).`, - Default: int(defaultTidyConfig.IssuerSafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int - } - - fields["acme_account_safety_buffer"] = &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `The amount of time that must pass after creation -that an account with no orders is marked revoked, and the amount of time -after being marked revoked or deactivated.`, - Default: int(defaultTidyConfig.AcmeAccountSafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int - } - - fields["pause_duration"] = &framework.FieldSchema{ - Type: framework.TypeString, - Description: `The amount of time to wait between processing -certificates. This allows operators to change the execution profile -of tidy to take consume less resources by slowing down how long it -takes to run. Note that the entire list of certificates will be -stored in memory during the entire tidy operation, but resources to -read/process/update existing entries will be spread out over a -greater period of time. By default this is zero seconds.`, - Default: "0s", - } - - fields["tidy_revocation_queue"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to remove stale revocation queue entries -that haven't been confirmed by any active cluster. Only runs on the -active primary node`, - Default: defaultTidyConfig.RevocationQueue, - } - - fields["revocation_queue_safety_buffer"] = &framework.FieldSchema{ - Type: framework.TypeDurationSecond, - Description: `The amount of time that must pass from the -cross-cluster revocation request being initiated to when it will be -slated for removal. 
Setting this too low may remove valid revocation -requests before the owning cluster has a chance to process them, -especially if the cluster is offline.`, - Default: int(defaultTidyConfig.QueueSafetyBuffer / time.Second), // TypeDurationSecond currently requires defaults to be int - } - - fields["tidy_cross_cluster_revoked_certs"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Description: `Set to true to enable tidying up -the cross-cluster revoked certificate store. Only runs on the active -primary node.`, - } - - return fields -} - -// generate the entire list of schema fields we need for CSR sign verbatim, this is also -// leveraged by ACME internally. -func getCsrSignVerbatimSchemaFields() map[string]*framework.FieldSchema { - fields := map[string]*framework.FieldSchema{} - fields = addNonCACommonFields(fields) - fields = addSignVerbatimRoleFields(fields) - - fields["csr"] = &framework.FieldSchema{ - Type: framework.TypeString, - Default: "", - Description: `PEM-format CSR to be signed. Values will be -taken verbatim from the CSR, except for -basic constraints.`, - } - - return fields -} - -// addSignVerbatimRoleFields provides the fields and defaults to be used by anything that is building up the fields -// and their corresponding default values when generating/using a sign-verbatim type role such as buildSignVerbatimRole. -func addSignVerbatimRoleFields(fields map[string]*framework.FieldSchema) map[string]*framework.FieldSchema { - fields["key_usage"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Default: []string{"DigitalSignature", "KeyAgreement", "KeyEncipherment"}, - Description: `A comma-separated string or list of key usages (not extended -key usages). Valid values can be found at -https://golang.org/pkg/crypto/x509/#KeyUsage --- simply drop the "KeyUsage" part of the name. 
-To remove all key usages from being set, set -this value to an empty list.`, - } - - fields["ext_key_usage"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Default: []string{}, - Description: `A comma-separated string or list of extended key usages. Valid values can be found at -https://golang.org/pkg/crypto/x509/#ExtKeyUsage --- simply drop the "ExtKeyUsage" part of the name. -To remove all key usages from being set, set -this value to an empty list.`, - } - - fields["ext_key_usage_oids"] = &framework.FieldSchema{ - Type: framework.TypeCommaStringSlice, - Description: `A comma-separated string or list of extended key usage oids.`, - } - - fields["signature_bits"] = &framework.FieldSchema{ - Type: framework.TypeInt, - Default: 0, - Description: `The number of bits to use in the signature -algorithm; accepts 256 for SHA-2-256, 384 for SHA-2-384, and 512 for -SHA-2-512. Defaults to 0 to automatically detect based on key length -(SHA-2-256 for RSA keys, and matching the curve size for NIST P-Curves).`, - DisplayAttrs: &framework.DisplayAttributes{ - Value: 0, - }, - } - - fields["use_pss"] = &framework.FieldSchema{ - Type: framework.TypeBool, - Default: false, - Description: `Whether or not to use PSS signatures when using a -RSA key-type issuer. Defaults to false.`, - } - - return fields -} +```release-note:bug +ui: Fix payload sent when disabling replication +``` diff --git a/builtin/logical/pki/path_manage_keys.go b/builtin/logical/pki/path_manage_keys.go index 63e8ac668a58..d1433cfcd1f9 100644 --- a/builtin/logical/pki/path_manage_keys.go +++ b/builtin/logical/pki/path_manage_keys.go @@ -1,320 +1,2 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package pki - -import ( - "bytes" - "context" - "encoding/pem" - "net/http" - "strings" - - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/logical" -) - -func pathGenerateKey(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "keys/generate/(internal|exported|kms)", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "generate", - OperationSuffix: "internal-key|exported-key|kms-key", - }, - - Fields: map[string]*framework.FieldSchema{ - keyNameParam: { - Type: framework.TypeString, - Description: "Optional name to be used for this key", - }, - keyTypeParam: { - Type: framework.TypeString, - Default: "rsa", - Description: `The type of key to use; defaults to RSA. "rsa" -"ec" and "ed25519" are the only valid values.`, - AllowedValues: []interface{}{"rsa", "ec", "ed25519"}, - DisplayAttrs: &framework.DisplayAttributes{ - Value: "rsa", - }, - }, - keyBitsParam: { - Type: framework.TypeInt, - Default: 0, - Description: `The number of bits to use. Allowed values are -0 (universal default); with rsa key_type: 2048 (default), 3072, or -4096; with ec key_type: 224, 256 (default), 384, or 521; ignored with -ed25519.`, - }, - "managed_key_name": { - Type: framework.TypeString, - Description: `The name of the managed key to use when the exported -type is kms. When kms type is the key type, this field or managed_key_id -is required. Ignored for other types.`, - }, - "managed_key_id": { - Type: framework.TypeString, - Description: `The name of the managed key to use when the exported -type is kms. When kms type is the key type, this field or managed_key_name -is required. 
Ignored for other types.`, - }, - }, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathGenerateKeyHandler, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "key_id": { - Type: framework.TypeString, - Description: `ID assigned to this key.`, - Required: true, - }, - "key_name": { - Type: framework.TypeString, - Description: `Name assigned to this key.`, - Required: true, - }, - "key_type": { - Type: framework.TypeString, - Description: `The type of key to use; defaults to RSA. "rsa" - "ec" and "ed25519" are the only valid values.`, - Required: true, - }, - "private_key": { - Type: framework.TypeString, - Description: `The private key string`, - Required: false, - }, - }, - }}, - }, - - ForwardPerformanceStandby: true, - ForwardPerformanceSecondary: true, - }, - }, - - HelpSynopsis: pathGenerateKeyHelpSyn, - HelpDescription: pathGenerateKeyHelpDesc, - } -} - -const ( - pathGenerateKeyHelpSyn = `Generate a new private key used for signing.` - pathGenerateKeyHelpDesc = `This endpoint will generate a new key pair of the specified type (internal, exported, or kms).` -) - -func (b *backend) pathGenerateKeyHandler(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - // Since we're planning on updating issuers here, grab the lock so we've - // got a consistent view. - b.issuersLock.Lock() - defer b.issuersLock.Unlock() - - if b.useLegacyBundleCaStorage() { - return logical.ErrorResponse("Can not generate keys until migration has completed"), nil - } - - sc := b.makeStorageContext(ctx, req.Storage) - keyName, err := getKeyName(sc, data) - if err != nil { // Fail Immediately if Key Name is in Use, etc... 
- return logical.ErrorResponse(err.Error()), nil - } - - exportPrivateKey := false - var keyBundle certutil.KeyBundle - var actualPrivateKeyType certutil.PrivateKeyType - switch { - case strings.HasSuffix(req.Path, "/exported"): - exportPrivateKey = true - fallthrough - case strings.HasSuffix(req.Path, "/internal"): - keyType := data.Get(keyTypeParam).(string) - keyBits := data.Get(keyBitsParam).(int) - - keyBits, _, err := certutil.ValidateDefaultOrValueKeyTypeSignatureLength(keyType, keyBits, 0) - if err != nil { - return logical.ErrorResponse("Validation for key_type, key_bits failed: %s", err.Error()), nil - } - - // Internal key generation, stored in storage - keyBundle, err = certutil.CreateKeyBundle(keyType, keyBits, b.GetRandomReader()) - if err != nil { - return nil, err - } - - actualPrivateKeyType = keyBundle.PrivateKeyType - case strings.HasSuffix(req.Path, "/kms"): - keyId, err := getManagedKeyId(data) - if err != nil { - return nil, err - } - - keyBundle, actualPrivateKeyType, err = createKmsKeyBundle(ctx, b, keyId) - if err != nil { - return nil, err - } - default: - return logical.ErrorResponse("Unknown type of key to generate"), nil - } - - privateKeyPemString, err := keyBundle.ToPrivateKeyPemString() - if err != nil { - return nil, err - } - - key, _, err := sc.importKey(privateKeyPemString, keyName, keyBundle.PrivateKeyType) - if err != nil { - return nil, err - } - responseData := map[string]interface{}{ - keyIdParam: key.ID, - keyNameParam: key.Name, - keyTypeParam: string(actualPrivateKeyType), - } - if exportPrivateKey { - responseData["private_key"] = privateKeyPemString - } - return &logical.Response{ - Data: responseData, - }, nil -} - -func pathImportKey(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "keys/import", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "import", - OperationSuffix: "key", - }, - - Fields: map[string]*framework.FieldSchema{ - keyNameParam: 
{ - Type: framework.TypeString, - Description: "Optional name to be used for this key", - }, - "pem_bundle": { - Type: framework.TypeString, - Description: `PEM-format, unencrypted secret key`, - }, - }, - - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathImportKeyHandler, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "key_id": { - Type: framework.TypeString, - Description: `ID assigned to this key.`, - Required: true, - }, - "key_name": { - Type: framework.TypeString, - Description: `Name assigned to this key.`, - Required: true, - }, - "key_type": { - Type: framework.TypeString, - Description: `The type of key to use; defaults to RSA. "rsa" - "ec" and "ed25519" are the only valid values.`, - Required: true, - }, - }, - }}, - }, - ForwardPerformanceStandby: true, - ForwardPerformanceSecondary: true, - }, - }, - - HelpSynopsis: pathImportKeyHelpSyn, - HelpDescription: pathImportKeyHelpDesc, - } -} - -const ( - pathImportKeyHelpSyn = `Import the specified key.` - pathImportKeyHelpDesc = `This endpoint allows importing a specified issuer key from a pem bundle. -If key_name is set, that will be set on the key, assuming the key did not exist previously.` -) - -func (b *backend) pathImportKeyHandler(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { - // Since we're planning on updating issuers here, grab the lock so we've - // got a consistent view. 
- b.issuersLock.Lock() - defer b.issuersLock.Unlock() - - if b.useLegacyBundleCaStorage() { - return logical.ErrorResponse("Cannot import keys until migration has completed"), nil - } - - sc := b.makeStorageContext(ctx, req.Storage) - pemBundle := data.Get("pem_bundle").(string) - keyName, err := getKeyName(sc, data) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - if len(pemBundle) < 64 { - // It is almost nearly impossible to store a complete key in - // less than 64 bytes. It is definitely impossible to do so when PEM - // encoding has been applied. Detect this and give a better warning - // than "provided PEM block contained no data" in this case. This is - // because the PEM headers contain 5*4 + 6 + 4 + 2 + 2 = 34 characters - // minimum (five dashes, "BEGIN" + space + at least one character - // identifier, "END" + space + at least one character identifier, and - // a pair of new lines). That would leave 30 bytes for Base64 data, - // meaning at most a 22-byte DER key. Even with a 128-bit key, 6 bytes - // is not sufficient for the required ASN.1 structure and OID encoding. - // - // However, < 64 bytes is probably a good length for a file path so - // suggest that is the case. 
- return logical.ErrorResponse("provided data for import was too short; perhaps a path was passed to the API rather than the contents of a PEM file"), nil - } - - pemBytes := []byte(pemBundle) - var pemBlock *pem.Block - - var keys []string - for len(bytes.TrimSpace(pemBytes)) > 0 { - pemBlock, pemBytes = pem.Decode(pemBytes) - if pemBlock == nil { - return logical.ErrorResponse("provided PEM block contained no data"), nil - } - - pemBlockString := string(pem.EncodeToMemory(pemBlock)) - keys = append(keys, pemBlockString) - } - - if len(keys) != 1 { - return logical.ErrorResponse("only a single key can be present within the pem_bundle for importing"), nil - } - - key, existed, err := importKeyFromBytes(sc, keys[0], keyName) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - resp := logical.Response{ - Data: map[string]interface{}{ - keyIdParam: key.ID, - keyNameParam: key.Name, - keyTypeParam: key.PrivateKeyType, - }, - } - - if existed { - resp.AddWarning("Key already imported, use key/ endpoint to update name.") - } - - return &resp, nil -} +```release-note:change +logging: Vault server, Agent and Proxy now honor log file value and only add a timestamp on rotation. \ No newline at end of file diff --git a/builtin/logical/pki/path_manage_keys_test.go b/builtin/logical/pki/path_manage_keys_test.go index 68d31ef862f2..1b295b985687 100644 --- a/builtin/logical/pki/path_manage_keys_test.go +++ b/builtin/logical/pki/path_manage_keys_test.go @@ -1,441 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package pki - -import ( - "context" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "encoding/pem" - "fmt" - "testing" - - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - - "github.com/hashicorp/vault/sdk/helper/certutil" - - "github.com/hashicorp/vault/sdk/logical" - "github.com/stretchr/testify/require" -) - -func TestPKI_PathManageKeys_GenerateInternalKeys(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - tests := []struct { - name string - keyType string - keyBits []int - wantLogicalErr bool - }{ - {"all-defaults", "", []int{0}, false}, - {"rsa", "rsa", []int{0, 2048, 3072, 4096}, false}, - {"ec", "ec", []int{0, 224, 256, 384, 521}, false}, - {"ed25519", "ed25519", []int{0}, false}, - {"error-rsa", "rsa", []int{-1, 343444}, true}, - {"error-ec", "ec", []int{-1, 3434324}, true}, - {"error-bad-type", "dskjfkdsfjdkf", []int{0}, true}, - } - for _, tt := range tests { - tt := tt - for _, keyBitParam := range tt.keyBits { - keyName := fmt.Sprintf("%s-%d", tt.name, keyBitParam) - t.Run(keyName, func(t *testing.T) { - data := make(map[string]interface{}) - if tt.keyType != "" { - data["key_type"] = tt.keyType - } - if keyBitParam != 0 { - data["key_bits"] = keyBitParam - } - keyName = genUuid() + "-" + tt.keyType + "-key-name" - data["key_name"] = keyName - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "keys/generate/internal", - Storage: s, - Data: data, - MountPoint: "pki/", - }) - require.NoError(t, err, - "Failed generating key with values key_type:%s key_bits:%d key_name:%s", tt.keyType, keyBitParam, keyName) - require.NotNil(t, resp, - "Got nil response generating key with values key_type:%s key_bits:%d key_name:%s", tt.keyType, keyBitParam, keyName) - if tt.wantLogicalErr { - require.True(t, resp.IsError(), "expected logical error but the request passed:\n%#v", resp) - } else { - require.False(t, resp.IsError(), 
- "Got logical error response when not expecting one, "+ - "generating key with values key_type:%s key_bits:%d key_name:%s\n%s", tt.keyType, keyBitParam, keyName, resp.Error()) - - // Special case our all-defaults - if tt.keyType == "" { - tt.keyType = "rsa" - } - - require.Equal(t, tt.keyType, resp.Data["key_type"], "key_type field contained an invalid type") - require.NotEmpty(t, resp.Data["key_id"], "returned an empty key_id field, should never happen") - require.Equal(t, keyName, resp.Data["key_name"], "key name was not processed correctly") - require.Nil(t, resp.Data["private_key"], "private_key field should not appear in internal generation type.") - } - }) - } - } -} - -func TestPKI_PathManageKeys_GenerateExportedKeys(t *testing.T) { - t.Parallel() - // We tested a lot of the logic above within the internal test, so just make sure we honor the exported contract - b, s := CreateBackendWithStorage(t) - - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "keys/generate/exported", - Storage: s, - Data: map[string]interface{}{ - "key_type": "ec", - "key_bits": 224, - }, - MountPoint: "pki/", - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("keys/generate/exported"), logical.UpdateOperation), resp, true) - - require.NoError(t, err, "Failed generating exported key") - require.NotNil(t, resp, "Got nil response generating exported key") - require.Equal(t, "ec", resp.Data["key_type"], "key_type field contained an invalid type") - require.NotEmpty(t, resp.Data["key_id"], "returned an empty key_id field, should never happen") - require.Empty(t, resp.Data["key_name"], "key name should have been empty but was not") - require.NotEmpty(t, resp.Data["private_key"], "private_key field should not be empty in exported generation type.") - - // Make sure we can decode our private key as expected - keyData := resp.Data["private_key"].(string) - block, rest := pem.Decode([]byte(keyData)) - 
require.Empty(t, rest, "should not have had any trailing data") - require.NotEmpty(t, block, "failed decoding pem block") - - key, err := x509.ParseECPrivateKey(block.Bytes) - require.NoError(t, err, "failed parsing pem block as ec private key") - require.Equal(t, elliptic.P224(), key.Curve, "got unexpected curve value in returned private key") -} - -func TestPKI_PathManageKeys_ImportKeyBundle(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - bundle1, err := certutil.CreateKeyBundle("ec", 224, rand.Reader) - require.NoError(t, err, "failed generating an ec key bundle") - bundle2, err := certutil.CreateKeyBundle("rsa", 2048, rand.Reader) - require.NoError(t, err, "failed generating an rsa key bundle") - pem1, err := bundle1.ToPrivateKeyPemString() - require.NoError(t, err, "failed converting ec key to pem") - pem2, err := bundle2.ToPrivateKeyPemString() - require.NoError(t, err, "failed converting rsa key to pem") - - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "keys/import", - Storage: s, - Data: map[string]interface{}{ - "key_name": "my-ec-key", - "pem_bundle": pem1, - }, - MountPoint: "pki/", - }) - - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("keys/import"), logical.UpdateOperation), resp, true) - - require.NoError(t, err, "Failed importing ec key") - require.NotNil(t, resp, "Got nil response importing ec key") - require.False(t, resp.IsError(), "received an error response: %v", resp.Error()) - require.NotEmpty(t, resp.Data["key_id"], "key id for ec import response was empty") - require.Equal(t, "my-ec-key", resp.Data["key_name"], "key_name was incorrect for ec key") - require.Equal(t, certutil.ECPrivateKey, resp.Data["key_type"]) - keyId1 := resp.Data["key_id"].(keyID) - - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "keys/import", - Storage: s, - Data: map[string]interface{}{ - 
"key_name": "my-rsa-key", - "pem_bundle": pem2, - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "Failed importing rsa key") - require.NotNil(t, resp, "Got nil response importing rsa key") - require.False(t, resp.IsError(), "received an error response: %v", resp.Error()) - require.NotEmpty(t, resp.Data["key_id"], "key id for rsa import response was empty") - require.Equal(t, "my-rsa-key", resp.Data["key_name"], "key_name was incorrect for ec key") - require.Equal(t, certutil.RSAPrivateKey, resp.Data["key_type"]) - keyId2 := resp.Data["key_id"].(keyID) - - require.NotEqual(t, keyId1, keyId2) - - // Attempt to reimport the same key with a different name. - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "keys/import", - Storage: s, - Data: map[string]interface{}{ - "key_name": "my-new-ec-key", - "pem_bundle": pem1, - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "Failed importing the same ec key") - require.NotNil(t, resp, "Got nil response importing the same ec key") - require.False(t, resp.IsError(), "received an error response: %v", resp.Error()) - require.NotEmpty(t, resp.Data["key_id"], "key id for ec import response was empty") - // Note we should receive back the original name, not the new updated name. - require.Equal(t, "my-ec-key", resp.Data["key_name"], "key_name was incorrect for ec key") - require.Equal(t, certutil.ECPrivateKey, resp.Data["key_type"]) - keyIdReimport := resp.Data["key_id"] - require.Equal(t, keyId1, keyIdReimport, "the re-imported key did not return the same key id") - - // Make sure we can not reuse an existing name across different keys. 
- bundle3, err := certutil.CreateKeyBundle("ec", 224, rand.Reader) - require.NoError(t, err, "failed generating an ec key bundle") - pem3, err := bundle3.ToPrivateKeyPemString() - require.NoError(t, err, "failed converting rsa key to pem") - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "keys/import", - Storage: s, - Data: map[string]interface{}{ - "key_name": "my-ec-key", - "pem_bundle": pem3, - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "Failed importing the same ec key") - require.NotNil(t, resp, "Got nil response importing the same ec key") - require.True(t, resp.IsError(), "should have received an error response importing a key with a re-used name") - - // Delete the key to make sure re-importing gets another ID - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.DeleteOperation, - Path: "key/" + keyId2.String(), - Storage: s, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed deleting keyId 2") - require.Nil(t, resp, "Got non-nil response deleting the key: %#v", resp) - - // Deleting a non-existent key should be okay... 
- resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.DeleteOperation, - Path: "key/" + keyId2.String(), - Storage: s, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed deleting keyId 2") - require.Nil(t, resp, "Got non-nil response deleting the key: %#v", resp) - - // Let's reimport key 2 post-deletion to make sure we re-generate a new key id - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "keys/import", - Storage: s, - Data: map[string]interface{}{ - "key_name": "my-rsa-key", - "pem_bundle": pem2, - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "Failed importing rsa key") - require.NotNil(t, resp, "Got nil response importing rsa key") - require.False(t, resp.IsError(), "received an error response: %v", resp.Error()) - require.NotEmpty(t, resp.Data["key_id"], "key id for rsa import response was empty") - require.Equal(t, "my-rsa-key", resp.Data["key_name"], "key_name was incorrect for ec key") - require.Equal(t, certutil.RSAPrivateKey, resp.Data["key_type"]) - keyId2Reimport := resp.Data["key_id"].(keyID) - - require.NotEqual(t, keyId2, keyId2Reimport, "re-importing key 2 did not generate a new key id") -} - -func TestPKI_PathManageKeys_DeleteDefaultKeyWarns(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "keys/generate/internal", - Storage: s, - Data: map[string]interface{}{"key_type": "ec"}, - MountPoint: "pki/", - }) - require.NoError(t, err, "Failed generating key") - require.NotNil(t, resp, "Got nil response generating key") - require.False(t, resp.IsError(), "resp contained errors generating key: %#v", resp.Error()) - keyId := resp.Data["key_id"].(keyID) - - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.DeleteOperation, - Path: "key/" + keyId.String(), - 
Storage: s, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed deleting default key") - require.NotNil(t, resp, "Got nil response deleting the default key") - require.False(t, resp.IsError(), "expected no errors deleting default key: %#v", resp.Error()) - require.NotEmpty(t, resp.Warnings, "expected warnings to be populated on deleting default key") -} - -func TestPKI_PathManageKeys_DeleteUsedKeyFails(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "issuers/generate/root/internal", - Storage: s, - Data: map[string]interface{}{"common_name": "test.com"}, - MountPoint: "pki/", - }) - require.NoError(t, err, "Failed generating issuer") - require.NotNil(t, resp, "Got nil response generating issuer") - require.False(t, resp.IsError(), "resp contained errors generating issuer: %#v", resp.Error()) - keyId := resp.Data["key_id"].(keyID) - - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.DeleteOperation, - Path: "key/" + keyId.String(), - Storage: s, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed deleting key used by an issuer") - require.NotNil(t, resp, "Got nil response deleting key used by an issuer") - require.True(t, resp.IsError(), "expected an error deleting key used by an issuer") -} - -func TestPKI_PathManageKeys_UpdateKeyDetails(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "keys/generate/internal", - Storage: s, - Data: map[string]interface{}{"key_type": "ec"}, - MountPoint: "pki/", - }) - require.NoError(t, err, "Failed generating key") - require.NotNil(t, resp, "Got nil response generating key") - require.False(t, resp.IsError(), "resp contained errors generating key: %#v", resp.Error()) - keyId := 
resp.Data["key_id"].(keyID) - - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "key/" + keyId.String(), - Storage: s, - Data: map[string]interface{}{"key_name": "new-name"}, - MountPoint: "pki/", - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("key/"+keyId.String()), logical.UpdateOperation), resp, true) - - require.NoError(t, err, "failed updating key with new name") - require.NotNil(t, resp, "Got nil response updating key with new name") - require.False(t, resp.IsError(), "unexpected error updating key with new name: %#v", resp.Error()) - - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.ReadOperation, - Path: "key/" + keyId.String(), - Storage: s, - MountPoint: "pki/", - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("key/"+keyId.String()), logical.ReadOperation), resp, true) - - require.NoError(t, err, "failed reading key after name update") - require.NotNil(t, resp, "Got nil response reading key after name update") - require.False(t, resp.IsError(), "unexpected error reading key: %#v", resp.Error()) - keyName := resp.Data["key_name"].(string) - - require.Equal(t, "new-name", keyName, "failed to update key_name expected: new-name was: %s", keyName) - - // Make sure we do not allow updates to invalid name values - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "key/" + keyId.String(), - Storage: s, - Data: map[string]interface{}{"key_name": "a-bad\\-name"}, - MountPoint: "pki/", - }) - require.NoError(t, err, "failed updating key with a bad name") - require.NotNil(t, resp, "Got nil response updating key with a bad name") - require.True(t, resp.IsError(), "expected an error updating key with a bad name, but did not get one.") -} - -func TestPKI_PathManageKeys_ImportKeyBundleBadData(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) 
- - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "keys/import", - Storage: s, - Data: map[string]interface{}{ - "key_name": "my-ec-key", - "pem_bundle": "this-is-not-a-pem-bundle", - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "got a 500 error type response from a bad pem bundle") - require.NotNil(t, resp, "Got nil response importing a bad pem bundle") - require.True(t, resp.IsError(), "should have received an error response importing a bad pem bundle") - - // Make sure we also bomb on a proper certificate - bundle := genCertBundle(t, b, s) - resp, err = b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "keys/import", - Storage: s, - Data: map[string]interface{}{ - "pem_bundle": bundle.Certificate, - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "got a 500 error type response from a certificate pem bundle") - require.NotNil(t, resp, "Got nil response importing a certificate bundle") - require.True(t, resp.IsError(), "should have received an error response importing a certificate pem bundle") -} - -func TestPKI_PathManageKeys_ImportKeyRejectsMultipleKeys(t *testing.T) { - t.Parallel() - b, s := CreateBackendWithStorage(t) - - bundle1, err := certutil.CreateKeyBundle("ec", 224, rand.Reader) - require.NoError(t, err, "failed generating an ec key bundle") - bundle2, err := certutil.CreateKeyBundle("rsa", 2048, rand.Reader) - require.NoError(t, err, "failed generating an rsa key bundle") - pem1, err := bundle1.ToPrivateKeyPemString() - require.NoError(t, err, "failed converting ec key to pem") - pem2, err := bundle2.ToPrivateKeyPemString() - require.NoError(t, err, "failed converting rsa key to pem") - - importPem := pem1 + "\n" + pem2 - - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "keys/import", - Storage: s, - Data: map[string]interface{}{ - "key_name": 
"my-ec-key", - "pem_bundle": importPem, - }, - MountPoint: "pki/", - }) - require.NoError(t, err, "got a 500 error type response from a bad pem bundle") - require.NotNil(t, resp, "Got nil response importing a bad pem bundle") - require.True(t, resp.IsError(), "should have received an error response importing a pem bundle with more than 1 key") - - ctx := context.Background() - sc := b.makeStorageContext(ctx, s) - keys, _ := sc.listKeys() - for _, keyId := range keys { - id, _ := sc.fetchKeyById(keyId) - t.Logf("%s:%s", id.ID, id.Name) - } -} +```release-note:improvement +ui: Update AlertInline component to use Helios Design System Alert component +``` diff --git a/builtin/logical/pki/path_ocsp.go b/builtin/logical/pki/path_ocsp.go index 98adcc5fe4f2..5fcde924fae2 100644 --- a/builtin/logical/pki/path_ocsp.go +++ b/builtin/logical/pki/path_ocsp.go @@ -1,532 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package pki - -import ( - "bytes" - "context" - "crypto" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "errors" - "fmt" - "io" - "math/big" - "net/http" - "strings" - "time" - - "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/helper/errutil" - "github.com/hashicorp/vault/sdk/logical" - "golang.org/x/crypto/ocsp" -) - -const ( - ocspReqParam = "req" - ocspResponseContentType = "application/ocsp-response" - maximumRequestSize = 2048 // A normal simple request is 87 bytes, so give us some buffer -) - -type ocspRespInfo struct { - serialNumber *big.Int - ocspStatus int - revocationTimeUTC *time.Time - issuerID issuerID -} - -// These response variables should not be mutated, instead treat them as constants -var ( - OcspUnauthorizedResponse = &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: ocspResponseContentType, - logical.HTTPStatusCode: 
http.StatusUnauthorized, - logical.HTTPRawBody: ocsp.UnauthorizedErrorResponse, - }, - } - OcspMalformedResponse = &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: ocspResponseContentType, - logical.HTTPStatusCode: http.StatusBadRequest, - logical.HTTPRawBody: ocsp.MalformedRequestErrorResponse, - }, - } - OcspInternalErrorResponse = &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: ocspResponseContentType, - logical.HTTPStatusCode: http.StatusInternalServerError, - logical.HTTPRawBody: ocsp.InternalErrorErrorResponse, - }, - } - - ErrMissingOcspUsage = errors.New("issuer entry did not have the OCSPSigning usage") - ErrIssuerHasNoKey = errors.New("issuer has no key") - ErrUnknownIssuer = errors.New("unknown issuer") -) - -func buildPathOcspGet(b *backend) *framework.Path { - pattern := "ocsp/" + framework.MatchAllRegex(ocspReqParam) - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "query", - OperationSuffix: "ocsp-with-get-req", - } - - return buildOcspGetWithPath(b, pattern, displayAttrs) -} - -func buildPathUnifiedOcspGet(b *backend) *framework.Path { - pattern := "unified-ocsp/" + framework.MatchAllRegex(ocspReqParam) - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "query", - OperationSuffix: "unified-ocsp-with-get-req", - } - - return buildOcspGetWithPath(b, pattern, displayAttrs) -} - -func buildOcspGetWithPath(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { - return &framework.Path{ - Pattern: pattern, - DisplayAttrs: displayAttrs, - Fields: map[string]*framework.FieldSchema{ - ocspReqParam: { - Type: framework.TypeString, - Description: "base-64 encoded ocsp request", - }, - }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.ReadOperation: &framework.PathOperation{ - Callback: b.ocspHandler, - }, - }, - - HelpSynopsis: 
pathOcspHelpSyn, - HelpDescription: pathOcspHelpDesc, - } -} - -func buildPathOcspPost(b *backend) *framework.Path { - pattern := "ocsp" - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "query", - OperationSuffix: "ocsp", - } - - return buildOcspPostWithPath(b, pattern, displayAttrs) -} - -func buildPathUnifiedOcspPost(b *backend) *framework.Path { - pattern := "unified-ocsp" - - displayAttrs := &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKI, - OperationVerb: "query", - OperationSuffix: "unified-ocsp", - } - - return buildOcspPostWithPath(b, pattern, displayAttrs) -} - -func buildOcspPostWithPath(b *backend, pattern string, displayAttrs *framework.DisplayAttributes) *framework.Path { - return &framework.Path{ - Pattern: pattern, - DisplayAttrs: displayAttrs, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.ocspHandler, - }, - }, - - HelpSynopsis: pathOcspHelpSyn, - HelpDescription: pathOcspHelpDesc, - } -} - -func (b *backend) ocspHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { - sc := b.makeStorageContext(ctx, request.Storage) - cfg, err := b.crlBuilder.getConfigWithUpdate(sc) - if err != nil || cfg.OcspDisable || (isUnifiedOcspPath(request) && !cfg.UnifiedCRL) { - return OcspUnauthorizedResponse, nil - } - - derReq, err := fetchDerEncodedRequest(request, data) - if err != nil { - return OcspMalformedResponse, nil - } - - ocspReq, err := ocsp.ParseRequest(derReq) - if err != nil { - return OcspMalformedResponse, nil - } - - useUnifiedStorage := canUseUnifiedStorage(request, cfg) - - ocspStatus, err := getOcspStatus(sc, ocspReq, useUnifiedStorage) - if err != nil { - return logAndReturnInternalError(b, err), nil - } - - caBundle, issuer, err := lookupOcspIssuer(sc, ocspReq, ocspStatus.issuerID) - if err != nil { - if errors.Is(err, 
ErrUnknownIssuer) { - // Since we were not able to find a matching issuer for the incoming request - // generate an Unknown OCSP response. This might turn into an Unauthorized if - // we find out that we don't have a default issuer or it's missing the proper Usage flags - return generateUnknownResponse(cfg, sc, ocspReq), nil - } - if errors.Is(err, ErrMissingOcspUsage) { - // If we did find a matching issuer but aren't allowed to sign, the spec says - // we should be responding with an Unauthorized response as we don't have the - // ability to sign the response. - // https://www.rfc-editor.org/rfc/rfc5019#section-2.2.3 - return OcspUnauthorizedResponse, nil - } - return logAndReturnInternalError(b, err), nil - } - - byteResp, err := genResponse(cfg, caBundle, ocspStatus, ocspReq.HashAlgorithm, issuer.RevocationSigAlg) - if err != nil { - return logAndReturnInternalError(b, err), nil - } - - return &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: ocspResponseContentType, - logical.HTTPStatusCode: http.StatusOK, - logical.HTTPRawBody: byteResp, - }, - }, nil -} - -func canUseUnifiedStorage(req *logical.Request, cfg *crlConfig) bool { - if isUnifiedOcspPath(req) { - return true - } - - // We are operating on the existing /pki/ocsp path, both of these fields need to be enabled - // for us to use the unified path. - return shouldLocalPathsUseUnified(cfg) -} - -func isUnifiedOcspPath(req *logical.Request) bool { - return strings.HasPrefix(req.Path, "unified-ocsp") -} - -func generateUnknownResponse(cfg *crlConfig, sc *storageContext, ocspReq *ocsp.Request) *logical.Response { - // Generate an Unknown OCSP response, signing with the default issuer from the mount as we did - // not match the request's issuer. If no default issuer can be used, return with Unauthorized as there - // isn't much else we can do at this point. 
- config, err := sc.getIssuersConfig() - if err != nil { - return logAndReturnInternalError(sc.Backend, err) - } - - if config.DefaultIssuerId == "" { - // If we don't have any issuers or default issuers set, no way to sign a response so Unauthorized it is. - return OcspUnauthorizedResponse - } - - caBundle, issuer, err := getOcspIssuerParsedBundle(sc, config.DefaultIssuerId) - if err != nil { - if errors.Is(err, ErrUnknownIssuer) || errors.Is(err, ErrIssuerHasNoKey) { - // We must have raced on a delete/update of the default issuer, anyways - // no way to sign a response so Unauthorized it is. - return OcspUnauthorizedResponse - } - return logAndReturnInternalError(sc.Backend, err) - } - - if !issuer.Usage.HasUsage(OCSPSigningUsage) { - // If we don't have any issuers or default issuers set, no way to sign a response so Unauthorized it is. - return OcspUnauthorizedResponse - } - - info := &ocspRespInfo{ - serialNumber: ocspReq.SerialNumber, - ocspStatus: ocsp.Unknown, - } - - byteResp, err := genResponse(cfg, caBundle, info, ocspReq.HashAlgorithm, issuer.RevocationSigAlg) - if err != nil { - return logAndReturnInternalError(sc.Backend, err) - } - - return &logical.Response{ - Data: map[string]interface{}{ - logical.HTTPContentType: ocspResponseContentType, - logical.HTTPStatusCode: http.StatusOK, - logical.HTTPRawBody: byteResp, - }, - } -} - -func fetchDerEncodedRequest(request *logical.Request, data *framework.FieldData) ([]byte, error) { - switch request.Operation { - case logical.ReadOperation: - // The param within the GET request should have a base64 encoded version of a DER request. 
- base64Req := data.Get(ocspReqParam).(string) - if base64Req == "" { - return nil, errors.New("no base64 encoded ocsp request was found") - } - - if len(base64Req) >= maximumRequestSize { - return nil, errors.New("request is too large") - } - - return base64.StdEncoding.DecodeString(base64Req) - case logical.UpdateOperation: - // POST bodies should contain the binary form of the DER request. - // NOTE: Writing an empty update request to Vault causes a nil request.HTTPRequest, and that object - // says that it is possible for its Body element to be nil as well, so check both just in case. - if request.HTTPRequest == nil { - return nil, errors.New("no data in request") - } - rawBody := request.HTTPRequest.Body - if rawBody == nil { - return nil, errors.New("no data in request body") - } - defer rawBody.Close() - - requestBytes, err := io.ReadAll(io.LimitReader(rawBody, maximumRequestSize)) - if err != nil { - return nil, err - } - - if len(requestBytes) >= maximumRequestSize { - return nil, errors.New("request is too large") - } - return requestBytes, nil - default: - return nil, fmt.Errorf("unsupported request method: %s", request.Operation) - } -} - -func logAndReturnInternalError(b *backend, err error) *logical.Response { - // Since OCSP might be a high traffic endpoint, we will log at debug level only - // any internal errors we do get. There is no way for us to return to the end-user - // errors, so we rely on the log statement to help in debugging possible - // issues in the field. 
- b.Logger().Debug("OCSP internal error", "error", err) - return OcspInternalErrorResponse -} - -func getOcspStatus(sc *storageContext, ocspReq *ocsp.Request, useUnifiedStorage bool) (*ocspRespInfo, error) { - revEntryRaw, err := fetchCertBySerialBigInt(sc, revokedPath, ocspReq.SerialNumber) - if err != nil { - return nil, err - } - - info := ocspRespInfo{ - serialNumber: ocspReq.SerialNumber, - ocspStatus: ocsp.Good, - } - - if revEntryRaw != nil { - var revEntry revocationInfo - if err := revEntryRaw.DecodeJSON(&revEntry); err != nil { - return nil, err - } - - info.ocspStatus = ocsp.Revoked - info.revocationTimeUTC = &revEntry.RevocationTimeUTC - info.issuerID = revEntry.CertificateIssuer // This might be empty if the CRL hasn't been rebuilt - } else if useUnifiedStorage { - dashSerial := normalizeSerialFromBigInt(ocspReq.SerialNumber) - unifiedEntry, err := getUnifiedRevocationBySerial(sc, dashSerial) - if err != nil { - return nil, err - } - - if unifiedEntry != nil { - info.ocspStatus = ocsp.Revoked - info.revocationTimeUTC = &unifiedEntry.RevocationTimeUTC - info.issuerID = unifiedEntry.CertificateIssuer - } - } - - return &info, nil -} - -func lookupOcspIssuer(sc *storageContext, req *ocsp.Request, optRevokedIssuer issuerID) (*certutil.ParsedCertBundle, *issuerEntry, error) { - reqHash := req.HashAlgorithm - if !reqHash.Available() { - return nil, nil, x509.ErrUnsupportedAlgorithm - } - - // This will prime up issuerIds, with either the optRevokedIssuer value if set - // or if we are operating in legacy storage mode, the shim bundle id or finally - // a list of all our issuers in this mount. 
- issuerIds, err := lookupIssuerIds(sc, optRevokedIssuer) - if err != nil { - return nil, nil, err - } - - matchedButNoUsage := false - for _, issuerId := range issuerIds { - parsedBundle, issuer, err := getOcspIssuerParsedBundle(sc, issuerId) - if err != nil { - // A bit touchy here as if we get an ErrUnknownIssuer for an issuer id that we picked up - // from a revocation entry, we still return an ErrUnknownOcspIssuer as we can't validate - // the end-user actually meant this specific issuer's cert with serial X. - if errors.Is(err, ErrUnknownIssuer) || errors.Is(err, ErrIssuerHasNoKey) { - // This skips either bad issuer ids, or root certs with no keys that we can't use. - continue - } - return nil, nil, err - } - - // Make sure the client and Vault are talking about the same issuer, otherwise - // we might have a case of a matching serial number for a different issuer which - // we should not respond back in the affirmative about. - matches, err := doesRequestMatchIssuer(parsedBundle, req) - if err != nil { - return nil, nil, err - } - - if matches { - if !issuer.Usage.HasUsage(OCSPSigningUsage) { - matchedButNoUsage = true - // We found a matching issuer, but it's not allowed to sign the - // response, there might be another issuer that we rotated - // that will match though, so keep iterating. - continue - } - - return parsedBundle, issuer, nil - } - } - - if matchedButNoUsage { - // We matched an issuer but it did not have an OCSP signing usage set so bail. 
- return nil, nil, ErrMissingOcspUsage - } - - return nil, nil, ErrUnknownIssuer -} - -func getOcspIssuerParsedBundle(sc *storageContext, issuerId issuerID) (*certutil.ParsedCertBundle, *issuerEntry, error) { - issuer, bundle, err := sc.fetchCertBundleByIssuerId(issuerId, true) - if err != nil { - switch err.(type) { - case errutil.UserError: - // Most likely the issuer id no longer exists skip it - return nil, nil, ErrUnknownIssuer - default: - return nil, nil, err - } - } - - if issuer.KeyID == "" { - // No point if the key does not exist from the issuer to use as a signer. - return nil, nil, ErrIssuerHasNoKey - } - - caBundle, err := parseCABundle(sc.Context, sc.Backend, bundle) - if err != nil { - return nil, nil, err - } - - return caBundle, issuer, nil -} - -func lookupIssuerIds(sc *storageContext, optRevokedIssuer issuerID) ([]issuerID, error) { - if optRevokedIssuer != "" { - return []issuerID{optRevokedIssuer}, nil - } - - if sc.Backend.useLegacyBundleCaStorage() { - return []issuerID{legacyBundleShimID}, nil - } - - return sc.listIssuers() -} - -func doesRequestMatchIssuer(parsedBundle *certutil.ParsedCertBundle, req *ocsp.Request) (bool, error) { - // issuer name hashing taken from golang.org/x/crypto/ocsp. 
- var pkInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString - } - if _, err := asn1.Unmarshal(parsedBundle.Certificate.RawSubjectPublicKeyInfo, &pkInfo); err != nil { - return false, err - } - - h := req.HashAlgorithm.New() - h.Write(pkInfo.PublicKey.RightAlign()) - issuerKeyHash := h.Sum(nil) - - h.Reset() - h.Write(parsedBundle.Certificate.RawSubject) - issuerNameHash := h.Sum(nil) - - return bytes.Equal(req.IssuerKeyHash, issuerKeyHash) && bytes.Equal(req.IssuerNameHash, issuerNameHash), nil -} - -func genResponse(cfg *crlConfig, caBundle *certutil.ParsedCertBundle, info *ocspRespInfo, reqHash crypto.Hash, revSigAlg x509.SignatureAlgorithm) ([]byte, error) { - curTime := time.Now() - duration, err := parseutil.ParseDurationSecond(cfg.OcspExpiry) - if err != nil { - return nil, err - } - - // x/crypto/ocsp lives outside of the standard library's crypto/x509 and includes - // ripped-off variants of many internal structures and functions. These - // lack support for PSS signatures altogether, so if we have revSigAlg - // that uses PSS, downgrade it to PKCS#1v1.5. This fixes the lack of - // support in x/ocsp, at the risk of OCSP requests failing due to lack - // of PKCS#1v1.5 (in say, PKCS#11 HSMs or GCP). - // - // Other restrictions, such as hash function selection, will still work - // however. - switch revSigAlg { - case x509.SHA256WithRSAPSS: - revSigAlg = x509.SHA256WithRSA - case x509.SHA384WithRSAPSS: - revSigAlg = x509.SHA384WithRSA - case x509.SHA512WithRSAPSS: - revSigAlg = x509.SHA512WithRSA - } - - // Due to a bug in Go's ocsp.ParseResponse(...), we do not provision - // Certificate any more on the response to help Go based OCSP clients. - // This was technically unnecessary, as the Certificate given here - // both signed the OCSP response and issued the leaf cert, and so - // should already be trusted by the client. 
- // - // See also: https://github.com/golang/go/issues/59641 - template := ocsp.Response{ - IssuerHash: reqHash, - Status: info.ocspStatus, - SerialNumber: info.serialNumber, - ThisUpdate: curTime, - NextUpdate: curTime.Add(duration), - ExtraExtensions: []pkix.Extension{}, - SignatureAlgorithm: revSigAlg, - } - - if info.ocspStatus == ocsp.Revoked { - template.RevokedAt = *info.revocationTimeUTC - template.RevocationReason = ocsp.Unspecified - } - - return ocsp.CreateResponse(caBundle.Certificate, caBundle.Certificate, template, caBundle.PrivateKey) -} - -const pathOcspHelpSyn = ` -Query a certificate's revocation status through OCSP' -` - -const pathOcspHelpDesc = ` -This endpoint expects DER encoded OCSP requests and returns DER encoded OCSP responses -` +```release-note:bug +eventlogger: Update library to v0.2.7 to address race condition +``` diff --git a/builtin/logical/pki/path_ocsp_test.go b/builtin/logical/pki/path_ocsp_test.go index b62e7d4b5a17..ab5ce613c404 100644 --- a/builtin/logical/pki/path_ocsp_test.go +++ b/builtin/logical/pki/path_ocsp_test.go @@ -1,710 +1,4 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package pki - -import ( - "bytes" - "context" - "crypto" - "crypto/x509" - "encoding/base64" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "testing" - - "github.com/hashicorp/go-secure-stdlib/parseutil" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - "github.com/stretchr/testify/require" - "golang.org/x/crypto/ocsp" -) - -// If the ocsp_disabled flag is set to true in the crl configuration make sure we always -// return an Unauthorized error back as we assume an end-user disabling the feature does -// not want us to act as the OCSP authority and the RFC specifies this is the appropriate response. 
-func TestOcsp_Disabled(t *testing.T) { - t.Parallel() - type testArgs struct { - reqType string - } - var tests []testArgs - for _, reqType := range []string{"get", "post"} { - tests = append(tests, testArgs{ - reqType: reqType, - }) - } - for _, tt := range tests { - localTT := tt - t.Run(localTT.reqType, func(t *testing.T) { - b, s, testEnv := setupOcspEnv(t, "rsa") - resp, err := CBWrite(b, s, "config/crl", map[string]interface{}{ - "ocsp_disable": "true", - }) - requireSuccessNonNilResponse(t, resp, err) - resp, err = SendOcspRequest(t, b, s, localTT.reqType, testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 401, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) - }) - } -} - -// If we can't find the issuer within the request and have no default issuer to sign an Unknown response -// with return an UnauthorizedErrorResponse/according to/the RFC, similar to if we are disabled (lack of authority) -// This behavior differs from CRLs when an issuer is removed from a mount. 
-func TestOcsp_UnknownIssuerWithNoDefault(t *testing.T) { - t.Parallel() - - _, _, testEnv := setupOcspEnv(t, "ec") - // Create another completely empty mount so the created issuer/certificate above is unknown - b, s := CreateBackendWithStorage(t) - - resp, err := SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 401, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) -} - -// If the issuer in the request does exist, but the request coming in associates the serial with the -// wrong issuer return an Unknown response back to the caller. -func TestOcsp_WrongIssuerInRequest(t *testing.T) { - t.Parallel() - - b, s, testEnv := setupOcspEnv(t, "ec") - serial := serialFromCert(testEnv.leafCertIssuer1) - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serial, - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer2, crypto.SHA1) - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Unknown, ocspResp.Status) -} - -// Verify that requests we can't properly decode result in the correct response of MalformedRequestError -func TestOcsp_MalformedRequests(t *testing.T) { - t.Parallel() - type testArgs struct { - reqType string - } - var 
tests []testArgs - for _, reqType := range []string{"get", "post"} { - tests = append(tests, testArgs{ - reqType: reqType, - }) - } - for _, tt := range tests { - localTT := tt - t.Run(localTT.reqType, func(t *testing.T) { - b, s, _ := setupOcspEnv(t, "rsa") - badReq := []byte("this is a bad request") - var resp *logical.Response - var err error - switch localTT.reqType { - case "get": - resp, err = sendOcspGetRequest(b, s, badReq) - case "post": - resp, err = sendOcspPostRequest(b, s, badReq) - default: - t.Fatalf("bad request type") - } - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 400, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.MalformedRequestErrorResponse, respDer) - }) - } -} - -// Validate that we properly handle a revocation entry that contains an issuer ID that no longer exists, -// the best we can do in this use case is to respond back with the default issuer that we don't know -// the issuer that they are requesting (we can't guarantee that the client is actually requesting a serial -// from that issuer) -func TestOcsp_InvalidIssuerIdInRevocationEntry(t *testing.T) { - t.Parallel() - - b, s, testEnv := setupOcspEnv(t, "ec") - ctx := context.Background() - - // Revoke the entry - serial := serialFromCert(testEnv.leafCertIssuer1) - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serial, - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - // Twiddle the entry so that the issuer id is no longer valid. 
- storagePath := revokedPath + normalizeSerial(serial) - var revInfo revocationInfo - revEntry, err := s.Get(ctx, storagePath) - require.NoError(t, err, "failed looking up storage path: %s", storagePath) - err = revEntry.DecodeJSON(&revInfo) - require.NoError(t, err, "failed decoding storage entry: %v", revEntry) - revInfo.CertificateIssuer = "00000000-0000-0000-0000-000000000000" - revEntry, err = logical.StorageEntryJSON(storagePath, revInfo) - require.NoError(t, err, "failed re-encoding revocation info: %v", revInfo) - err = s.Put(ctx, revEntry) - require.NoError(t, err, "failed writing out new revocation entry: %v", revEntry) - - // Send the request - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Unknown, ocspResp.Status) -} - -// Validate that we properly handle an unknown issuer use-case but that the default issuer -// does not have the OCSP usage flag set, we can't do much else other than reply with an -// Unauthorized response. 
-func TestOcsp_UnknownIssuerIdWithDefaultHavingOcspUsageRemoved(t *testing.T) { - t.Parallel() - - b, s, testEnv := setupOcspEnv(t, "ec") - ctx := context.Background() - - // Revoke the entry - serial := serialFromCert(testEnv.leafCertIssuer1) - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serial, - }) - schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("revoke"), logical.UpdateOperation), resp, true) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - // Twiddle the entry so that the issuer id is no longer valid. - storagePath := revokedPath + normalizeSerial(serial) - var revInfo revocationInfo - revEntry, err := s.Get(ctx, storagePath) - require.NoError(t, err, "failed looking up storage path: %s", storagePath) - err = revEntry.DecodeJSON(&revInfo) - require.NoError(t, err, "failed decoding storage entry: %v", revEntry) - revInfo.CertificateIssuer = "00000000-0000-0000-0000-000000000000" - revEntry, err = logical.StorageEntryJSON(storagePath, revInfo) - require.NoError(t, err, "failed re-encoding revocation info: %v", revInfo) - err = s.Put(ctx, revEntry) - require.NoError(t, err, "failed writing out new revocation entry: %v", revEntry) - - // Update our issuers to no longer have the OcspSigning usage - resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ - "usage": "read-only,issuing-certificates,crl-signing", - }) - requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer1") - resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId2.String(), map[string]interface{}{ - "usage": "read-only,issuing-certificates,crl-signing", - }) - requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer2") - - // Send the request - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - require.NoError(t, err) - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", 
"http_raw_body") - require.Equal(t, 401, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) -} - -// Verify that if we do have a revoked certificate entry for the request, that matches an -// issuer but that issuer does not have the OcspUsage flag set that we return an Unauthorized -// response back to the caller -func TestOcsp_RevokedCertHasIssuerWithoutOcspUsage(t *testing.T) { - b, s, testEnv := setupOcspEnv(t, "ec") - - // Revoke our certificate - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serialFromCert(testEnv.leafCertIssuer1), - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - // Update our issuer to no longer have the OcspSigning usage - resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ - "usage": "read-only,issuing-certificates,crl-signing", - }) - requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer") - requireFieldsSetInResp(t, resp, "usage") - - // Do not assume a specific ordering for usage... 
- usages, err := NewIssuerUsageFromNames(strings.Split(resp.Data["usage"].(string), ",")) - require.NoError(t, err, "failed parsing usage return value") - require.True(t, usages.HasUsage(IssuanceUsage)) - require.True(t, usages.HasUsage(CRLSigningUsage)) - require.False(t, usages.HasUsage(OCSPSigningUsage)) - - // Request an OCSP request from it, we should get an Unauthorized response back - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 401, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) -} - -// Verify if our matching issuer for a revocation entry has no key associated with it that -// we bail with an Unauthorized response. -func TestOcsp_RevokedCertHasIssuerWithoutAKey(t *testing.T) { - b, s, testEnv := setupOcspEnv(t, "ec") - - // Revoke our certificate - resp, err := CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serialFromCert(testEnv.leafCertIssuer1), - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - // Delete the key associated with our issuer - resp, err = CBRead(b, s, "issuer/"+testEnv.issuerId1.String()) - requireSuccessNonNilResponse(t, resp, err, "failed reading issuer") - requireFieldsSetInResp(t, resp, "key_id") - keyId := resp.Data["key_id"].(keyID) - - // This is a bit naughty but allow me to delete the key... 
- sc := b.makeStorageContext(context.Background(), s) - issuer, err := sc.fetchIssuerById(testEnv.issuerId1) - require.NoError(t, err, "failed to get issuer from storage") - issuer.KeyID = "" - err = sc.writeIssuer(issuer) - require.NoError(t, err, "failed to write issuer update") - - resp, err = CBDelete(b, s, "key/"+keyId.String()) - requireSuccessNonNilResponse(t, resp, err, "failed deleting key") - - // Request an OCSP request from it, we should get an Unauthorized response back - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 401, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - require.Equal(t, ocsp.UnauthorizedErrorResponse, respDer) -} - -// Verify if for some reason an end-user has rotated an existing certificate using the same -// key so our algo matches multiple issuers and one has OCSP usage disabled. We expect that -// even if a prior issuer issued the certificate, the new matching issuer can respond and sign -// the response to the caller on its behalf. -// -// NOTE: This test is a bit at the mercy of iteration order of the issuer ids. -// -// If it becomes flaky, most likely something is wrong in the code -// and not the test. 
-func TestOcsp_MultipleMatchingIssuersOneWithoutSigningUsage(t *testing.T) { - b, s, testEnv := setupOcspEnv(t, "ec") - - // Create a matching issuer as issuer1 with the same backing key - resp, err := CBWrite(b, s, "root/rotate/existing", map[string]interface{}{ - "key_ref": testEnv.keyId1, - "ttl": "40h", - "common_name": "example-ocsp.com", - }) - requireSuccessNonNilResponse(t, resp, err, "rotate issuer failed") - requireFieldsSetInResp(t, resp, "issuer_id") - rotatedCert := parseCert(t, resp.Data["certificate"].(string)) - - // Remove ocsp signing from our issuer - resp, err = CBPatch(b, s, "issuer/"+testEnv.issuerId1.String(), map[string]interface{}{ - "usage": "read-only,issuing-certificates,crl-signing", - }) - requireSuccessNonNilResponse(t, resp, err, "failed resetting usage flags on issuer") - requireFieldsSetInResp(t, resp, "usage") - // Do not assume a specific ordering for usage... - usages, err := NewIssuerUsageFromNames(strings.Split(resp.Data["usage"].(string), ",")) - require.NoError(t, err, "failed parsing usage return value") - require.True(t, usages.HasUsage(IssuanceUsage)) - require.True(t, usages.HasUsage(CRLSigningUsage)) - require.False(t, usages.HasUsage(OCSPSigningUsage)) - - // Request an OCSP request from it, we should get a Good response back, from the rotated cert - resp, err = SendOcspRequest(t, b, s, "get", testEnv.leafCertIssuer1, testEnv.issuer1, crypto.SHA1) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - ocspResp, err := ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Good, ocspResp.Status) - require.Equal(t, crypto.SHA1, ocspResp.IssuerHash) - require.Equal(t, 0, 
ocspResp.RevocationReason) - require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) - - requireOcspSignatureAlgoForKey(t, rotatedCert.SignatureAlgorithm, ocspResp.SignatureAlgorithm) - requireOcspResponseSignedBy(t, ocspResp, rotatedCert) -} - -// Make sure OCSP GET/POST requests work through the entire stack, and not just -// through the quicker backend layer the other tests are doing. -func TestOcsp_HigherLevel(t *testing.T) { - t.Parallel() - coreConfig := &vault.CoreConfig{ - LogicalBackends: map[string]logical.Factory{ - "pki": Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - client := cluster.Cores[0].Client - mountPKIEndpoint(t, client, "pki") - resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ - "key_type": "ec", - "common_name": "root-ca.com", - "ttl": "600h", - }) - - require.NoError(t, err, "error generating root ca: %v", err) - require.NotNil(t, resp, "expected ca info from root") - - issuerCert := parseCert(t, resp.Data["certificate"].(string)) - - resp, err = client.Logical().Write("pki/roles/example", map[string]interface{}{ - "allowed_domains": "example.com", - "allow_subdomains": "true", - "no_store": "false", // make sure we store this cert - "max_ttl": "1h", - "key_type": "ec", - }) - require.NoError(t, err, "error setting up pki role: %v", err) - - resp, err = client.Logical().Write("pki/issue/example", map[string]interface{}{ - "common_name": "test.example.com", - "ttl": "15m", - }) - require.NoError(t, err, "error issuing certificate: %v", err) - require.NotNil(t, resp, "got nil response from issuing request") - certToRevoke := parseCert(t, resp.Data["certificate"].(string)) - serialNum := resp.Data["serial_number"].(string) - - // Revoke the certificate - resp, err = client.Logical().Write("pki/revoke", map[string]interface{}{ - "serial_number": 
serialNum, - }) - require.NoError(t, err, "error revoking certificate: %v", err) - require.NotNil(t, resp, "got nil response from revoke") - - // Make sure that OCSP handler responds properly - ocspReq := generateRequest(t, crypto.SHA256, certToRevoke, issuerCert) - ocspPostReq := client.NewRequest(http.MethodPost, "/v1/pki/ocsp") - ocspPostReq.Headers.Set("Content-Type", "application/ocsp-request") - ocspPostReq.BodyBytes = ocspReq - rawResp, err := client.RawRequest(ocspPostReq) - require.NoError(t, err, "failed sending ocsp post request") - - require.Equal(t, 200, rawResp.StatusCode) - require.Equal(t, ocspResponseContentType, rawResp.Header.Get("Content-Type")) - bodyReader := rawResp.Body - respDer, err := io.ReadAll(bodyReader) - bodyReader.Close() - require.NoError(t, err, "failed reading response body") - - ocspResp, err := ocsp.ParseResponse(respDer, issuerCert) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Revoked, ocspResp.Status) - require.Equal(t, certToRevoke.SerialNumber, ocspResp.SerialNumber) - - // Test OCSP Get request for ocsp - urlEncoded := base64.StdEncoding.EncodeToString(ocspReq) - if strings.Contains(urlEncoded, "//") { - // workaround known redirect bug that is difficult to fix - t.Skipf("VAULT-13630 - Skipping GET OCSP test with encoded issuer cert containing // triggering redirection bug") - } - - ocspGetReq := client.NewRequest(http.MethodGet, "/v1/pki/ocsp/"+urlEncoded) - ocspGetReq.Headers.Set("Content-Type", "application/ocsp-request") - rawResp, err = client.RawRequest(ocspGetReq) - require.NoError(t, err, "failed sending ocsp get request") - - require.Equal(t, 200, rawResp.StatusCode) - require.Equal(t, ocspResponseContentType, rawResp.Header.Get("Content-Type")) - bodyReader = rawResp.Body - respDer, err = io.ReadAll(bodyReader) - bodyReader.Close() - require.NoError(t, err, "failed reading response body") - - ocspResp, err = ocsp.ParseResponse(respDer, issuerCert) - require.NoError(t, err, 
"parsing ocsp get response") - - require.Equal(t, ocsp.Revoked, ocspResp.Status) - require.Equal(t, certToRevoke.SerialNumber, ocspResp.SerialNumber) -} - -func TestOcsp_ValidRequests(t *testing.T) { - type caKeyConf struct { - keyType string - keyBits int - sigBits int - } - t.Parallel() - type testArgs struct { - reqType string - keyConf caKeyConf - reqHash crypto.Hash - } - var tests []testArgs - for _, reqType := range []string{"get", "post"} { - for _, keyConf := range []caKeyConf{ - {"rsa", 0, 0}, - {"rsa", 0, 384}, - {"rsa", 0, 512}, - {"ec", 0, 0}, - {"ec", 521, 0}, - } { - // "ed25519" is not supported at the moment in x/crypto/ocsp - for _, requestHash := range []crypto.Hash{crypto.SHA1, crypto.SHA256, crypto.SHA384, crypto.SHA512} { - tests = append(tests, testArgs{ - reqType: reqType, - keyConf: keyConf, - reqHash: requestHash, - }) - } - } - } - for _, tt := range tests { - localTT := tt - testName := fmt.Sprintf("%s-%s-keybits-%d-sigbits-%d-reqHash-%s", localTT.reqType, localTT.keyConf.keyType, - localTT.keyConf.keyBits, - localTT.keyConf.sigBits, - localTT.reqHash) - t.Run(testName, func(t *testing.T) { - runOcspRequestTest(t, localTT.reqType, localTT.keyConf.keyType, localTT.keyConf.keyBits, - localTT.keyConf.sigBits, localTT.reqHash) - }) - } -} - -func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, caKeyBits int, caKeySigBits int, requestHash crypto.Hash) { - b, s, testEnv := setupOcspEnvWithCaKeyConfig(t, caKeyType, caKeyBits, caKeySigBits) - - // Non-revoked cert - resp, err := SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer := resp.Data["http_raw_body"].([]byte) - - ocspResp, err := 
ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Good, ocspResp.Status) - require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, 0, ocspResp.RevocationReason) - require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) - - requireOcspSignatureAlgoForKey(t, testEnv.issuer1.SignatureAlgorithm, ocspResp.SignatureAlgorithm) - requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer1) - - // Now revoke it - resp, err = CBWrite(b, s, "revoke", map[string]interface{}{ - "serial_number": serialFromCert(testEnv.leafCertIssuer1), - }) - requireSuccessNonNilResponse(t, resp, err, "revoke") - - resp, err = SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer1, testEnv.issuer1, requestHash) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request with revoked") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer = resp.Data["http_raw_body"].([]byte) - - ocspResp, err = ocsp.ParseResponse(respDer, testEnv.issuer1) - require.NoError(t, err, "parsing ocsp get response with revoked") - - require.Equal(t, ocsp.Revoked, ocspResp.Status) - require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, 0, ocspResp.RevocationReason) - require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) - - requireOcspSignatureAlgoForKey(t, testEnv.issuer1.SignatureAlgorithm, ocspResp.SignatureAlgorithm) - requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer1) - - // Request status for our second issuer - resp, err = SendOcspRequest(t, b, s, requestType, testEnv.leafCertIssuer2, testEnv.issuer2, requestHash) - requireSuccessNonNilResponse(t, resp, err, "ocsp get request") - requireFieldsSetInResp(t, resp, "http_content_type", "http_status_code", "http_raw_body") - 
require.Equal(t, 200, resp.Data["http_status_code"]) - require.Equal(t, ocspResponseContentType, resp.Data["http_content_type"]) - respDer = resp.Data["http_raw_body"].([]byte) - - ocspResp, err = ocsp.ParseResponse(respDer, testEnv.issuer2) - require.NoError(t, err, "parsing ocsp get response") - - require.Equal(t, ocsp.Good, ocspResp.Status) - require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, 0, ocspResp.RevocationReason) - require.Equal(t, testEnv.leafCertIssuer2.SerialNumber, ocspResp.SerialNumber) - - // Verify that our thisUpdate and nextUpdate fields are updated as expected - thisUpdate := ocspResp.ThisUpdate - nextUpdate := ocspResp.NextUpdate - require.True(t, thisUpdate.Before(nextUpdate), - fmt.Sprintf("thisUpdate %s, should have been before nextUpdate: %s", thisUpdate, nextUpdate)) - nextUpdateDiff := nextUpdate.Sub(thisUpdate) - expectedDiff, err := parseutil.ParseDurationSecond(defaultCrlConfig.OcspExpiry) - require.NoError(t, err, "failed to parse default ocsp expiry value") - require.Equal(t, expectedDiff, nextUpdateDiff, - fmt.Sprintf("the delta between thisUpdate %s and nextUpdate: %s should have been around: %s but was %s", - thisUpdate, nextUpdate, defaultCrlConfig.OcspExpiry, nextUpdateDiff)) - - requireOcspSignatureAlgoForKey(t, testEnv.issuer2.SignatureAlgorithm, ocspResp.SignatureAlgorithm) - requireOcspResponseSignedBy(t, ocspResp, testEnv.issuer2) -} - -func requireOcspSignatureAlgoForKey(t *testing.T, expected x509.SignatureAlgorithm, actual x509.SignatureAlgorithm) { - t.Helper() - - require.Equal(t, expected.String(), actual.String()) -} - -type ocspTestEnv struct { - issuer1 *x509.Certificate - issuer2 *x509.Certificate - - issuerId1 issuerID - issuerId2 issuerID - - leafCertIssuer1 *x509.Certificate - leafCertIssuer2 *x509.Certificate - - keyId1 keyID - keyId2 keyID -} - -func setupOcspEnv(t *testing.T, keyType string) (*backend, logical.Storage, *ocspTestEnv) { - return setupOcspEnvWithCaKeyConfig(t, keyType, 0, 
0) -} - -func setupOcspEnvWithCaKeyConfig(t *testing.T, keyType string, caKeyBits int, caKeySigBits int) (*backend, logical.Storage, *ocspTestEnv) { - b, s := CreateBackendWithStorage(t) - var issuerCerts []*x509.Certificate - var leafCerts []*x509.Certificate - var issuerIds []issuerID - var keyIds []keyID - - for i := 0; i < 2; i++ { - resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ - "key_type": keyType, - "key_bits": caKeyBits, - "signature_bits": caKeySigBits, - "ttl": "40h", - "common_name": "example-ocsp.com", - }) - requireSuccessNonNilResponse(t, resp, err, "root/generate/internal") - requireFieldsSetInResp(t, resp, "issuer_id", "key_id") - issuerId := resp.Data["issuer_id"].(issuerID) - keyId := resp.Data["key_id"].(keyID) - - resp, err = CBWrite(b, s, "roles/test"+strconv.FormatInt(int64(i), 10), map[string]interface{}{ - "allow_bare_domains": true, - "allow_subdomains": true, - "allowed_domains": "foobar.com", - "no_store": false, - "generate_lease": false, - "issuer_ref": issuerId, - "key_type": keyType, - }) - requireSuccessNonNilResponse(t, resp, err, "roles/test"+strconv.FormatInt(int64(i), 10)) - - resp, err = CBWrite(b, s, "issue/test"+strconv.FormatInt(int64(i), 10), map[string]interface{}{ - "common_name": "test.foobar.com", - }) - requireSuccessNonNilResponse(t, resp, err, "roles/test"+strconv.FormatInt(int64(i), 10)) - requireFieldsSetInResp(t, resp, "certificate", "issuing_ca", "serial_number") - leafCert := parseCert(t, resp.Data["certificate"].(string)) - issuingCa := parseCert(t, resp.Data["issuing_ca"].(string)) - - issuerCerts = append(issuerCerts, issuingCa) - leafCerts = append(leafCerts, leafCert) - issuerIds = append(issuerIds, issuerId) - keyIds = append(keyIds, keyId) - } - - testEnv := &ocspTestEnv{ - issuerId1: issuerIds[0], - issuer1: issuerCerts[0], - leafCertIssuer1: leafCerts[0], - keyId1: keyIds[0], - - issuerId2: issuerIds[1], - issuer2: issuerCerts[1], - leafCertIssuer2: leafCerts[1], - keyId2: 
keyIds[1], - } - - return b, s, testEnv -} - -func SendOcspRequest(t *testing.T, b *backend, s logical.Storage, getOrPost string, cert, issuer *x509.Certificate, requestHash crypto.Hash) (*logical.Response, error) { - t.Helper() - - ocspRequest := generateRequest(t, requestHash, cert, issuer) - - switch strings.ToLower(getOrPost) { - case "get": - return sendOcspGetRequest(b, s, ocspRequest) - case "post": - return sendOcspPostRequest(b, s, ocspRequest) - default: - t.Fatalf("unsupported value for SendOcspRequest getOrPost arg: %s", getOrPost) - } - return nil, nil -} - -func sendOcspGetRequest(b *backend, s logical.Storage, ocspRequest []byte) (*logical.Response, error) { - urlEncoded := base64.StdEncoding.EncodeToString(ocspRequest) - return CBRead(b, s, "ocsp/"+urlEncoded) -} - -func sendOcspPostRequest(b *backend, s logical.Storage, ocspRequest []byte) (*logical.Response, error) { - reader := io.NopCloser(bytes.NewReader(ocspRequest)) - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: logical.UpdateOperation, - Path: "ocsp", - Storage: s, - MountPoint: "pki/", - HTTPRequest: &http.Request{ - Body: reader, - }, - }) - - return resp, err -} +```release-note:change +identity (enterprise): POST requests to the `/identity/entity/merge` endpoint +are now always forwarded from standbys to the active node. +``` \ No newline at end of file diff --git a/builtin/logical/pki/path_resign_crls.go b/builtin/logical/pki/path_resign_crls.go index 6113010dc8c5..63594dc6cee0 100644 --- a/builtin/logical/pki/path_resign_crls.go +++ b/builtin/logical/pki/path_resign_crls.go @@ -1,675 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package pki - -import ( - "context" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net/http" - "strconv" - "strings" - "time" - - "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/vault/sdk/framework" - "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/logical" -) - -const ( - crlNumberParam = "crl_number" - deltaCrlBaseNumberParam = "delta_crl_base_number" - nextUpdateParam = "next_update" - crlsParam = "crls" - formatParam = "format" -) - -var ( - akOid = asn1.ObjectIdentifier{2, 5, 29, 35} - crlNumOid = asn1.ObjectIdentifier{2, 5, 29, 20} - deltaCrlOid = asn1.ObjectIdentifier{2, 5, 29, 27} -) - -func pathResignCrls(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/resign-crls", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuer, - OperationVerb: "resign", - OperationSuffix: "crls", - }, - - Fields: map[string]*framework.FieldSchema{ - issuerRefParam: { - Type: framework.TypeString, - Description: `Reference to a existing issuer; either "default" -for the configured default issuer, an identifier or the name assigned -to the issuer.`, - Default: defaultRef, - }, - crlNumberParam: { - Type: framework.TypeInt, - Description: `The sequence number to be written within the CRL Number extension.`, - }, - deltaCrlBaseNumberParam: { - Type: framework.TypeInt, - Description: `Using a zero or greater value specifies the base CRL revision number to encode within - a Delta CRL indicator extension, otherwise the extension will not be added.`, - Default: -1, - }, - nextUpdateParam: { - Type: framework.TypeString, - Description: `The amount of time the generated CRL should be -valid; defaults to 72 hours.`, - Default: defaultCrlConfig.Expiry, - }, - 
crlsParam: { - Type: framework.TypeStringSlice, - Description: `A list of PEM encoded CRLs to combine, originally signed by the requested issuer.`, - }, - formatParam: { - Type: framework.TypeString, - Description: `The format of the combined CRL, can be "pem" or "der". If "der", the value will be -base64 encoded. Defaults to "pem".`, - Default: "pem", - }, - }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathUpdateResignCrlsHandler, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "crl": { - Type: framework.TypeString, - Description: `CRL`, - Required: true, - }, - }, - }}, - }, - }, - }, - - HelpSynopsis: `Combine and sign with the provided issuer different CRLs`, - HelpDescription: `Provide two or more PEM encoded CRLs signed by the issuer, - normally from separate Vault clusters to be combined and signed.`, - } -} - -func pathSignRevocationList(b *backend) *framework.Path { - return &framework.Path{ - Pattern: "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/sign-revocation-list", - - DisplayAttrs: &framework.DisplayAttributes{ - OperationPrefix: operationPrefixPKIIssuer, - OperationVerb: "sign", - OperationSuffix: "revocation-list", - }, - - Fields: map[string]*framework.FieldSchema{ - issuerRefParam: { - Type: framework.TypeString, - Description: `Reference to a existing issuer; either "default" -for the configured default issuer, an identifier or the name assigned -to the issuer.`, - Default: defaultRef, - }, - crlNumberParam: { - Type: framework.TypeInt, - Description: `The sequence number to be written within the CRL Number extension.`, - }, - deltaCrlBaseNumberParam: { - Type: framework.TypeInt, - Description: `Using a zero or greater value specifies the base CRL revision number to encode within - a Delta CRL indicator extension, otherwise the extension will not be added.`, - 
Default: -1, - }, - nextUpdateParam: { - Type: framework.TypeString, - Description: `The amount of time the generated CRL should be -valid; defaults to 72 hours.`, - Default: defaultCrlConfig.Expiry, - }, - formatParam: { - Type: framework.TypeString, - Description: `The format of the combined CRL, can be "pem" or "der". If "der", the value will be -base64 encoded. Defaults to "pem".`, - Default: "pem", - }, - "revoked_certs": { - Type: framework.TypeSlice, - Description: `A list of maps containing the keys serial_number (string), revocation_time (string), -and extensions (map with keys id (string), critical (bool), value (string))`, - }, - "extensions": { - Type: framework.TypeSlice, - Description: `A list of maps containing extensions with keys id (string), critical (bool), -value (string)`, - }, - }, - Operations: map[logical.Operation]framework.OperationHandler{ - logical.UpdateOperation: &framework.PathOperation{ - Callback: b.pathUpdateSignRevocationListHandler, - Responses: map[int][]framework.Response{ - http.StatusOK: {{ - Description: "OK", - Fields: map[string]*framework.FieldSchema{ - "crl": { - Type: framework.TypeString, - Description: `CRL`, - Required: true, - }, - }, - }}, - }, - }, - }, - - HelpSynopsis: `Generate and sign a CRL based on the provided parameters.`, - HelpDescription: `Given a list of revoked certificates and other parameters, -return a signed CRL based on the parameter values.`, - } -} - -func (b *backend) pathUpdateResignCrlsHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { - return logical.ErrorResponse("This API cannot be used until the migration has completed"), nil - } - - issuerRef := getIssuerRef(data) - crlNumber := data.Get(crlNumberParam).(int) - deltaCrlBaseNumber := data.Get(deltaCrlBaseNumberParam).(int) - nextUpdateStr := data.Get(nextUpdateParam).(string) - rawCrls := data.Get(crlsParam).([]string) - - format, err := 
parseCrlFormat(data.Get(formatParam).(string)) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - nextUpdateOffset, err := parseutil.ParseDurationSecond(nextUpdateStr) - if err != nil { - return logical.ErrorResponse("invalid value for %s: %v", nextUpdateParam, err), nil - } - - if nextUpdateOffset <= 0 { - return logical.ErrorResponse("%s parameter must be greater than 0", nextUpdateParam), nil - } - - if crlNumber < 0 { - return logical.ErrorResponse("%s parameter must be 0 or greater", crlNumberParam), nil - } - if deltaCrlBaseNumber < -1 { - return logical.ErrorResponse("%s parameter must be -1 or greater", deltaCrlBaseNumberParam), nil - } - - if issuerRef == "" { - return logical.ErrorResponse("%s parameter cannot be blank", issuerRefParam), nil - } - - providedCrls, err := decodePemCrls(rawCrls) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - sc := b.makeStorageContext(ctx, request.Storage) - caBundle, err := getCaBundle(sc, issuerRef) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - if err := verifyCrlsAreFromIssuersKey(caBundle.Certificate, providedCrls); err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - revokedCerts, warnings, err := getAllRevokedCertsFromPem(providedCrls) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - now := time.Now() - template := &x509.RevocationList{ - SignatureAlgorithm: caBundle.RevocationSigAlg, - RevokedCertificates: revokedCerts, - Number: big.NewInt(int64(crlNumber)), - ThisUpdate: now, - NextUpdate: now.Add(nextUpdateOffset), - } - - if deltaCrlBaseNumber > -1 { - ext, err := certutil.CreateDeltaCRLIndicatorExt(int64(deltaCrlBaseNumber)) - if err != nil { - return nil, fmt.Errorf("could not create crl delta indicator extension: %v", err) - } - template.ExtraExtensions = []pkix.Extension{ext} - } - - crlBytes, err := x509.CreateRevocationList(rand.Reader, template, caBundle.Certificate, 
caBundle.PrivateKey) - if err != nil { - return nil, fmt.Errorf("error creating new CRL: %w", err) - } - - body := encodeResponse(crlBytes, format == "der") - - return &logical.Response{ - Warnings: warnings, - Data: map[string]interface{}{ - "crl": body, - }, - }, nil -} - -func (b *backend) pathUpdateSignRevocationListHandler(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { - if b.useLegacyBundleCaStorage() { - return logical.ErrorResponse("This API cannot be used until the migration has completed"), nil - } - - issuerRef := getIssuerRef(data) - crlNumber := data.Get(crlNumberParam).(int) - deltaCrlBaseNumber := data.Get(deltaCrlBaseNumberParam).(int) - nextUpdateStr := data.Get(nextUpdateParam).(string) - nextUpdateOffset, err := parseutil.ParseDurationSecond(nextUpdateStr) - if err != nil { - return logical.ErrorResponse("invalid value for %s: %v", nextUpdateParam, err), nil - } - - if nextUpdateOffset <= 0 { - return logical.ErrorResponse("%s parameter must be greater than 0", nextUpdateParam), nil - } - - if crlNumber < 0 { - return logical.ErrorResponse("%s parameter must be 0 or greater", crlNumberParam), nil - } - if deltaCrlBaseNumber < -1 { - return logical.ErrorResponse("%s parameter must be -1 or greater", deltaCrlBaseNumberParam), nil - } - - if issuerRef == "" { - return logical.ErrorResponse("%s parameter cannot be blank", issuerRefParam), nil - } - - format, err := parseCrlFormat(data.Get(formatParam).(string)) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - revokedCerts, err := parseRevokedCertsParam(data.Get("revoked_certs").([]interface{})) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - crlExtensions, err := parseExtensionsParam(data.Get("extensions").([]interface{})) - if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - sc := b.makeStorageContext(ctx, request.Storage) - caBundle, err := getCaBundle(sc, issuerRef) - 
if err != nil { - return logical.ErrorResponse(err.Error()), nil - } - - if deltaCrlBaseNumber > -1 { - ext, err := certutil.CreateDeltaCRLIndicatorExt(int64(deltaCrlBaseNumber)) - if err != nil { - return nil, fmt.Errorf("could not create crl delta indicator extension: %v", err) - } - crlExtensions = append(crlExtensions, ext) - } - - now := time.Now() - template := &x509.RevocationList{ - SignatureAlgorithm: caBundle.RevocationSigAlg, - RevokedCertificates: revokedCerts, - Number: big.NewInt(int64(crlNumber)), - ThisUpdate: now, - NextUpdate: now.Add(nextUpdateOffset), - ExtraExtensions: crlExtensions, - } - - crlBytes, err := x509.CreateRevocationList(rand.Reader, template, caBundle.Certificate, caBundle.PrivateKey) - if err != nil { - return nil, fmt.Errorf("error creating new CRL: %w", err) - } - - body := encodeResponse(crlBytes, format == "der") - - return &logical.Response{ - Data: map[string]interface{}{ - "crl": body, - }, - }, nil -} - -func parseRevokedCertsParam(revokedCerts []interface{}) ([]pkix.RevokedCertificate, error) { - var parsedCerts []pkix.RevokedCertificate - seenSerials := make(map[*big.Int]int) - for i, entry := range revokedCerts { - if revokedCert, ok := entry.(map[string]interface{}); ok { - serialNum, err := parseSerialNum(revokedCert) - if err != nil { - return nil, fmt.Errorf("failed parsing serial_number from entry %d: %w", i, err) - } - - if origEntry, exists := seenSerials[serialNum]; exists { - serialNumStr := revokedCert["serial_number"] - return nil, fmt.Errorf("duplicate serial number: %s, original entry %d and %d", serialNumStr, origEntry, i) - } - - seenSerials[serialNum] = i - - revocationTime, err := parseRevocationTime(revokedCert) - if err != nil { - return nil, fmt.Errorf("failed parsing revocation_time from entry %d: %w", i, err) - } - - extensions, err := parseCertExtensions(revokedCert) - if err != nil { - return nil, fmt.Errorf("failed parsing extensions from entry %d: %w", i, err) - } - - parsedCerts = 
append(parsedCerts, pkix.RevokedCertificate{ - SerialNumber: serialNum, - RevocationTime: revocationTime, - Extensions: extensions, - }) - } - } - - return parsedCerts, nil -} - -func parseCertExtensions(cert map[string]interface{}) ([]pkix.Extension, error) { - extRaw, exists := cert["extensions"] - if !exists || extRaw == nil || extRaw == "" { - // We don't require extensions to be populated - return []pkix.Extension{}, nil - } - - extListRaw, ok := extRaw.([]interface{}) - if !ok { - return nil, errors.New("'extensions' field did not contain a slice") - } - - return parseExtensionsParam(extListRaw) -} - -func parseExtensionsParam(extRawList []interface{}) ([]pkix.Extension, error) { - var extensions []pkix.Extension - seenOid := make(map[string]struct{}) - for i, entryRaw := range extRawList { - entry, ok := entryRaw.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("extension entry %d not a map", i) - } - extension, err := parseExtension(entry) - if err != nil { - return nil, fmt.Errorf("failed parsing extension entry %d: %w", i, err) - } - - parsedIdStr := extension.Id.String() - if _, exists := seenOid[parsedIdStr]; exists { - return nil, fmt.Errorf("duplicate extension id: %s", parsedIdStr) - } - - seenOid[parsedIdStr] = struct{}{} - - extensions = append(extensions, extension) - } - - return extensions, nil -} - -func parseExtension(entry map[string]interface{}) (pkix.Extension, error) { - asnObjectId, err := parseExtAsn1ObjectId(entry) - if err != nil { - return pkix.Extension{}, err - } - - if asnObjectId.Equal(akOid) { - return pkix.Extension{}, fmt.Errorf("authority key object identifier (%s) is reserved", akOid.String()) - } - - if asnObjectId.Equal(crlNumOid) { - return pkix.Extension{}, fmt.Errorf("crl number object identifier (%s) is reserved", crlNumOid.String()) - } - - if asnObjectId.Equal(deltaCrlOid) { - return pkix.Extension{}, fmt.Errorf("delta crl object identifier (%s) is reserved", deltaCrlOid.String()) - } - - critical, err := 
parseExtCritical(entry) - if err != nil { - return pkix.Extension{}, err - } - - extVal, err := parseExtValue(entry) - if err != nil { - return pkix.Extension{}, err - } - - return pkix.Extension{ - Id: asnObjectId, - Critical: critical, - Value: extVal, - }, nil -} - -func parseExtValue(entry map[string]interface{}) ([]byte, error) { - valRaw, exists := entry["value"] - if !exists { - return nil, errors.New("missing 'value' field") - } - - valStr, err := parseutil.ParseString(valRaw) - if err != nil { - return nil, fmt.Errorf("'value' field value was not a string: %w", err) - } - - if len(valStr) == 0 { - return []byte{}, nil - } - - decodeString, err := base64.StdEncoding.DecodeString(valStr) - if err != nil { - return nil, fmt.Errorf("failed base64 decoding 'value' field: %w", err) - } - return decodeString, nil -} - -func parseExtCritical(entry map[string]interface{}) (bool, error) { - critRaw, exists := entry["critical"] - if !exists || critRaw == nil || critRaw == "" { - // Optional field, so just return as if they provided the value false. 
- return false, nil - } - - myBool, err := parseutil.ParseBool(critRaw) - if err != nil { - return false, fmt.Errorf("critical field value failed to be parsed: %w", err) - } - - return myBool, nil -} - -func parseExtAsn1ObjectId(entry map[string]interface{}) (asn1.ObjectIdentifier, error) { - idRaw, idExists := entry["id"] - if !idExists { - return asn1.ObjectIdentifier{}, errors.New("missing id field") - } - - oidStr, err := parseutil.ParseString(idRaw) - if err != nil { - return nil, fmt.Errorf("'id' field value was not a string: %w", err) - } - - if len(oidStr) == 0 { - return asn1.ObjectIdentifier{}, errors.New("zero length object identifier") - } - - // Parse out dot notation - oidParts := strings.Split(oidStr, ".") - oid := make(asn1.ObjectIdentifier, len(oidParts), len(oidParts)) - for i := range oidParts { - oidIntVal, err := strconv.Atoi(oidParts[i]) - if err != nil { - return nil, fmt.Errorf("failed parsing asn1 index element %d value %s: %w", i, oidParts[i], err) - } - oid[i] = oidIntVal - } - return oid, nil -} - -func parseRevocationTime(cert map[string]interface{}) (time.Time, error) { - var revTime time.Time - revTimeRaw, exists := cert["revocation_time"] - if !exists { - return revTime, errors.New("missing 'revocation_time' field") - } - revTime, err := parseutil.ParseAbsoluteTime(revTimeRaw) - if err != nil { - return revTime, fmt.Errorf("failed parsing time %v: %w", revTimeRaw, err) - } - return revTime, nil -} - -func parseSerialNum(cert map[string]interface{}) (*big.Int, error) { - serialNumRaw, serialExists := cert["serial_number"] - if !serialExists { - return nil, errors.New("missing 'serial_number' field") - } - serialNumStr, err := parseutil.ParseString(serialNumRaw) - if err != nil { - return nil, fmt.Errorf("'serial_number' field value was not a string: %w", err) - } - // Clean up any provided serials to decoder - for _, separator := range []string{":", ".", "-", " "} { - serialNumStr = strings.ReplaceAll(serialNumStr, separator, "") - } 
- // Prefer hex.DecodeString over certutil.ParseHexFormatted as we don't need a separator - serialBytes, err := hex.DecodeString(serialNumStr) - if err != nil { - return nil, fmt.Errorf("'serial_number' failed converting to bytes: %w", err) - } - - bigIntSerial := big.Int{} - bigIntSerial.SetBytes(serialBytes) - return &bigIntSerial, nil -} - -func parseCrlFormat(requestedValue string) (string, error) { - format := strings.ToLower(requestedValue) - switch format { - case "pem", "der": - return format, nil - default: - return "", fmt.Errorf("unknown format value of %s", requestedValue) - } -} - -func verifyCrlsAreFromIssuersKey(caCert *x509.Certificate, crls []*x509.RevocationList) error { - for i, crl := range crls { - // At this point we assume if the issuer's key signed the CRL that is a good enough check - // to validate that we owned/generated the provided CRL. - if err := crl.CheckSignatureFrom(caCert); err != nil { - return fmt.Errorf("CRL index: %d was not signed by requested issuer", i) - } - } - - return nil -} - -func encodeResponse(crlBytes []byte, derFormatRequested bool) string { - if derFormatRequested { - return base64.StdEncoding.EncodeToString(crlBytes) - } - - block := pem.Block{ - Type: "X509 CRL", - Bytes: crlBytes, - } - return string(pem.EncodeToMemory(&block)) -} - -func getAllRevokedCertsFromPem(crls []*x509.RevocationList) ([]pkix.RevokedCertificate, []string, error) { - uniqueCert := map[string]pkix.RevokedCertificate{} - var warnings []string - for _, crl := range crls { - for _, curCert := range crl.RevokedCertificates { - serial := serialFromBigInt(curCert.SerialNumber) - // Get rid of any extensions the existing certificate might have had. 
- curCert.Extensions = []pkix.Extension{} - - existingCert, exists := uniqueCert[serial] - if !exists { - // First time we see the revoked cert - uniqueCert[serial] = curCert - continue - } - - if existingCert.RevocationTime.Equal(curCert.RevocationTime) { - // Same revocation times, just skip it - continue - } - - warn := fmt.Sprintf("Duplicate serial %s with different revocation "+ - "times detected, using oldest revocation time", serial) - warnings = append(warnings, warn) - - if existingCert.RevocationTime.After(curCert.RevocationTime) { - uniqueCert[serial] = curCert - } - } - } - - var revokedCerts []pkix.RevokedCertificate - for _, cert := range uniqueCert { - revokedCerts = append(revokedCerts, cert) - } - - return revokedCerts, warnings, nil -} - -func getCaBundle(sc *storageContext, issuerRef string) (*certutil.CAInfoBundle, error) { - issuerId, err := sc.resolveIssuerReference(issuerRef) - if err != nil { - return nil, fmt.Errorf("failed to resolve issuer %s: %w", issuerRefParam, err) - } - - return sc.fetchCAInfoByIssuerId(issuerId, CRLSigningUsage) -} - -func decodePemCrls(rawCrls []string) ([]*x509.RevocationList, error) { - var crls []*x509.RevocationList - for i, rawCrl := range rawCrls { - crl, err := decodePemCrl(rawCrl) - if err != nil { - return nil, fmt.Errorf("failed decoding crl %d: %w", i, err) - } - crls = append(crls, crl) - } - - return crls, nil -} - -func decodePemCrl(crl string) (*x509.RevocationList, error) { - block, rest := pem.Decode([]byte(crl)) - if len(rest) != 0 { - return nil, errors.New("invalid crl; should be one PEM block only") - } - - return x509.ParseRevocationList(block.Bytes) -} +```release-note:bug +core: Fix a timeout initializing Vault by only using a short timeout persisting barrier keyring encryption counts. 
+``` diff --git a/builtin/logical/pki/test_helpers.go b/builtin/logical/pki/test_helpers.go index 1cdc98795e71..b77b3afc1938 100644 --- a/builtin/logical/pki/test_helpers.go +++ b/builtin/logical/pki/test_helpers.go @@ -1,438 +1,5 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package pki - -import ( - "context" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/pem" - "fmt" - "io" - "math" - "math/big" - http2 "net/http" - "strings" - "testing" - "time" - - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/certutil" - "github.com/hashicorp/vault/sdk/logical" - "github.com/stretchr/testify/require" - "golang.org/x/crypto/ocsp" -) - -// Setup helpers -func CreateBackendWithStorage(t testing.TB) (*backend, logical.Storage) { - t.Helper() - - config := logical.TestBackendConfig() - config.StorageView = &logical.InmemStorage{} - - var err error - b := Backend(config) - err = b.Setup(context.Background(), config) - if err != nil { - t.Fatal(err) - } - // Assume for our tests we have performed the migration already. 
- b.pkiStorageVersion.Store(1) - return b, config.StorageView -} - -func mountPKIEndpoint(t testing.TB, client *api.Client, path string) { - t.Helper() - - err := client.Sys().Mount(path, &api.MountInput{ - Type: "pki", - Config: api.MountConfigInput{ - DefaultLeaseTTL: "16h", - MaxLeaseTTL: "32h", - }, - }) - require.NoError(t, err, "failed mounting pki endpoint") -} - -// Signing helpers -func requireSignedBy(t *testing.T, cert *x509.Certificate, signingCert *x509.Certificate) { - t.Helper() - - if err := cert.CheckSignatureFrom(signingCert); err != nil { - t.Fatalf("signature verification failed: %v", err) - } -} - -func requireSignedByAtPath(t *testing.T, client *api.Client, leaf *x509.Certificate, path string) { - t.Helper() - - resp, err := client.Logical().Read(path) - require.NoError(t, err, "got unexpected error fetching parent certificate") - require.NotNil(t, resp, "missing response when fetching parent certificate") - require.NotNil(t, resp.Data, "missing data from parent certificate response") - require.NotNil(t, resp.Data["certificate"], "missing certificate field on parent read response") - - parentCert := resp.Data["certificate"].(string) - parent := parseCert(t, parentCert) - - requireSignedBy(t, leaf, parent) -} - -// Certificate helper -func parseCert(t *testing.T, pemCert string) *x509.Certificate { - t.Helper() - - block, _ := pem.Decode([]byte(pemCert)) - require.NotNil(t, block, "failed to decode PEM block") - - cert, err := x509.ParseCertificate(block.Bytes) - require.NoError(t, err) - return cert -} - -func requireMatchingPublicKeys(t *testing.T, cert *x509.Certificate, key crypto.PublicKey) { - t.Helper() - - certPubKey := cert.PublicKey - areEqual, err := certutil.ComparePublicKeysAndType(certPubKey, key) - require.NoError(t, err, "failed comparing public keys: %#v", err) - require.True(t, areEqual, "public keys mismatched: got: %v, expected: %v", certPubKey, key) -} - -func getSelfSigned(t *testing.T, subject, issuer *x509.Certificate, 
key *rsa.PrivateKey) (string, *x509.Certificate) { - t.Helper() - selfSigned, err := x509.CreateCertificate(rand.Reader, subject, issuer, key.Public(), key) - if err != nil { - t.Fatal(err) - } - cert, err := x509.ParseCertificate(selfSigned) - if err != nil { - t.Fatal(err) - } - pemSS := strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: selfSigned, - }))) - return pemSS, cert -} - -// CRL related helpers -func getCrlCertificateList(t *testing.T, client *api.Client, mountPoint string) pkix.TBSCertificateList { - t.Helper() - - path := fmt.Sprintf("/v1/%s/crl", mountPoint) - return getParsedCrlAtPath(t, client, path).TBSCertList -} - -func parseCrlPemBytes(t *testing.T, crlPem []byte) pkix.TBSCertificateList { - t.Helper() - - certList, err := x509.ParseCRL(crlPem) - require.NoError(t, err) - return certList.TBSCertList -} - -func requireSerialNumberInCRL(t *testing.T, revokeList pkix.TBSCertificateList, serialNum string) bool { - if t != nil { - t.Helper() - } - - serialsInList := make([]string, 0, len(revokeList.RevokedCertificates)) - for _, revokeEntry := range revokeList.RevokedCertificates { - formattedSerial := certutil.GetHexFormatted(revokeEntry.SerialNumber.Bytes(), ":") - serialsInList = append(serialsInList, formattedSerial) - if formattedSerial == serialNum { - return true - } - } - - if t != nil { - t.Fatalf("the serial number %s, was not found in the CRL list containing: %v", serialNum, serialsInList) - } - - return false -} - -func getParsedCrl(t *testing.T, client *api.Client, mountPoint string) *pkix.CertificateList { - t.Helper() - - path := fmt.Sprintf("/v1/%s/crl", mountPoint) - return getParsedCrlAtPath(t, client, path) -} - -func getParsedCrlAtPath(t *testing.T, client *api.Client, path string) *pkix.CertificateList { - t.Helper() - - req := client.NewRequest("GET", path) - resp, err := client.RawRequest(req) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - crlBytes, err := 
io.ReadAll(resp.Body) - if err != nil { - t.Fatalf("err: %s", err) - } - if len(crlBytes) == 0 { - t.Fatalf("expected CRL in response body") - } - - crl, err := x509.ParseDERCRL(crlBytes) - if err != nil { - t.Fatal(err) - } - return crl -} - -func getParsedCrlFromBackend(t *testing.T, b *backend, s logical.Storage, path string) *pkix.CertificateList { - t.Helper() - - resp, err := CBRead(b, s, path) - if err != nil { - t.Fatal(err) - } - - crl, err := x509.ParseDERCRL(resp.Data[logical.HTTPRawBody].([]byte)) - if err != nil { - t.Fatal(err) - } - return crl -} - -// Direct storage backend helpers (b, s := createBackendWithStorage(t)) which -// are mostly compatible with client.Logical() operations. The main difference -// is that the JSON round-tripping hasn't occurred, so values are as the -// backend returns them (e.g., []string instead of []interface{}). -func CBReq(b *backend, s logical.Storage, operation logical.Operation, path string, data map[string]interface{}) (*logical.Response, error) { - resp, err := b.HandleRequest(context.Background(), &logical.Request{ - Operation: operation, - Path: path, - Data: data, - Storage: s, - MountPoint: "pki/", - }) - if err != nil || resp == nil { - return resp, err - } - - if msg, ok := resp.Data["error"]; ok && msg != nil && len(msg.(string)) > 0 { - return resp, fmt.Errorf("%s", msg) - } - - return resp, nil -} - -func CBHeader(b *backend, s logical.Storage, path string) (*logical.Response, error) { - return CBReq(b, s, logical.HeaderOperation, path, make(map[string]interface{})) -} - -func CBRead(b *backend, s logical.Storage, path string) (*logical.Response, error) { - return CBReq(b, s, logical.ReadOperation, path, make(map[string]interface{})) -} - -func CBWrite(b *backend, s logical.Storage, path string, data map[string]interface{}) (*logical.Response, error) { - return CBReq(b, s, logical.UpdateOperation, path, data) -} - -func CBPatch(b *backend, s logical.Storage, path string, data map[string]interface{}) 
(*logical.Response, error) { - return CBReq(b, s, logical.PatchOperation, path, data) -} - -func CBList(b *backend, s logical.Storage, path string) (*logical.Response, error) { - return CBReq(b, s, logical.ListOperation, path, make(map[string]interface{})) -} - -func CBDelete(b *backend, s logical.Storage, path string) (*logical.Response, error) { - return CBReq(b, s, logical.DeleteOperation, path, make(map[string]interface{})) -} - -func requireFieldsSetInResp(t *testing.T, resp *logical.Response, fields ...string) { - t.Helper() - - var missingFields []string - for _, field := range fields { - value, ok := resp.Data[field] - if !ok || value == nil { - missingFields = append(missingFields, field) - } - } - - require.Empty(t, missingFields, "The following fields were required but missing from response:\n%v", resp.Data) -} - -func requireSuccessNonNilResponse(t *testing.T, resp *logical.Response, err error, msgAndArgs ...interface{}) { - t.Helper() - - require.NoError(t, err, msgAndArgs...) - if resp.IsError() { - errContext := fmt.Sprintf("Expected successful response but got error: %v", resp.Error()) - require.Falsef(t, resp.IsError(), errContext, msgAndArgs...) - } - require.NotNil(t, resp, msgAndArgs...) -} - -func requireSuccessNilResponse(t *testing.T, resp *logical.Response, err error, msgAndArgs ...interface{}) { - t.Helper() - - require.NoError(t, err, msgAndArgs...) - if resp.IsError() { - errContext := fmt.Sprintf("Expected successful response but got error: %v", resp.Error()) - require.Falsef(t, resp.IsError(), errContext, msgAndArgs...) - } - if resp != nil { - msg := fmt.Sprintf("expected nil response but got: %v", resp) - require.Nilf(t, resp, msg, msgAndArgs...) 
- } -} - -func getCRLNumber(t *testing.T, crl pkix.TBSCertificateList) int { - t.Helper() - - for _, extension := range crl.Extensions { - if extension.Id.Equal(certutil.CRLNumberOID) { - bigInt := new(big.Int) - leftOver, err := asn1.Unmarshal(extension.Value, &bigInt) - require.NoError(t, err, "Failed unmarshalling crl number extension") - require.Empty(t, leftOver, "leftover bytes from unmarshalling crl number extension") - require.True(t, bigInt.IsInt64(), "parsed crl number integer is not an int64") - require.False(t, math.MaxInt <= bigInt.Int64(), "parsed crl number integer can not fit in an int") - return int(bigInt.Int64()) - } - } - - t.Fatalf("failed to find crl number extension") - return 0 -} - -func getCrlReferenceFromDelta(t *testing.T, crl pkix.TBSCertificateList) int { - t.Helper() - - for _, extension := range crl.Extensions { - if extension.Id.Equal(certutil.DeltaCRLIndicatorOID) { - bigInt := new(big.Int) - leftOver, err := asn1.Unmarshal(extension.Value, &bigInt) - require.NoError(t, err, "Failed unmarshalling delta crl indicator extension") - require.Empty(t, leftOver, "leftover bytes from unmarshalling delta crl indicator extension") - require.True(t, bigInt.IsInt64(), "parsed delta crl integer is not an int64") - require.False(t, math.MaxInt <= bigInt.Int64(), "parsed delta crl integer can not fit in an int") - return int(bigInt.Int64()) - } - } - - t.Fatalf("failed to find delta crl indicator extension") - return 0 -} - -// waitForUpdatedCrl will wait until the CRL at the provided path has been reloaded -// up for a maxWait duration and gives up if the timeout has been reached. If a negative -// value for lastSeenCRLNumber is provided, the method will load the current CRL and wait -// for a newer CRL be generated. 
-func waitForUpdatedCrl(t *testing.T, client *api.Client, crlPath string, lastSeenCRLNumber int, maxWait time.Duration) pkix.TBSCertificateList { - t.Helper() - - newCrl, didTimeOut := waitForUpdatedCrlUntil(t, client, crlPath, lastSeenCRLNumber, maxWait) - if didTimeOut { - t.Fatalf("Timed out waiting for new CRL rebuild on path %s", crlPath) - } - return newCrl.TBSCertList -} - -// waitForUpdatedCrlUntil is a helper method that will wait for a CRL to be updated up until maxWait duration -// or give up and return the last CRL it loaded. It will not fail, if it does not see a new CRL within the -// max duration unlike waitForUpdatedCrl. Returns the last loaded CRL at the provided path and a boolean -// indicating if we hit maxWait duration or not. -func waitForUpdatedCrlUntil(t *testing.T, client *api.Client, crlPath string, lastSeenCrlNumber int, maxWait time.Duration) (*pkix.CertificateList, bool) { - t.Helper() - - crl := getParsedCrlAtPath(t, client, crlPath) - initialCrlRevision := getCRLNumber(t, crl.TBSCertList) - newCrlRevision := initialCrlRevision - - // Short circuit the fetches if we have a version of the CRL we want - if lastSeenCrlNumber > 0 && getCRLNumber(t, crl.TBSCertList) > lastSeenCrlNumber { - return crl, false - } - - start := time.Now() - iteration := 0 - for { - iteration++ - - if time.Since(start) > maxWait { - t.Logf("Timed out waiting for new CRL on path %s after iteration %d, delay: %v", - crlPath, iteration, time.Now().Sub(start)) - return crl, true - } - - crl = getParsedCrlAtPath(t, client, crlPath) - newCrlRevision = getCRLNumber(t, crl.TBSCertList) - if newCrlRevision > initialCrlRevision { - t.Logf("Got new revision of CRL %s from %d to %d after iteration %d, delay %v", - crlPath, initialCrlRevision, newCrlRevision, iteration, time.Now().Sub(start)) - return crl, false - } - - time.Sleep(100 * time.Millisecond) - } -} - -// A quick CRL to string to provide better test error messages -func summarizeCrl(t *testing.T, crl 
pkix.TBSCertificateList) string { - version := getCRLNumber(t, crl) - serials := []string{} - for _, cert := range crl.RevokedCertificates { - serials = append(serials, normalizeSerialFromBigInt(cert.SerialNumber)) - } - return fmt.Sprintf("CRL Version: %d\n"+ - "This Update: %s\n"+ - "Next Update: %s\n"+ - "Revoked Serial Count: %d\n"+ - "Revoked Serials: %v", version, crl.ThisUpdate, crl.NextUpdate, len(serials), serials) -} - -// OCSP helpers -func generateRequest(t *testing.T, requestHash crypto.Hash, cert *x509.Certificate, issuer *x509.Certificate) []byte { - t.Helper() - - opts := &ocsp.RequestOptions{Hash: requestHash} - ocspRequestDer, err := ocsp.CreateRequest(cert, issuer, opts) - require.NoError(t, err, "Failed generating OCSP request") - return ocspRequestDer -} - -func requireOcspResponseSignedBy(t *testing.T, ocspResp *ocsp.Response, issuer *x509.Certificate) { - t.Helper() - - err := ocspResp.CheckSignatureFrom(issuer) - require.NoError(t, err, "Failed signature verification of ocsp response: %w", err) -} - -func performOcspPost(t *testing.T, cert *x509.Certificate, issuerCert *x509.Certificate, client *api.Client, ocspPath string) *ocsp.Response { - t.Helper() - - baseClient := client.WithNamespace("") - - ocspReq := generateRequest(t, crypto.SHA256, cert, issuerCert) - ocspPostReq := baseClient.NewRequest(http2.MethodPost, ocspPath) - ocspPostReq.Headers.Set("Content-Type", "application/ocsp-request") - ocspPostReq.BodyBytes = ocspReq - rawResp, err := baseClient.RawRequest(ocspPostReq) - require.NoError(t, err, "failed sending unified-ocsp post request") - - require.Equal(t, 200, rawResp.StatusCode) - require.Equal(t, ocspResponseContentType, rawResp.Header.Get("Content-Type")) - bodyReader := rawResp.Body - respDer, err := io.ReadAll(bodyReader) - bodyReader.Close() - require.NoError(t, err, "failed reading response body") - - ocspResp, err := ocsp.ParseResponse(respDer, issuerCert) - require.NoError(t, err, "parsing ocsp get response") - return 
ocspResp -} +```release-note:improvement +api: sys/health and sys/ha-status now expose information about how long +the last heartbeat took, and the estimated clock skew between standby and +active node based on that heartbeat duration. +``` \ No newline at end of file diff --git a/changelog/23571.txt b/changelog/23571.txt new file mode 100644 index 000000000000..2bccb7febdf4 --- /dev/null +++ b/changelog/23571.txt @@ -0,0 +1,3374 @@ +## Previous versions +- [v1.0.0 - v1.9.10](CHANGELOG-pre-v1.10.md) +- [v0.11.6 and earlier](CHANGELOG-v0.md) + +## 1.15.3 +### November 30, 2023 + +CHANGES: + +* core: Bump Go version to 1.21.4. + +IMPROVEMENTS: + +* core (enterprise): Speed up unseal when using namespaces +* core: update sys/seal-status (and CLI vault status) to report the type of +the seal when unsealed, as well as the type of the recovery seal if an +auto-seal. [[GH-23022](https://github.com/hashicorp/vault/pull/23022)] +* secrets/pki: do not check TLS validity on ACME requests redirected to https [[GH-22521](https://github.com/hashicorp/vault/pull/22521)] +* ui: Sort list view of entities and aliases alphabetically using the item name [[GH-24103](https://github.com/hashicorp/vault/pull/24103)] +* ui: capabilities-self is always called in the user's root namespace [[GH-24168](https://github.com/hashicorp/vault/pull/24168)] + +BUG FIXES: + +* activity log (enterprise): De-duplicate client count estimates for license utilization reporting. +* auth/cert: Handle errors related to expired OCSP server responses [[GH-24193](https://github.com/hashicorp/vault/pull/24193)] +* core (Enterprise): Treat multiple disabled HA seals as a migration to Shamir. +* core/audit: Audit logging a Vault response will now use a 5 second context timeout, separate from the original request. [[GH-24238](https://github.com/hashicorp/vault/pull/24238)] +* core/config: Use correct HCL config value when configuring `log_requests_level`. 
[[GH-24059](https://github.com/hashicorp/vault/pull/24059)] +* core/quotas: Close rate-limit blocked client purge goroutines when sealing [[GH-24108](https://github.com/hashicorp/vault/pull/24108)] +* core: Fix an error that resulted in the wrong seal type being returned by sys/seal-status while +Vault is in seal migration mode. [[GH-24165](https://github.com/hashicorp/vault/pull/24165)] +* replication (enterprise): disallow configuring paths filter for a mount path that does not exist +* secrets-sync (enterprise): Fix panic when setting usage_gauge_period to none +* secrets/pki: Do not set nextUpdate field in OCSP responses when ocsp_expiry is 0 [[GH-24192](https://github.com/hashicorp/vault/pull/24192)] +* secrets/transit: Fix a panic when attempting to export a public RSA key [[GH-24054](https://github.com/hashicorp/vault/pull/24054)] +* ui: Fix JSON editor in KV V2 unable to handle pasted values [[GH-24224](https://github.com/hashicorp/vault/pull/24224)] +* ui: Fix error when tuning token auth configuration within namespace [[GH-24147](https://github.com/hashicorp/vault/pull/24147)] +* ui: show error from API when seal fails [[GH-23921](https://github.com/hashicorp/vault/pull/23921)] + +## 1.15.2 +### November 09, 2023 + +SECURITY: +* core: inbound client requests triggering a policy check can lead to an unbounded consumption of memory. A large number of these requests may lead to denial-of-service. This vulnerability, CVE-2023-5954, was introduced in Vault 1.15.0, 1.14.3, and 1.13.7, and is fixed in Vault 1.15.2, 1.14.6, and 1.13.10. 
[[HSEC-2023-33](https://discuss.hashicorp.com/t/hcsec-2023-33-vault-requests-triggering-policy-checks-may-lead-to-unbounded-memory-consumption/59926)] + +CHANGES: + +* auth/approle: Normalized error response messages when invalid credentials are provided [[GH-23786](https://github.com/hashicorp/vault/pull/23786)] +* secrets/mongodbatlas: Update plugin to v0.10.2 [[GH-23849](https://github.com/hashicorp/vault/pull/23849)] + +FEATURES: + +* cli/snapshot: Add CLI tool to inspect Vault snapshots [[GH-23457](https://github.com/hashicorp/vault/pull/23457)] + +IMPROVEMENTS: + +* api (enterprise): Enable the sys/license/features from any namespace +* storage/etcd: etcd should only return keys when calling List() [[GH-23872](https://github.com/hashicorp/vault/pull/23872)] +* ui: Update flat, shell-quote and swagger-ui-dist packages. Remove swagger-ui styling overrides. [[GH-23700](https://github.com/hashicorp/vault/pull/23700)] +* ui: Update sidebar Secrets engine to title case. [[GH-23964](https://github.com/hashicorp/vault/pull/23964)] + +BUG FIXES: + +* api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured +on the request. [[GH-23861](https://github.com/hashicorp/vault/pull/23861)] +* core (enterprise): Do not return an internal error when token policy type lookup fails, log it instead and continue. +* core/activity: Fixes segments fragment loss due to exceeding entry record size limit [[GH-23781](https://github.com/hashicorp/vault/pull/23781)] +* core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" when filter paths are enforced returns 500 error code from the secondary [[GH-23802](https://github.com/hashicorp/vault/pull/23802)] +* core: Revert PR causing memory consumption bug [[GH-23986](https://github.com/hashicorp/vault/pull/23986)] +* core: Skip unnecessary deriving of policies during Login MFA Check. 
[[GH-23894](https://github.com/hashicorp/vault/pull/23894)] +* core: fix bug where deadlock detection was always on for expiration and quotas. +These can now be configured individually with `detect_deadlocks`. [[GH-23902](https://github.com/hashicorp/vault/pull/23902)] +* core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash [[GH-23874](https://github.com/hashicorp/vault/pull/23874)] +* expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. [[GH-24027](https://github.com/hashicorp/vault/pull/24027)] +* ui: fix broken GUI when accessing from listener with chroot_namespace defined [[GH-23942](https://github.com/hashicorp/vault/pull/23942)] + +## 1.15.1 +### October 25, 2023 + +CHANGES: + +* core: Bump Go version to 1.21.3. + +IMPROVEMENTS: + +* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)] +* auto-auth/azure: Support setting the `authenticate_from_environment` variable to "true" and "false" string literals, too. [[GH-22996](https://github.com/hashicorp/vault/pull/22996)] +* secrets-sync (enterprise): Added telemetry on number of destinations and associations per type. +* ui: Adds a warning when whitespace is detected in a key of a KV secret [[GH-23702](https://github.com/hashicorp/vault/pull/23702)] +* ui: Adds toggle to KV secrets engine value download modal to optionally stringify value in downloaded file [[GH-23747](https://github.com/hashicorp/vault/pull/23747)] +* ui: Surface warning banner if UI has stopped auto-refreshing token [[GH-23143](https://github.com/hashicorp/vault/pull/23143)] +* ui: show banner when resultant-acl check fails due to permissions or wrong namespace. 
[[GH-23503](https://github.com/hashicorp/vault/pull/23503)] + +BUG FIXES: + +* Seal HA (enterprise/beta): Fix rejection of a seal configuration change +from two to one auto seal due to persistence of the previous seal type being +"multiseal". [[GH-23573](https://github.com/hashicorp/vault/pull/23573)] +* audit: Fix bug reopening 'file' audit devices on SIGHUP. [[GH-23598](https://github.com/hashicorp/vault/pull/23598)] +* auth/aws: Fixes a panic that can occur in IAM-based login when a [client config](https://developer.hashicorp.com/vault/api-docs/auth/aws#configure-client) does not exist. [[GH-23555](https://github.com/hashicorp/vault/pull/23555)] +* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)] +* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)] +* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)] +* kmip (enterprise): Improve handling of failures due to storage replication issues. +* kmip (enterprise): Return a structure in the response for query function Query Server Information. +* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)] +* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster. 
+* replication (enterprise): Fix a missing unlock when changing replication state +* secrets-sync (enterprise): Fixed issue where we could sync a deleted secret +* secrets/aws: update credential rotation deadline when static role rotation period is updated [[GH-23528](https://github.com/hashicorp/vault/pull/23528)] +* secrets/consul: Fix revocations when Vault has an access token using specific namespace and admin partition policies [[GH-23010](https://github.com/hashicorp/vault/pull/23010)] +* secrets/pki: Stop processing in-flight ACME verifications when an active node steps down [[GH-23278](https://github.com/hashicorp/vault/pull/23278)] +* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about it not containing a private key +* secrets/transit (enterprise): Address panic when using GCP,AWS,Azure managed keys for encryption operations. At this time all encryption operations for the cloud providers have been disabled, only signing operations are supported. +* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations +* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)] +* storage/consul: fix a bug where an active node in a specific sort of network +partition could continue to write data to Consul after a new leader is elected +potentially causing data loss or corruption for keys with many concurrent +writers. For Enterprise clusters this could cause corruption of the merkle trees +leading to failure to complete merkle sync without a full re-index. 
[[GH-23013](https://github.com/hashicorp/vault/pull/23013)] +* ui: Assumes version 1 for kv engines when options are null because no version is specified [[GH-23585](https://github.com/hashicorp/vault/pull/23585)] +* ui: Decode the connection url for display on the connection details page [[GH-23695](https://github.com/hashicorp/vault/pull/23695)] +* ui: Fix AWS secret engine to allow empty policy_document field. [[GH-23470](https://github.com/hashicorp/vault/pull/23470)] +* ui: Fix bug where auth items were not listed when within a namespace. [[GH-23446](https://github.com/hashicorp/vault/pull/23446)] +* ui: Fix regression that broke the oktaNumberChallenge on the ui. [[GH-23565](https://github.com/hashicorp/vault/pull/23565)] +* ui: Fix the copy token button in the sidebar navigation window when in a collapsed state. [[GH-23331](https://github.com/hashicorp/vault/pull/23331)] +* ui: Fixes issue where you could not share the list view URL from the KV v2 secrets engine. [[GH-23620](https://github.com/hashicorp/vault/pull/23620)] +* ui: Fixes issue with sidebar navigation links disappearing when navigating to policies when a user is not authorized [[GH-23516](https://github.com/hashicorp/vault/pull/23516)] +* ui: Fixes issues displaying accurate TLS state in dashboard configuration details [[GH-23726](https://github.com/hashicorp/vault/pull/23726)] + +## 1.15.0 +### September 27, 2023 + +SECURITY: + +* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)] +* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. 
This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8.[[HSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)] + +CHANGES: + +* auth/alicloud: Update plugin to v0.16.0 [[GH-22646](https://github.com/hashicorp/vault/pull/22646)] +* auth/azure: Update plugin to v0.16.0 [[GH-22277](https://github.com/hashicorp/vault/pull/22277)] +* auth/azure: Update plugin to v0.16.1 [[GH-22795](https://github.com/hashicorp/vault/pull/22795)] +* auth/azure: Update plugin to v0.16.2 [[GH-23060](https://github.com/hashicorp/vault/pull/23060)] +* auth/cf: Update plugin to v0.15.1 [[GH-22758](https://github.com/hashicorp/vault/pull/22758)] +* auth/gcp: Update plugin to v0.16.1 [[GH-22612](https://github.com/hashicorp/vault/pull/22612)] +* auth/jwt: Update plugin to v0.17.0 [[GH-22678](https://github.com/hashicorp/vault/pull/22678)] +* auth/kerberos: Update plugin to v0.10.1 [[GH-22797](https://github.com/hashicorp/vault/pull/22797)] +* auth/kubernetes: Update plugin to v0.17.0 [[GH-22709](https://github.com/hashicorp/vault/pull/22709)] +* auth/kubernetes: Update plugin to v0.17.1 [[GH-22879](https://github.com/hashicorp/vault/pull/22879)] +* auth/ldap: Normalize HTTP response codes when invalid credentials are provided [[GH-21282](https://github.com/hashicorp/vault/pull/21282)] +* auth/oci: Update plugin to v0.14.2 [[GH-22805](https://github.com/hashicorp/vault/pull/22805)] +* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy +* core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace), +which will have access to some system backend paths that were previously only accessible in the root namespace. [[GH-21215](https://github.com/hashicorp/vault/pull/21215)] +* core: Bump Go version to 1.21.1. 
+* database/couchbase: Update plugin to v0.9.3 [[GH-22854](https://github.com/hashicorp/vault/pull/22854)] +* database/couchbase: Update plugin to v0.9.4 [[GH-22871](https://github.com/hashicorp/vault/pull/22871)] +* database/elasticsearch: Update plugin to v0.13.3 [[GH-22696](https://github.com/hashicorp/vault/pull/22696)] +* database/mongodbatlas: Update plugin to v0.10.1 [[GH-22655](https://github.com/hashicorp/vault/pull/22655)] +* database/redis-elasticache: Update plugin to v0.2.2 [[GH-22584](https://github.com/hashicorp/vault/pull/22584)] +* database/redis-elasticache: Update plugin to v0.2.3 [[GH-22598](https://github.com/hashicorp/vault/pull/22598)] +* database/redis: Update plugin to v0.2.2 [[GH-22654](https://github.com/hashicorp/vault/pull/22654)] +* database/snowflake: Update plugin to v0.9.0 [[GH-22516](https://github.com/hashicorp/vault/pull/22516)] +* events: Log level for processing an event dropped from info to debug. [[GH-22997](https://github.com/hashicorp/vault/pull/22997)] +* events: `data_path` will include full data path of secret, including name. [[GH-22487](https://github.com/hashicorp/vault/pull/22487)] +* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host +* sdk/logical/events: `EventSender` interface method is now `SendEvent` instead of `Send`. 
[[GH-22487](https://github.com/hashicorp/vault/pull/22487)] +* secrets/ad: Update plugin to v0.16.1 [[GH-22856](https://github.com/hashicorp/vault/pull/22856)] +* secrets/alicloud: Update plugin to v0.15.1 [[GH-22533](https://github.com/hashicorp/vault/pull/22533)] +* secrets/azure: Update plugin to v0.16.2 [[GH-22799](https://github.com/hashicorp/vault/pull/22799)] +* secrets/azure: Update plugin to v0.16.3 [[GH-22824](https://github.com/hashicorp/vault/pull/22824)] +* secrets/gcp: Update plugin to v0.17.0 [[GH-22746](https://github.com/hashicorp/vault/pull/22746)] +* secrets/gcpkms: Update plugin to v0.15.1 [[GH-22757](https://github.com/hashicorp/vault/pull/22757)] +* secrets/keymgmt: Update plugin to v0.9.3 +* secrets/kubernetes: Update plugin to v0.6.0 [[GH-22823](https://github.com/hashicorp/vault/pull/22823)] +* secrets/kv: Update plugin to v0.16.1 [[GH-22716](https://github.com/hashicorp/vault/pull/22716)] +* secrets/mongodbatlas: Update plugin to v0.10.1 [[GH-22748](https://github.com/hashicorp/vault/pull/22748)] +* secrets/openldap: Update plugin to v0.11.2 [[GH-22734](https://github.com/hashicorp/vault/pull/22734)] +* secrets/terraform: Update plugin to v0.7.3 [[GH-22907](https://github.com/hashicorp/vault/pull/22907)] +* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests, a warning will be returned if max_ttl was applied. +* storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. [[GH-20825](https://github.com/hashicorp/vault/pull/20825)] +* telemetry: Replace `vault.rollback.attempt.{MOUNT_POINT}` and `vault.route.rollback.{MOUNT_POINT}` metrics with `vault.rollback.attempt` and `vault.route.rollback metrics` by default. Added a telemetry configuration `add_mount_point_rollback_metrics` which, when set to true, causes vault to emit the metrics with mount points in their names. 
[[GH-22400](https://github.com/hashicorp/vault/pull/22400)] + +FEATURES: + +* **Certificate Issuance External Policy Service (CIEPS) (enterprise)**: Allow highly-customizable operator control of certificate validation and generation through the PKI Secrets Engine. +* **Copyable KV v2 paths in UI**: KV v2 secret paths are copyable for use in CLI commands or API calls [[GH-22551](https://github.com/hashicorp/vault/pull/22551)] +* **Dashboard UI**: Dashboard is now available in the UI as the new landing page. [[GH-21057](https://github.com/hashicorp/vault/pull/21057)] +* **Database Static Role Advanced TTL Management**: Adds the ability to rotate +* **Event System**: Add subscribe capability and subscribe_event_types to policies for events. [[GH-22474](https://github.com/hashicorp/vault/pull/22474)] +static roles on a defined schedule. [[GH-22484](https://github.com/hashicorp/vault/pull/22484)] +* **GCP IAM Support**: Adds support for IAM-based authentication to MySQL and PostgreSQL backends using Google Cloud SQL. [[GH-22445](https://github.com/hashicorp/vault/pull/22445)] +* **Improved KV V2 UI**: Updated and restructured secret engine for KV (version 2 only) [[GH-22559](https://github.com/hashicorp/vault/pull/22559)] +* **Merkle Tree Corruption Detection (enterprise)**: Add a new endpoint to check merkle tree corruption. +* **Plugin Containers**: Vault supports registering, managing, and running plugins inside a container on Linux. [[GH-22712](https://github.com/hashicorp/vault/pull/22712)] +* **SAML Auth Method (enterprise)**: Enable users to authenticate with Vault using their identity in a SAML Identity Provider. +* **Seal High Availability Beta (enterprise)**: operators can try out configuring more than one automatic seal for resilience against seal provider outages. Not for production use at this time. +* **Secrets Sync (enterprise)**: Add the ability to synchronize KVv2 secret with external secrets manager solutions. 
+* **UI LDAP secrets engine**: Add LDAP secrets engine to the UI. [[GH-20790](https://github.com/hashicorp/vault/pull/20790)] + +IMPROVEMENTS: + +* Bump github.com/hashicorp/go-plugin version v1.4.9 -> v1.4.10 [[GH-20966](https://github.com/hashicorp/vault/pull/20966)] +* api: add support for cloning a Client's tls.Config. [[GH-21424](https://github.com/hashicorp/vault/pull/21424)] +* api: adding a new api sys method for replication status [[GH-20995](https://github.com/hashicorp/vault/pull/20995)] +* audit: add core audit events experiment [[GH-21628](https://github.com/hashicorp/vault/pull/21628)] +* auth/aws: Added support for signed GET requests for authenticating to vault using the aws iam method. [[GH-10961](https://github.com/hashicorp/vault/pull/10961)] +* auth/azure: Add support for azure workload identity authentication (see issue +#18257). Update go-kms-wrapping dependency to include [PR +#155](https://github.com/hashicorp/go-kms-wrapping/pull/155) [[GH-22994](https://github.com/hashicorp/vault/pull/22994)] +* auth/azure: Added Azure API configurable retry options [[GH-23059](https://github.com/hashicorp/vault/pull/23059)] +* auth/cert: Adds support for requiring hexadecimal-encoded non-string certificate extension values [[GH-21830](https://github.com/hashicorp/vault/pull/21830)] +* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)] +* auto-auth/azure: Added Azure Workload Identity Federation support to auto-auth (for Vault Agent and Vault Proxy). [[GH-22264](https://github.com/hashicorp/vault/pull/22264)] +* auto-auth: added support for LDAP auto-auth [[GH-21641](https://github.com/hashicorp/vault/pull/21641)] +* aws/auth: Adds a new config field `use_sts_region_from_client` which allows for using dynamic regional sts endpoints based on Authorization header when using IAM-based authentication. 
[[GH-21960](https://github.com/hashicorp/vault/pull/21960)] +* command/server: add `-dev-tls-san` flag to configure subject alternative names for the certificate generated when using `-dev-tls`. [[GH-22657](https://github.com/hashicorp/vault/pull/22657)] +* core (ent) : Add field that allows lease-count namespace quotas to be inherited by child namespaces. +* core : Add field that allows rate-limit namespace quotas to be inherited by child namespaces. [[GH-22452](https://github.com/hashicorp/vault/pull/22452)] +* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise. +* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)] +* core: Add a new periodic metric to track the number of available policies, `vault.policy.configured.count`. [[GH-21010](https://github.com/hashicorp/vault/pull/21010)] +* core: Fix OpenAPI representation and `-output-policy` recognition of some non-standard sudo paths [[GH-21772](https://github.com/hashicorp/vault/pull/21772)] +* core: Fix regexes for `sys/raw/` and `sys/leases/lookup/` to match prevailing conventions [[GH-21760](https://github.com/hashicorp/vault/pull/21760)] +* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)] +* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. 
[[GH-22567](https://github.com/hashicorp/vault/pull/22567)] +* core: add a listener configuration "chroot_namespace" that forces requests to use a namespace hierarchy [[GH-22304](https://github.com/hashicorp/vault/pull/22304)] +* core: add a listener configuration "chroot_namespace" that forces requests to use a namespace hierarchy +* core: remove unnecessary *BarrierView field from backendEntry struct [[GH-20933](https://github.com/hashicorp/vault/pull/20933)] +* core: use Go stdlib functionalities instead of explicit byte/string conversions [[GH-21854](https://github.com/hashicorp/vault/pull/21854)] +* eventbus: updated go-eventlogger library to allow removal of nodes referenced by pipelines (used for subscriptions) [[GH-21623](https://github.com/hashicorp/vault/pull/21623)] +* events: Allow subscriptions to multiple namespaces [[GH-22540](https://github.com/hashicorp/vault/pull/22540)] +* events: Enabled by default [[GH-22815](https://github.com/hashicorp/vault/pull/22815)] +* events: WebSocket subscriptions add support for boolean filter expressions [[GH-22835](https://github.com/hashicorp/vault/pull/22835)] +* framework: Make it an error for `CreateOperation` to be defined without an `ExistenceCheck`, thereby fixing misleading `x-vault-createSupported` in OpenAPI [[GH-18492](https://github.com/hashicorp/vault/pull/18492)] +* kmip (enterprise): Add namespace lock and unlock support [[GH-21925](https://github.com/hashicorp/vault/pull/21925)] +* openapi: Better mount points for kv-v1 and kv-v2 in openapi.json [[GH-21563](https://github.com/hashicorp/vault/pull/21563)] +* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)] +* openapi: Fix generation of correct fields in some rarer cases [[GH-21942](https://github.com/hashicorp/vault/pull/21942)] +* openapi: Fix response definitions for list operations [[GH-21934](https://github.com/hashicorp/vault/pull/21934)] +* openapi: List operations are now given 
first-class representation in the OpenAPI document, rather than sometimes being overlaid with a read operation at the same path [[GH-21723](https://github.com/hashicorp/vault/pull/21723)] +* plugins: Containerized plugins can be configured to still work when running with systemd's PrivateTmp=true setting. [[GH-23215](https://github.com/hashicorp/vault/pull/23215)] +* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary +* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. +* sdk/framework: Adds replication state helper for backends to check for read-only storage [[GH-21743](https://github.com/hashicorp/vault/pull/21743)] +* secrets/database: Improves error logging for static role rotations by including the database and role names. [[GH-22253](https://github.com/hashicorp/vault/pull/22253)] +* secrets/db: Remove the `service_account_json` parameter when reading DB connection details [[GH-23256](https://github.com/hashicorp/vault/pull/23256)] +* secrets/pki: Add a parameter to allow ExtKeyUsage field usage from a role within ACME. [[GH-21702](https://github.com/hashicorp/vault/pull/21702)] +* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling +* secrets/transit: Add support to create CSRs from keys in transit engine and import/export x509 certificates [[GH-21081](https://github.com/hashicorp/vault/pull/21081)] +* storage/dynamodb: Added three permit pool metrics for the DynamoDB backend, `pending_permits`, `active_permits`, and `pool_size`. [[GH-21742](https://github.com/hashicorp/vault/pull/21742)] +* storage/etcd: Make etcd parameter MaxCallSendMsgSize configurable [[GH-12666](https://github.com/hashicorp/vault/pull/12666)] +* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. 
[[GH-22040](https://github.com/hashicorp/vault/pull/22040)] +* sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. [[GH-21681](https://github.com/hashicorp/vault/pull/21681)] +* ui: Add API Explorer link to Sidebar, under Tools. [[GH-21578](https://github.com/hashicorp/vault/pull/21578)] +* ui: Add pagination to PKI roles, keys, issuers, and certificates list pages [[GH-23193](https://github.com/hashicorp/vault/pull/23193)] +* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)] +* ui: Adds mount configuration details to Kubernetes secrets engine configuration view [[GH-22926](https://github.com/hashicorp/vault/pull/22926)] +* ui: Adds tidy_revoked_certs to PKI tidy status page [[GH-23232](https://github.com/hashicorp/vault/pull/23232)] +* ui: Adds warning before downloading KV v2 secret values [[GH-23260](https://github.com/hashicorp/vault/pull/23260)] +* ui: Display minus icon for empty MaskedInput value. 
Show MaskedInput for KV secrets without values [[GH-22039](https://github.com/hashicorp/vault/pull/22039)] +* ui: JSON diff view available in "Create New Version" form for KV v2 [[GH-22593](https://github.com/hashicorp/vault/pull/22593)] +* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)] +* ui: Move access to KV V2 version diff view to toolbar in Version History [[GH-23200](https://github.com/hashicorp/vault/pull/23200)] +* ui: Update pki mount configuration details to match the new mount configuration details pattern [[GH-23166](https://github.com/hashicorp/vault/pull/23166)] +* ui: add example modal to policy form [[GH-21583](https://github.com/hashicorp/vault/pull/21583)] +* ui: adds allowed_user_ids field to create role form and user_ids to generate certificates form in pki [[GH-22191](https://github.com/hashicorp/vault/pull/22191)] +* ui: display CertificateCard instead of MaskedInput for certificates in PKI [[GH-22160](https://github.com/hashicorp/vault/pull/22160)] +* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] +* ui: implement hashicorp design system [alert](https://helios.hashicorp.design/components/alert) component [[GH-21375](https://github.com/hashicorp/vault/pull/21375)] +* ui: update detail views that render ttl durations to display full unit instead of letter (i.e. 
'days' instead of 'd') [[GH-20697](https://github.com/hashicorp/vault/pull/20697)] +* ui: update unseal and DR operation token flow components [[GH-21871](https://github.com/hashicorp/vault/pull/21871)] +* ui: upgrade Ember to 4.12 [[GH-22122](https://github.com/hashicorp/vault/pull/22122)] + +DEPRECATIONS: + +* auth/centrify: Centrify plugin is deprecated as of 1.15, slated for removal in 1.17 [[GH-23050](https://github.com/hashicorp/vault/pull/23050)] + +BUG FIXES: + +* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)] +* agent: Environment variable VAULT_CACERT_BYTES now works for Vault Agent templates. [[GH-22322](https://github.com/hashicorp/vault/pull/22322)] +* agent: Fix "generate-config" command documentation URL [[GH-21466](https://github.com/hashicorp/vault/pull/21466)] +* api/client: Fix deadlock in client.CloneWithHeaders when used alongside other client methods. [[GH-22410](https://github.com/hashicorp/vault/pull/22410)] +* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)] +* audit: Prevent panic due to nil pointer receiver for audit header formatting. [[GH-22694](https://github.com/hashicorp/vault/pull/22694)] +* auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. 
[[GH-21800](https://github.com/hashicorp/vault/pull/21800)] +* auth/token, sys: Fix path-help being unavailable for some list-only endpoints [[GH-18571](https://github.com/hashicorp/vault/pull/18571)] +* auth/token: Fix parsing of `auth/token/create` fields to avoid incorrect warnings about ignored parameters [[GH-18556](https://github.com/hashicorp/vault/pull/18556)] +* awsutil: Update awsutil to v0.2.3 to fix a regression where Vault no longer +respects `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and `AWS_ROLE_SESSION_NAME`. [[GH-21951](https://github.com/hashicorp/vault/pull/21951)] +* cli: Avoid printing "Success" message when `-field` flag is provided during a `vault write`. [[GH-21546](https://github.com/hashicorp/vault/pull/21546)] +* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)] +* core (enterprise): Fix sentinel policy check logic so that sentinel +policies are not used when Sentinel feature isn't licensed. +* core (enterprise): Remove MFA Configuration for namespace when deleting namespace +* core/managed-keys (enterprise): Allow certain symmetric PKCS#11 managed key mechanisms (AES CBC with and without padding) to operate without an HMAC. +* core/metrics: vault.raft_storage.bolt.write.time should be a counter not a summary [[GH-22468](https://github.com/hashicorp/vault/pull/22468)] +* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. +Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] +* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)] +* core/quotas: Reduce overhead for role calculation when using cloud auth methods. 
[[GH-22583](https://github.com/hashicorp/vault/pull/22583)] +* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] +* core: All subloggers now reflect configured log level on reload. [[GH-22038](https://github.com/hashicorp/vault/pull/22038)] +* core: Fix bug where background thread to update locked user entries runs on DR secondaries. [[GH-22355](https://github.com/hashicorp/vault/pull/22355)] +* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] +* core: Fixed issue with some durations not being properly parsed to include days. [[GH-21357](https://github.com/hashicorp/vault/pull/21357)] +* core: Fixes list password policy to include those with names containing / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)] +* core: fix race when updating a mount's route entry tainted status and incoming requests [[GH-21640](https://github.com/hashicorp/vault/pull/21640)] +* events: Ensure subscription resources are cleaned up on close. [[GH-23042](https://github.com/hashicorp/vault/pull/23042)] +* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] +* identity/mfa: Fixes to OpenAPI representation and returned error codes for `identity/mfa/method/*` APIs [[GH-20879](https://github.com/hashicorp/vault/pull/20879)] +* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. 
[[GH-20965](https://github.com/hashicorp/vault/pull/20965)] +* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node is observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)] +* openapi: Fix response schema for PKI Issue requests [[GH-21449](https://github.com/hashicorp/vault/pull/21449)] +* openapi: Fix schema definitions for PKI EAB APIs [[GH-21458](https://github.com/hashicorp/vault/pull/21458)] +* plugins: Containerized plugins can be run with mlock enabled. [[GH-23215](https://github.com/hashicorp/vault/pull/23215)] +* plugins: Fix instance where Vault could fail to kill broken/unresponsive plugins. [[GH-22914](https://github.com/hashicorp/vault/pull/22914)] +* plugins: Fix instance where broken/unresponsive plugins could cause Vault to hang. [[GH-22914](https://github.com/hashicorp/vault/pull/22914)] +* plugins: Runtime catalog returns 404 instead of 500 when reading a runtime that does not exist [[GH-23171](https://github.com/hashicorp/vault/pull/23171)] +* plugins: `vault plugin runtime list` can successfully list plugin runtimes with GET [[GH-23171](https://github.com/hashicorp/vault/pull/23171)] +* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)] +* replication (enterprise): Fix bug sync invalidate CoreReplicatedClusterInfoPath +* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable +* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs +* replication (enterprise): Fixing a bug by which the atomicity of a merkle diff result could be affected. This means it could be a source of a merkle-diff & sync process failing to switch into stream-wal mode afterwards. 
+* replication (enterprise): Sort cluster addresses returned by echo requests, so that primary-addrs only gets persisted when the +set of addrs changes. +* replication (enterprise): update primary cluster address after DR failover +* sdk/ldaputil: Properly escape user filters when using UPN domains +sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)] +* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21631](https://github.com/hashicorp/vault/pull/21631)] +* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. [[GH-22330](https://github.com/hashicorp/vault/pull/22330)] +* secrets/pki: Fix bug with ACME tidy, 'unable to determine acme base folder path'. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)] +* secrets/pki: Fix preserving acme_account_safety_buffer on config/auto-tidy. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)] +* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. [[GH-23007](https://github.com/hashicorp/vault/pull/23007)] +* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`. 
[[GH-21316](https://github.com/hashicorp/vault/pull/21316)] +* secrets/pki: allowed_domains are now compared in a case-insensitive manner if they use glob patterns [[GH-22126](https://github.com/hashicorp/vault/pull/22126)] +* secrets/transform (enterprise): Batch items with repeated tokens in the tokenization decode api will now contain the decoded_value element +* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present +* secrets/transform (enterprise): Fix nil panic when encoding a tokenization transformation on a non-active node +* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required +* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute +* secrets/transit: fix panic when providing non-PEM formatted public key for import [[GH-22753](https://github.com/hashicorp/vault/pull/22753)] +* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)] +* storage/consul: Consul service registration tags are now case-sensitive. [[GH-6483](https://github.com/hashicorp/vault/pull/6483)] +* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. 
[[GH-20986](https://github.com/hashicorp/vault/pull/20986)] +* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)] +* ui: Adds missing values to details view after generating PKI certificate [[GH-21635](https://github.com/hashicorp/vault/pull/21635)] +* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] +* ui: Fix display for "Last Vault Rotation" timestamp for static database roles which was not rendering or copyable [[GH-22519](https://github.com/hashicorp/vault/pull/22519)] +* ui: Fix styling for username input when editing a user [[GH-21771](https://github.com/hashicorp/vault/pull/21771)] +* ui: Fix styling for viewing certificate in kubernetes configuration [[GH-21968](https://github.com/hashicorp/vault/pull/21968)] +* ui: Fix the issue where confirm delete dropdown is being cut off [[GH-23066](https://github.com/hashicorp/vault/pull/23066)] +* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)] +* ui: Fixed secrets, leases, and policies filter dropping focus after a single character [[GH-21767](https://github.com/hashicorp/vault/pull/21767)] +* ui: Fixes filter and search bug in secrets engines [[GH-23123](https://github.com/hashicorp/vault/pull/23123)] +* ui: Fixes form field label tooltip alignment [[GH-22832](https://github.com/hashicorp/vault/pull/22832)] +* ui: Fixes issue with certain navigational links incorrectly displaying in child namespaces [[GH-21562](https://github.com/hashicorp/vault/pull/21562)] +* ui: Fixes login screen display issue with Safari browser [[GH-21582](https://github.com/hashicorp/vault/pull/21582)] +* ui: Fixes problem displaying certificates issued with unsupported signature algorithms (i.e. 
ed25519) [[GH-21926](https://github.com/hashicorp/vault/pull/21926)] +* ui: Fixes styling of private key input when configuring an SSH key [[GH-21531](https://github.com/hashicorp/vault/pull/21531)] +* ui: Surface DOMException error when browser settings prevent localStorage. [[GH-21503](https://github.com/hashicorp/vault/pull/21503)] +* ui: correct doctype for index.html [[GH-22153](https://github.com/hashicorp/vault/pull/22153)] +* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)] +* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] +* ui: fixes long namespace names overflow in the sidebar +* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)] +* ui: fixes text readability issue in revoke token confirmation dialog [[GH-22390](https://github.com/hashicorp/vault/pull/22390)] + +## 1.14.7 +### November 30, 2023 + +CHANGES: + +* core: Bump Go version to 1.20.11. + +IMPROVEMENTS: + +* core (enterprise): Speed up unseal when using namespaces +* secrets/pki: do not check TLS validity on ACME requests redirected to https [[GH-22521](https://github.com/hashicorp/vault/pull/22521)] +* ui: Sort list view of entities and aliases alphabetically using the item name [[GH-24103](https://github.com/hashicorp/vault/pull/24103)] +* ui: Update flat, shell-quote and swagger-ui-dist packages. Remove swagger-ui styling overrides. [[GH-23700](https://github.com/hashicorp/vault/pull/23700)] + +BUG FIXES: + +* activity log (enterprise): De-duplicate client count estimates for license utilization reporting. +* auth/cert: Handle errors related to expired OCSP server responses [[GH-24193](https://github.com/hashicorp/vault/pull/24193)] +* core/config: Use correct HCL config value when configuring `log_requests_level`. 
[[GH-24058](https://github.com/hashicorp/vault/pull/24058)] +* core/quotas: Close rate-limit blocked client purge goroutines when sealing [[GH-24108](https://github.com/hashicorp/vault/pull/24108)] +* replication (enterprise): disallow configuring paths filter for a mount path that does not exist +* secrets/pki: Do not set nextUpdate field in OCSP responses when ocsp_expiry is 0 [[GH-24192](https://github.com/hashicorp/vault/pull/24192)] +* secrets/transit: Fix a panic when attempting to export a public RSA key [[GH-24054](https://github.com/hashicorp/vault/pull/24054)] +* ui: Fix error when tuning token auth configuration within namespace [[GH-24147](https://github.com/hashicorp/vault/pull/24147)] + +## 1.14.6 +### November 09, 2023 + +SECURITY: +* core: inbound client requests triggering a policy check can lead to an unbounded consumption of memory. A large number of these requests may lead to denial-of-service. This vulnerability, CVE-2023-5954, was introduced in Vault 1.15.0, 1.14.3, and 1.13.7, and is fixed in Vault 1.15.2, 1.14.6, and 1.13.10. [[HSEC-2023-33](https://discuss.hashicorp.com/t/hcsec-2023-33-vault-requests-triggering-policy-checks-may-lead-to-unbounded-memory-consumption/59926)] + +CHANGES: + +* auth/approle: Normalized error response messages when invalid credentials are provided [[GH-23786](https://github.com/hashicorp/vault/pull/23786)] +* secrets/mongodbatlas: Update plugin to v0.10.2 [[GH-23849](https://github.com/hashicorp/vault/pull/23849)] + +FEATURES: + +* cli/snapshot: Add CLI tool to inspect Vault snapshots [[GH-23457](https://github.com/hashicorp/vault/pull/23457)] + +IMPROVEMENTS: + +* storage/etcd: etcd should only return keys when calling List() [[GH-23872](https://github.com/hashicorp/vault/pull/23872)] + +BUG FIXES: + +* api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured +on the request. 
[[GH-23861](https://github.com/hashicorp/vault/pull/23861)] +* core (enterprise): Do not return an internal error when token policy type lookup fails, log it instead and continue. +* core/activity: Fixes segments fragment loss due to exceeding entry record size limit [[GH-23781](https://github.com/hashicorp/vault/pull/23781)] +* core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" when filter paths are enforced returns 500 error code from the secondary [[GH-23802](https://github.com/hashicorp/vault/pull/23802)] +* core: Revert PR causing memory consumption bug [[GH-23986](https://github.com/hashicorp/vault/pull/23986)] +* core: Skip unnecessary deriving of policies during Login MFA Check. [[GH-23894](https://github.com/hashicorp/vault/pull/23894)] +* core: fix bug where deadlock detection was always on for expiration and quotas. +These can now be configured individually with `detect_deadlocks`. [[GH-23902](https://github.com/hashicorp/vault/pull/23902)] +* core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash [[GH-23874](https://github.com/hashicorp/vault/pull/23874)] +* expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. [[GH-24027](https://github.com/hashicorp/vault/pull/24027)] + +## 1.14.5 +### October 25, 2023 + +CHANGES: + +* core: Bump Go version to 1.20.10. +* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host + +IMPROVEMENTS: + +* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)] +* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. 
[[GH-22567](https://github.com/hashicorp/vault/pull/22567)] +* ui: Adds toggle to KV secrets engine value download modal to optionally stringify value in downloaded file [[GH-23747](https://github.com/hashicorp/vault/pull/23747)] + +BUG FIXES: + +* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)] +* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)] +* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)] +* kmip (enterprise): Improve handling of failures due to storage replication issues. +* kmip (enterprise): Return a structure in the response for query function Query Server Information. +* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)] +* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster. +* replication (enterprise): Fix a missing unlock when changing replication state +* secrets/consul: Fix revocations when Vault has an access token using specific namespace and admin partition policies [[GH-23010](https://github.com/hashicorp/vault/pull/23010)] +* secrets/pki: Stop processing in-flight ACME verifications when an active node steps down [[GH-23278](https://github.com/hashicorp/vault/pull/23278)] +* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about it not containing a private key +* secrets/transit (enterprise): Address panic when using GCP,AWS,Azure managed keys for encryption operations. At this time all encryption operations for the cloud providers have been disabled, only signing operations are supported. 
+* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations +* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)] +* storage/consul: fix a bug where an active node in a specific sort of network +partition could continue to write data to Consul after a new leader is elected +potentially causing data loss or corruption for keys with many concurrent +writers. For Enterprise clusters this could cause corruption of the merkle trees +leading to failure to complete merkle sync without a full re-index. [[GH-23013](https://github.com/hashicorp/vault/pull/23013)] +* ui: Decode the connection url for display on the connection details page [[GH-23695](https://github.com/hashicorp/vault/pull/23695)] +* ui: Fix AWS secret engine to allow empty policy_document field. [[GH-23470](https://github.com/hashicorp/vault/pull/23470)] +* ui: Fix the copy token button in the sidebar navigation window when in a collapsed state. [[GH-23331](https://github.com/hashicorp/vault/pull/23331)] +* ui: Fixes issue with sidebar navigation links disappearing when navigating to policies when a user is not authorized [[GH-23516](https://github.com/hashicorp/vault/pull/23516)] + +## 1.14.4 +### September 27, 2023 + +SECURITY: + +* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8. 
[[HSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)] + +CHANGES: + +* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy + +IMPROVEMENTS: + +* ui: Add pagination to PKI roles, keys, issuers, and certificates list pages [[GH-23193](https://github.com/hashicorp/vault/pull/23193)] +* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)] +* ui: Adds tidy_revoked_certs to PKI tidy status page [[GH-23232](https://github.com/hashicorp/vault/pull/23232)] +* ui: Adds warning before downloading KV v2 secret values [[GH-23260](https://github.com/hashicorp/vault/pull/23260)] + +BUG FIXES: + +* core: Fixes list password policy to include those with names containing / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)] +* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. [[GH-23007](https://github.com/hashicorp/vault/pull/23007)] +* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)] +* ui: Fix the issue where confirm delete dropdown is being cut off [[GH-23066](https://github.com/hashicorp/vault/pull/23066)] +* ui: Fixes filter and search bug in secrets engines [[GH-23123](https://github.com/hashicorp/vault/pull/23123)] +* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)] + +## 1.14.3 +### September 13, 2023 + +SECURITY: + +* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. 
[[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)] + +CHANGES: + +* core: Bump Go version to 1.20.8. + +FEATURES: + +* **Merkle Tree Corruption Detection (enterprise)**: Add a new endpoint to check merkle tree corruption. + +IMPROVEMENTS: + +* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)] +* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)] +* kmip (enterprise): reduce latency of KMIP operation handling + +BUG FIXES: + +* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)] +* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)] +* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)] +* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. [[GH-22760](https://github.com/hashicorp/vault/pull/22760)] +* core: All subloggers now reflect configured log level on reload. 
[[GH-22038](https://github.com/hashicorp/vault/pull/22038)] +* kmip (enterprise): fix date handling error with some re-key operations +* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)] +* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable +* secrets/transit: fix panic when providing non-PEM formatted public key for import [[GH-22753](https://github.com/hashicorp/vault/pull/22753)] +* ui: fixes long namespace names overflow in the sidebar + +## 1.14.2 +### August 30, 2023 + +CHANGES: + +* auth/azure: Update plugin to v0.16.0 [[GH-22277](https://github.com/hashicorp/vault/pull/22277)] +* core: Bump Go version to 1.20.7. +* database/snowflake: Update plugin to v0.9.0 [[GH-22516](https://github.com/hashicorp/vault/pull/22516)] + +IMPROVEMENTS: + +* auto-auth/azure: Added Azure Workload Identity Federation support to auto-auth (for Vault Agent and Vault Proxy). [[GH-22264](https://github.com/hashicorp/vault/pull/22264)] +* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)] +* kmip (enterprise): Add namespace lock and unlock support [[GH-21925](https://github.com/hashicorp/vault/pull/21925)] +* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. +* secrets/database: Improves error logging for static role rotations by including the database and role names. [[GH-22253](https://github.com/hashicorp/vault/pull/22253)] +* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. 
[[GH-22040](https://github.com/hashicorp/vault/pull/22040)] +* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)] +* ui: adds allowed_user_ids field to create role form and user_ids to generate certificates form in pki [[GH-22191](https://github.com/hashicorp/vault/pull/22191)] +* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] +* website/docs: Fix link formatting in Vault lambda extension docs [[GH-22396](https://github.com/hashicorp/vault/pull/22396)] + +BUG FIXES: + +* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)] +* agent: Environment variable VAULT_CACERT_BYTES now works for Vault Agent templates. [[GH-22322](https://github.com/hashicorp/vault/pull/22322)] +* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)] +* core (enterprise): Remove MFA Configuration for namespace when deleting namespace +* core/metrics: vault.raft_storage.bolt.write.time should be a counter not a summary [[GH-22468](https://github.com/hashicorp/vault/pull/22468)] +* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. +Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] +* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] +* core: Fix bug where background thread to update locked user entries runs on DR secondaries. 
[[GH-22355](https://github.com/hashicorp/vault/pull/22355)] +* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] +* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] +* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node is observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)] +* replication (enterprise): Fix bug sync invalidate CoreReplicatedClusterInfoPath +* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs +* replication (enterprise): Fixing a bug by which the atomicity of a merkle diff result could be affected. This means it could be a source of a merkle-diff & sync process failing to switch into stream-wal mode afterwards. +* sdk/ldaputil: Properly escape user filters when using UPN domains +sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)] +* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. 
[[GH-22330](https://github.com/hashicorp/vault/pull/22330)] +* secrets/transform (enterprise): Batch items with repeated tokens in the tokenization decode api will now contain the decoded_value element +* secrets/transform (enterprise): Fix nil panic when encoding a tokenization transformation on a non-active node +* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute +* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)] +* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] +* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] +* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)] +* ui: fixes text readability issue in revoke token confirmation dialog [[GH-22390](https://github.com/hashicorp/vault/pull/22390)] + +## 1.14.1 +### July 25, 2023 + +SECURITY: + +* auth/ldap: Normalize HTTP response codes when invalid credentials are provided to prevent user enumeration. This vulnerability, CVE-2023-3462, is fixed in Vault 1.14.1 and 1.13.5. [[GH-21282](https://github.com/hashicorp/vault/pull/21282), [HSEC-2023-24](https://discuss.hashicorp.com/t/hcsec-2023-24-vaults-ldap-auth-method-allows-for-user-enumeration/56714)] +* core/namespace (enterprise): An unhandled error in Vault Enterprise’s namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. 
[[HSEC-2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)] + +CHANGES: + +* core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace), +which will have access to some system backend paths that were previously only accessible in the root namespace. [[GH-21215](https://github.com/hashicorp/vault/pull/21215)] +* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests, a warning will be returned if max_ttl was applied. +* storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. [[GH-20825](https://github.com/hashicorp/vault/pull/20825)] + +IMPROVEMENTS: + +* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise. +* eventbus: updated go-eventlogger library to allow removal of nodes referenced by pipelines (used for subscriptions) [[GH-21623](https://github.com/hashicorp/vault/pull/21623)] +* openapi: Better mount points for kv-v1 and kv-v2 in openapi.json [[GH-21563](https://github.com/hashicorp/vault/pull/21563)] +* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary +* secrets/pki: Add a parameter to allow ExtKeyUsage field usage from a role within ACME. [[GH-21702](https://github.com/hashicorp/vault/pull/21702)] +* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling +* sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. [[GH-21681](https://github.com/hashicorp/vault/pull/21681)] + +BUG FIXES: + +* agent: Fix "generate-config" command documentation URL [[GH-21466](https://github.com/hashicorp/vault/pull/21466)] +* auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. 
[[GH-21800](https://github.com/hashicorp/vault/pull/21800)] +* auth/token, sys: Fix path-help being unavailable for some list-only endpoints [[GH-18571](https://github.com/hashicorp/vault/pull/18571)] +* auth/token: Fix parsing of `auth/token/create` fields to avoid incorrect warnings about ignored parameters [[GH-18556](https://github.com/hashicorp/vault/pull/18556)] +* awsutil: Update awsutil to v0.2.3 to fix a regression where Vault no longer +respects `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and `AWS_ROLE_SESSION_NAME`. [[GH-21951](https://github.com/hashicorp/vault/pull/21951)] +* core/managed-keys (enterprise): Allow certain symmetric PKCS#11 managed key mechanisms (AES CBC with and without padding) to operate without an HMAC. +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-24170](https://github.com/hashicorp/vault/pull/24170)] +* core: Fixed issue with some durations not being properly parsed to include days. [[GH-21357](https://github.com/hashicorp/vault/pull/21357)] +* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. [[GH-20965](https://github.com/hashicorp/vault/pull/20965)] +* openapi: Fix response schema for PKI Issue requests [[GH-21449](https://github.com/hashicorp/vault/pull/21449)] +* openapi: Fix schema definitions for PKI EAB APIs [[GH-21458](https://github.com/hashicorp/vault/pull/21458)] +* replication (enterprise): update primary cluster address after DR failover +* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21631](https://github.com/hashicorp/vault/pull/21631)] +* secrets/pki: Fix bug with ACME tidy, 'unable to determine acme base folder path'. 
[[GH-21870](https://github.com/hashicorp/vault/pull/21870)] +* secrets/pki: Fix preserving acme_account_safety_buffer on config/auto-tidy. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)] +* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`. [[GH-21316](https://github.com/hashicorp/vault/pull/21316)] +* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present +* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required +* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)] +* ui: Adds missing values to details view after generating PKI certificate [[GH-21635](https://github.com/hashicorp/vault/pull/21635)] +* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)] +* ui: Fixed secrets, leases, and policies filter dropping focus after a single character [[GH-21767](https://github.com/hashicorp/vault/pull/21767)] +* ui: Fixes issue with certain navigational links incorrectly displaying in child namespaces [[GH-21562](https://github.com/hashicorp/vault/pull/21562)] +* ui: Fixes login screen display issue with Safari browser [[GH-21582](https://github.com/hashicorp/vault/pull/21582)] +* ui: Fixes problem displaying certificates issued with unsupported signature algorithms (i.e.
ed25519) [[GH-21926](https://github.com/hashicorp/vault/pull/21926)] +* ui: Fixes styling of private key input when configuring an SSH key [[GH-21531](https://github.com/hashicorp/vault/pull/21531)] +* ui: Surface DOMException error when browser settings prevent localStorage. [[GH-21503](https://github.com/hashicorp/vault/pull/21503)] + +## 1.14.0 +### June 21, 2023 + +SECURITY: + +* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. [[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)] + +BREAKING CHANGES: + +* secrets/pki: Maintaining running count of certificates will be turned off by default. +To re-enable keeping these metrics available on the tidy status endpoint, enable +maintain_stored_certificate_counts on tidy-config, to also publish them to the +metrics consumer, enable publish_stored_certificate_count_metrics . [[GH-18186](https://github.com/hashicorp/vault/pull/18186)] + +CHANGES: + +* auth/alicloud: Updated plugin from v0.14.0 to v0.15.0 [[GH-20758](https://github.com/hashicorp/vault/pull/20758)] +* auth/azure: Updated plugin from v0.13.0 to v0.15.0 [[GH-20816](https://github.com/hashicorp/vault/pull/20816)] +* auth/centrify: Updated plugin from v0.14.0 to v0.15.1 [[GH-20745](https://github.com/hashicorp/vault/pull/20745)] +* auth/gcp: Updated plugin from v0.15.0 to v0.16.0 [[GH-20725](https://github.com/hashicorp/vault/pull/20725)] +* auth/jwt: Updated plugin from v0.15.0 to v0.16.0 [[GH-20799](https://github.com/hashicorp/vault/pull/20799)] +* auth/kubernetes: Update plugin to v0.16.0 [[GH-20802](https://github.com/hashicorp/vault/pull/20802)] +* core: Bump Go version to 1.20.5. +* core: Remove feature toggle for SSCTs, i.e. the env var VAULT_DISABLE_SERVER_SIDE_CONSISTENT_TOKENS. 
[[GH-20834](https://github.com/hashicorp/vault/pull/20834)] +* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] +* database/couchbase: Updated plugin from v0.9.0 to v0.9.2 [[GH-20764](https://github.com/hashicorp/vault/pull/20764)] +* database/redis-elasticache: Updated plugin from v0.2.0 to v0.2.1 [[GH-20751](https://github.com/hashicorp/vault/pull/20751)] +* replication (enterprise): Add a new parameter for the update-primary API call +that allows for setting of the primary cluster addresses directly, instead of +via a token. +* secrets/ad: Updated plugin from v0.10.1-0.20230329210417-0b2cdb26cf5d to v0.16.0 [[GH-20750](https://github.com/hashicorp/vault/pull/20750)] +* secrets/alicloud: Updated plugin from v0.5.4-beta1.0.20230330124709-3fcfc5914a22 to v0.15.0 [[GH-20787](https://github.com/hashicorp/vault/pull/20787)] +* secrets/azure: Updated plugin from v0.15.0 to v0.16.0 [[GH-20777](https://github.com/hashicorp/vault/pull/20777)] +* secrets/database/mongodbatlas: Updated plugin from v0.9.0 to v0.10.0 [[GH-20882](https://github.com/hashicorp/vault/pull/20882)] +* secrets/database/snowflake: Updated plugin from v0.7.0 to v0.8.0 [[GH-20807](https://github.com/hashicorp/vault/pull/20807)] +* secrets/gcp: Updated plugin from v0.15.0 to v0.16.0 [[GH-20818](https://github.com/hashicorp/vault/pull/20818)] +* secrets/keymgmt: Updated plugin to v0.9.1 +* secrets/kubernetes: Update plugin to v0.5.0 [[GH-20802](https://github.com/hashicorp/vault/pull/20802)] +* secrets/mongodbatlas: Updated plugin from v0.9.1 to v0.10.0 [[GH-20742](https://github.com/hashicorp/vault/pull/20742)] +* secrets/pki: Allow issuance of root CAs without AIA, when templated AIA information includes issuer_id. [[GH-21209](https://github.com/hashicorp/vault/pull/21209)] +* secrets/pki: Warning when issuing leafs from CSRs with basic constraints.
In the future, issuance of non-CA leaf certs from CSRs with asserted IsCA Basic Constraints will be prohibited. [[GH-20654](https://github.com/hashicorp/vault/pull/20654)] + +FEATURES: + +* **AWS Static Roles**: The AWS Secrets Engine can manage static roles configured by users. [[GH-20536](https://github.com/hashicorp/vault/pull/20536)] +* **Automated License Utilization Reporting**: Added automated license +utilization reporting, which sends minimal product-license [metering +data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) +to HashiCorp without requiring you to manually collect and report them. +* **Environment Variables through Vault Agent**: Introducing a new process-supervisor mode for Vault Agent which allows injecting secrets as environment variables into a child process using a new `env_template` configuration stanza. The process-supervisor configuration can be generated with a new `vault agent generate-config` helper tool. [[GH-20530](https://github.com/hashicorp/vault/pull/20530)] +* **MongoDB Atlas Database Secrets**: Adds support for client certificate credentials [[GH-20425](https://github.com/hashicorp/vault/pull/20425)] +* **MongoDB Atlas Database Secrets**: Adds support for generating X.509 certificates on dynamic roles for user authentication [[GH-20882](https://github.com/hashicorp/vault/pull/20882)] +* **NEW PKI Workflow in UI**: Completes generally available rollout of new PKI UI that provides smoother mount configuration and a more guided user experience [[GH-pki-ui-improvements](https://github.com/hashicorp/vault/pull/pki-ui-improvements)] +* **Secrets/Auth Plugin Multiplexing**: The plugin will be multiplexed when run +as an external plugin by vault versions that support secrets/auth plugin +multiplexing (> 1.12) [[GH-19215](https://github.com/hashicorp/vault/pull/19215)] +* **Sidebar Navigation in UI**: A new sidebar navigation panel has been added in the UI to replace the top navigation bar. 
[[GH-19296](https://github.com/hashicorp/vault/pull/19296)] +* **Vault PKI ACME Server**: Support for the ACME certificate lifecycle management protocol has been added to the Vault PKI Plugin. This allows standard ACME clients, such as the EFF's certbot and the CNCF's k8s cert-manager, to request certificates from a Vault server with no knowledge of Vault APIs or authentication mechanisms. For public-facing Vault instances, we recommend requiring External Account Bindings (EAB) to limit the ability to request certificates to only authenticated clients. [[GH-20752](https://github.com/hashicorp/vault/pull/20752)] +* **Vault Proxy**: Introduced Vault Proxy, a new subcommand of the Vault binary that can be invoked using `vault proxy -config=config.hcl`. It currently has the same feature set as Vault Agent's API proxy, but the two may diverge in the future. We plan to deprecate the API proxy functionality of Vault Agent in a future release. [[GH-20548](https://github.com/hashicorp/vault/pull/20548)] +* **OCI Auto-Auth**: Add OCI (Oracle Cloud Infrastructure) auto-auth method [[GH-19260](https://github.com/hashicorp/vault/pull/19260)] + +IMPROVEMENTS: + +* api: Add Config.TLSConfig method to fetch the TLS configuration from a client config. [[GH-20265](https://github.com/hashicorp/vault/pull/20265)] +* physical/etcd: Upgrade etcd3 client to v3.5.7 [[GH-20261](https://github.com/hashicorp/vault/pull/20261)] +* activitylog: EntityRecord protobufs now contain a ClientType field for +distinguishing client sources. [[GH-20626](https://github.com/hashicorp/vault/pull/20626)] +* agent: Add integration tests for agent running in process supervisor mode [[GH-20741](https://github.com/hashicorp/vault/pull/20741)] +* agent: Add logic to validate env_template entries in configuration [[GH-20569](https://github.com/hashicorp/vault/pull/20569)] +* agent: Added `reload` option to cert auth configuration in case of external renewals of local x509 key-pairs.
[[GH-19002](https://github.com/hashicorp/vault/pull/19002)] +* agent: JWT auto-auth has a new config option, `remove_jwt_follows_symlinks` (default: false), that, if set to true will now remove the JWT, instead of the symlink to the JWT, if a symlink to a JWT has been provided in the `path` option, and the `remove_jwt_after_reading` config option is set to true (default). [[GH-18863](https://github.com/hashicorp/vault/pull/18863)] +* agent: Vault Agent now reports its name and version as part of the User-Agent header in all requests issued. [[GH-19776](https://github.com/hashicorp/vault/pull/19776)] +* agent: initial implementation of a process runner for injecting secrets via environment variables via vault agent [[GH-20628](https://github.com/hashicorp/vault/pull/20628)] +* api: GET ... /sys/internal/counters/activity?current_billing_period=true now +results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)] +* api: property based testing for LifetimeWatcher sleep duration calculation [[GH-17919](https://github.com/hashicorp/vault/pull/17919)] +* audit: add plugin metadata, including plugin name, type, version, sha256, and whether plugin is external, to audit logging [[GH-19814](https://github.com/hashicorp/vault/pull/19814)] +* audit: forwarded requests can now contain host metadata on the node it was sent 'from' or a flag to indicate that it was forwarded. +* auth/cert: Better return OCSP validation errors during login to the caller. 
[[GH-20234](https://github.com/hashicorp/vault/pull/20234)] +* auth/kerberos: Enable plugin multiplexing +auth/kerberos: Upgrade plugin dependencies [[GH-20771](https://github.com/hashicorp/vault/pull/20771)] +* auth/ldap: allow configuration of alias dereferencing in LDAP search [[GH-18230](https://github.com/hashicorp/vault/pull/18230)] +* auth/ldap: allow providing the LDAP password via an env var when authenticating via the CLI [[GH-18225](https://github.com/hashicorp/vault/pull/18225)] +* auth/oidc: Adds support for group membership parsing when using IBM ISAM as an OIDC provider. [[GH-19247](https://github.com/hashicorp/vault/pull/19247)] +* build: Prefer GOBIN when set over GOPATH/bin when building the binary [[GH-19862](https://github.com/hashicorp/vault/pull/19862)] +* cli: Add walkSecretsTree helper function, which recursively walks secrets rooted at the given path [[GH-20464](https://github.com/hashicorp/vault/pull/20464)] +* cli: Improve addPrefixToKVPath helper [[GH-20488](https://github.com/hashicorp/vault/pull/20488)] +* command/server (enterprise): -dev-three-node now creates perf standbys instead of regular standbys. [[GH-20629](https://github.com/hashicorp/vault/pull/20629)] +* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when +`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. [[GH-20609](https://github.com/hashicorp/vault/pull/20609)] +* command/server: New -dev-cluster-json writes a file describing the dev cluster in -dev and -dev-three-node modes, plus -dev-three-node now enables unauthenticated metrics and pprof requests. 
[[GH-20224](https://github.com/hashicorp/vault/pull/20224)] +* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] +* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): vault server command now allows for opt-out of automated +reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. [[GH-3939](https://github.com/hashicorp/vault/pull/3939)] +* core, secrets/pki, audit: Update dependency go-jose to v3 due to v2 deprecation. [[GH-20559](https://github.com/hashicorp/vault/pull/20559)] +* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] +* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] +* core: Add possibility to decode a generated encoded root token via the rest API [[GH-20595](https://github.com/hashicorp/vault/pull/20595)] +* core: include namespace path in granting_policies block of audit log +* core: include reason for ErrReadOnly on PBPWF writing failures +* core: report intermediate error messages during request forwarding [[GH-20643](https://github.com/hashicorp/vault/pull/20643)] +* core: provide more descriptive error message when calling enterprise feature paths in open-source [[GH-18870](https://github.com/hashicorp/vault/pull/18870)] +* database/elasticsearch: Upgrade plugin dependencies [[GH-20767](https://github.com/hashicorp/vault/pull/20767)] +* database/mongodb: upgrade mongo driver to 1.11 [[GH-19954](https://github.com/hashicorp/vault/pull/19954)] +* database/redis: Upgrade plugin dependencies
[[GH-20763](https://github.com/hashicorp/vault/pull/20763)] +* http: Support responding to HEAD operation from plugins [[GH-19520](https://github.com/hashicorp/vault/pull/19520)] +* openapi: Add openapi response definitions to /sys defined endpoints. [[GH-18633](https://github.com/hashicorp/vault/pull/18633)] +* openapi: Add openapi response definitions to pki/config_*.go [[GH-18376](https://github.com/hashicorp/vault/pull/18376)] +* openapi: Add openapi response definitions to vault/logical_system_paths.go defined endpoints. [[GH-18515](https://github.com/hashicorp/vault/pull/18515)] +* openapi: Consistently stop Vault server on exit in gen_openapi.sh [[GH-19252](https://github.com/hashicorp/vault/pull/19252)] +* openapi: Improve operationId/request/response naming strategy [[GH-19319](https://github.com/hashicorp/vault/pull/19319)] +* openapi: add openapi response definitions to /sys/internal endpoints [[GH-18542](https://github.com/hashicorp/vault/pull/18542)] +* openapi: add openapi response definitions to /sys/rotate endpoints [[GH-18624](https://github.com/hashicorp/vault/pull/18624)] +* openapi: add openapi response definitions to /sys/seal endpoints [[GH-18625](https://github.com/hashicorp/vault/pull/18625)] +* openapi: add openapi response definitions to /sys/tool endpoints [[GH-18626](https://github.com/hashicorp/vault/pull/18626)] +* openapi: add openapi response definitions to /sys/version-history, /sys/leader, /sys/ha-status, /sys/host-info, /sys/in-flight-req [[GH-18628](https://github.com/hashicorp/vault/pull/18628)] +* openapi: add openapi response definitions to /sys/wrapping endpoints [[GH-18627](https://github.com/hashicorp/vault/pull/18627)] +* openapi: add openapi response definitions to /sys/auth endpoints [[GH-18465](https://github.com/hashicorp/vault/pull/18465)] +* openapi: add openapi response definitions to /sys/capabilities endpoints [[GH-18468](https://github.com/hashicorp/vault/pull/18468)] +* openapi: add openapi response definitions to
/sys/config and /sys/generate-root endpoints [[GH-18472](https://github.com/hashicorp/vault/pull/18472)] +* openapi: added ability to validate response structures against openapi schema for test clusters [[GH-19043](https://github.com/hashicorp/vault/pull/19043)] +* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)] +* sdk: Add new docker-based cluster testing framework to the sdk. [[GH-20247](https://github.com/hashicorp/vault/pull/20247)] +* secrets/ad: upgrades dependencies [[GH-19829](https://github.com/hashicorp/vault/pull/19829)] +* secrets/alicloud: upgrades dependencies [[GH-19846](https://github.com/hashicorp/vault/pull/19846)] +* secrets/consul: Improve error message when ACL bootstrapping fails. [[GH-20891](https://github.com/hashicorp/vault/pull/20891)] +* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] +* secrets/gcpkms: Enable plugin multiplexing +secrets/gcpkms: Upgrade plugin dependencies [[GH-20784](https://github.com/hashicorp/vault/pull/20784)] +* secrets/mongodbatlas: upgrades dependencies [[GH-19861](https://github.com/hashicorp/vault/pull/19861)] +* secrets/openldap: upgrades dependencies [[GH-19993](https://github.com/hashicorp/vault/pull/19993)] +* secrets/pki: Add missing fields to tidy-status, include new last_auto_tidy_finished field. [[GH-20442](https://github.com/hashicorp/vault/pull/20442)] +* secrets/pki: Add warning when issuer lacks KeyUsage during CRL rebuilds; expose in logs and on rotation. [[GH-20253](https://github.com/hashicorp/vault/pull/20253)] +* secrets/pki: Allow determining existing issuers and keys on import. [[GH-20441](https://github.com/hashicorp/vault/pull/20441)] +* secrets/pki: Include CA serial number, key UUID on issuers list endpoint.
[[GH-20276](https://github.com/hashicorp/vault/pull/20276)] +* secrets/pki: Limit ACME issued certificates NotAfter TTL to a maximum of 90 days [[GH-20981](https://github.com/hashicorp/vault/pull/20981)] +* secrets/pki: Support TLS-ALPN-01 challenge type in ACME for DNS certificate identifiers. [[GH-20943](https://github.com/hashicorp/vault/pull/20943)] +* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] +* secrets/postgresql: Add configuration to scram-sha-256 encrypt passwords on Vault before sending them to PostgreSQL [[GH-19616](https://github.com/hashicorp/vault/pull/19616)] +* secrets/terraform: upgrades dependencies [[GH-19798](https://github.com/hashicorp/vault/pull/19798)] +* secrets/transit: Add support to import public keys in transit engine and allow encryption and verification of signed data [[GH-17934](https://github.com/hashicorp/vault/pull/17934)] +* secrets/transit: Allow importing RSA-PSS OID (1.2.840.113549.1.1.10) private keys via BYOK. [[GH-19519](https://github.com/hashicorp/vault/pull/19519)] +* secrets/transit: Respond to writes with updated key policy, cache configuration. [[GH-20652](https://github.com/hashicorp/vault/pull/20652)] +* secrets/transit: Support BYOK-encrypted export of keys to securely allow synchronizing specific keys and version across clusters. [[GH-20736](https://github.com/hashicorp/vault/pull/20736)] +* ui: Add download button for each secret value in KV v2 [[GH-20431](https://github.com/hashicorp/vault/pull/20431)] +* ui: Add filtering by auth type and auth name to the Authentication Method list view. [[GH-20747](https://github.com/hashicorp/vault/pull/20747)] +* ui: Add filtering by engine type and engine name to the Secret Engine list view. 
[[GH-20481](https://github.com/hashicorp/vault/pull/20481)] +* ui: Adds whitespace warning to secrets engine and auth method path inputs [[GH-19913](https://github.com/hashicorp/vault/pull/19913)] +* ui: Remove the Bulma CSS framework. [[GH-19878](https://github.com/hashicorp/vault/pull/19878)] +* ui: Update Web CLI with examples and a new `kv-get` command for reading kv v2 data and metadata [[GH-20590](https://github.com/hashicorp/vault/pull/20590)] +* ui: Updates UI javascript dependencies [[GH-19901](https://github.com/hashicorp/vault/pull/19901)] +* ui: add allowed_managed_keys field to secret engine mount options [[GH-19791](https://github.com/hashicorp/vault/pull/19791)] +* ui: adds warning for commas in stringArray inputs and updates tooltip help text to remove references to comma separation [[GH-20163](https://github.com/hashicorp/vault/pull/20163)] +* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] +* website/docs: Add rotate root documentation for azure secrets engine [[GH-19187](https://github.com/hashicorp/vault/pull/19187)] +* website/docs: fix database static-user sample payload [[GH-19170](https://github.com/hashicorp/vault/pull/19170)] + +BUG FIXES: + +* agent: Fix agent generate-config to accept -namespace, VAULT_NAMESPACE, and other client-modifying flags. [[GH-21297](https://github.com/hashicorp/vault/pull/21297)] +* agent: Fix bug with 'cache' stanza validation [[GH-20934](https://github.com/hashicorp/vault/pull/20934)] +* api: Addressed a couple of issues that arose as edge cases for the -output-policy flag. Specifically around properly handling list commands, distinguishing kv V1/V2, and correctly recognizing protected paths. 
[[GH-19160](https://github.com/hashicorp/vault/pull/19160)] +* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] +* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] +* auth/token: Fix cubbyhole and revocation for legacy service tokens [[GH-19416](https://github.com/hashicorp/vault/pull/19416)] +* cli/kv: add -mount flag to kv list [[GH-19378](https://github.com/hashicorp/vault/pull/19378)] +* core (enterprise): Don't delete backend stored data that appears to be filterable +on this secondary if we don't have a corresponding mount entry. +* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. +* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. +* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core (enterprise): Fix perf standby WAL streaming silent failures when replication setup happens at a bad time. +* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur +* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace +* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` +resulting in 412 errors.
+* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will +have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. [[GH-21260](https://github.com/hashicorp/vault/pull/21260)] +* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] +* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] +* core: Fix Forwarded Writer construction to correctly find active nodes, allowing PKI cross-cluster functionality to succeed on existing mounts. +* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] +* core: Fix writes to readonly storage on performance standbys when user lockout feature is enabled. [[GH-20783](https://github.com/hashicorp/vault/pull/20783)] +* identity: Fixes duplicate groups creation with the same name but unique IDs. [[GH-20964](https://github.com/hashicorp/vault/pull/20964)] +* license (enterprise): Fix bug where license would update even if the license didn't change. +* openapi: Small fixes for OpenAPI display attributes. Changed "log-in" to "login" [[GH-20285](https://github.com/hashicorp/vault/pull/20285)] +* plugin/reload: Fix a possible data race with rollback manager and plugin reload [[GH-19468](https://github.com/hashicorp/vault/pull/19468)] +* replication (enterprise): Fix a caching issue when replicating filtered data to +a performance secondary. This resulted in the data being set to nil in the cache +and an "invalid value" error being returned from the API. +* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary.
+* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover +* replication (enterprise): Fix bug where reloading external plugin on a secondary would +break replication. +* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs +* replication (enterprise): Fix regression causing token creation against a role +with a new entity alias to be incorrectly forwarded from perf standbys. [[GH-21100](https://github.com/hashicorp/vault/pull/21100)] +* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil +* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. +* sdk/backend: prevent panic when computing the zero value for a `TypeInt64` schema field. [[GH-18729](https://github.com/hashicorp/vault/pull/18729)] +* secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. [[GH-20664](https://github.com/hashicorp/vault/pull/20664)] +* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens +* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation +* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions +* secrets/transform: Added importing of keys and key versions into the Transform secrets engine using the command 'vault transform import' and 'vault transform import-version'. [[GH-20668](https://github.com/hashicorp/vault/pull/20668)] +* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. 
For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. +* secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. +* sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. [[GH-20864](https://github.com/hashicorp/vault/pull/20864)] +* shamir: change mul and div implementations to be constant-time [[GH-19495](https://github.com/hashicorp/vault/pull/19495)] +* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] +* ui: Fix secret render when path includes %. Resolves #11616. [[GH-20430](https://github.com/hashicorp/vault/pull/20430)] +* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)] +* ui: fixes auto_rotate_period ttl input for transit keys [[GH-20731](https://github.com/hashicorp/vault/pull/20731)] +* ui: fixes bug in kmip role form that caused `operation_all` to persist after deselecting all operation checkboxes [[GH-19139](https://github.com/hashicorp/vault/pull/19139)] +* ui: fixes key_bits and signature_bits reverting to default values when editing a pki role [[GH-20907](https://github.com/hashicorp/vault/pull/20907)] +* ui: wait for wanted message event during OIDC callback instead of using the first message event [[GH-18521](https://github.com/hashicorp/vault/pull/18521)] + +## 1.13.11 +### November 30, 2023 + +CHANGES: + +* core: Bump Go version to 1.20.11. + +IMPROVEMENTS: + +* core (enterprise): Speed up unseal when using namespaces +* ui: Sort list view of entities and aliases alphabetically using the item name [[GH-24103](https://github.com/hashicorp/vault/pull/24103)] + +BUG FIXES: + +* activity log (enterprise): De-duplicate client count estimates for license utilization reporting. 
+* auth/cert: Handle errors related to expired OCSP server responses [[GH-24193](https://github.com/hashicorp/vault/pull/24193)] +* core/config: Use correct HCL config value when configuring `log_requests_level`. [[GH-24057](https://github.com/hashicorp/vault/pull/24057)] +* core/quotas: Close rate-limit blocked client purge goroutines when sealing [[GH-24108](https://github.com/hashicorp/vault/pull/24108)] +* replication (enterprise): disallow configuring paths filter for a mount path that does not exist +* secrets/pki: Do not set nextUpdate field in OCSP responses when ocsp_expiry is 0 [[GH-24192](https://github.com/hashicorp/vault/pull/24192)] +* ui: Fix error when tuning token auth configuration within namespace [[GH-24147](https://github.com/hashicorp/vault/pull/24147)] + +## 1.13.10 +### November 09, 2023 + +SECURITY: +* core: inbound client requests triggering a policy check can lead to an unbounded consumption of memory. A large number of these requests may lead to denial-of-service. This vulnerability, CVE-2023-5954, was introduced in Vault 1.15.0, 1.14.3, and 1.13.7, and is fixed in Vault 1.15.2, 1.14.6, and 1.13.10. [[HSEC-2023-33](https://discuss.hashicorp.com/t/hcsec-2023-33-vault-requests-triggering-policy-checks-may-lead-to-unbounded-memory-consumption/59926)] + +CHANGES: + +* auth/approle: Normalized error response messages when invalid credentials are provided [[GH-23786](https://github.com/hashicorp/vault/pull/23786)] +* secrets/mongodbatlas: Update plugin to v0.9.2 [[GH-23849](https://github.com/hashicorp/vault/pull/23849)] + +FEATURES: + +* cli/snapshot: Add CLI tool to inspect Vault snapshots [[GH-23457](https://github.com/hashicorp/vault/pull/23457)] + +IMPROVEMENTS: + +* storage/etcd: etcd should only return keys when calling List() [[GH-23872](https://github.com/hashicorp/vault/pull/23872)] + +BUG FIXES: + +* api/seal-status: Fix deadlock on calls to sys/seal-status with a namespace configured +on the request. 
[[GH-23861](https://github.com/hashicorp/vault/pull/23861)] +* core (enterprise): Do not return an internal error when token policy type lookup fails, log it instead and continue. +* core/activity: Fixes segments fragment loss due to exceeding entry record size limit [[GH-23781](https://github.com/hashicorp/vault/pull/23781)] +* core/mounts: Fix reading an "auth" mount using "sys/internal/ui/mounts/" when filter paths are enforced returns 500 error code from the secondary [[GH-23802](https://github.com/hashicorp/vault/pull/23802)] +* core: Revert PR causing memory consumption bug [[GH-23986](https://github.com/hashicorp/vault/pull/23986)] +* core: Skip unnecessary deriving of policies during Login MFA Check. [[GH-23894](https://github.com/hashicorp/vault/pull/23894)] +* core: fix bug where deadlock detection was always on for expiration and quotas. +These can now be configured individually with `detect_deadlocks`. [[GH-23902](https://github.com/hashicorp/vault/pull/23902)] +* core: fix policies with wildcards not matching list operations due to the policy path not having a trailing slash [[GH-23874](https://github.com/hashicorp/vault/pull/23874)] +* expiration: Fix fatal error "concurrent map iteration and map write" when collecting metrics from leases. [[GH-24027](https://github.com/hashicorp/vault/pull/24027)] + +## 1.13.9 +### October 25, 2023 + +CHANGES: + +* core: Bump Go version to 1.20.10. +* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host + +IMPROVEMENTS: + +* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)] +* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. 
[[GH-22567](https://github.com/hashicorp/vault/pull/22567)] + +BUG FIXES: + +* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)] +* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)] +* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)] +* kmip (enterprise): Improve handling of failures due to storage replication issues. +* kmip (enterprise): Return a structure in the response for query function Query Server Information. +* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)] +* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster. +* replication (enterprise): Fix a missing unlock when changing replication state +* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about it not containing a private key +* secrets/transit (enterprise): Address panic when using GCP,AWS,Azure managed keys for encryption operations. At this time all encryption operations for the cloud providers have been disabled, only signing operations are supported. +* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations +* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)] + +## 1.13.6 +### August 30, 2023 + +CHANGES: + +* core: Bump Go version to 1.20.7. + +IMPROVEMENTS: + +* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. 
[[GH-22235](https://github.com/hashicorp/vault/pull/22235)] +* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. +* secrets/database: Improves error logging for static role rotations by including the database and role names. [[GH-22253](https://github.com/hashicorp/vault/pull/22253)] +* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)] +* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)] +* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] + +BUG FIXES: + +* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)] +* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)] +* core (enterprise): Remove MFA Configuration for namespace when deleting namespace +* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. +Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] +* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] +* core: Fix bug where background thread to update locked user entries runs on DR secondaries. [[GH-22355](https://github.com/hashicorp/vault/pull/22355)] +* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] +* core: Fixed an instance where incorrect route entries would get tainted. 
We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] +* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] +* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node is observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)] +* replication (enterprise): Fix bug sync invalidate CoreReplicatedClusterInfoPath +* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs +* replication (enterprise): Fixing a bug by which the atomicity of a merkle diff result could be affected. This means it could be a source of a merkle-diff & sync process failing to switch into stream-wal mode afterwards. +* sdk/ldaputil: Properly escape user filters when using UPN domains +sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)] +* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. 
[[GH-22331](https://github.com/hashicorp/vault/pull/22331)] +* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute +* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] +* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] +* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)] + +## 1.13.8 +### September 27, 2023 + +SECURITY: + +* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8. [[HSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)] + +CHANGES: + +* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy + +IMPROVEMENTS: + +* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)] + +BUG FIXES: + +* core: Fixes list password policy to include those with names containing / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)] +* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. 
[[GH-23007](https://github.com/hashicorp/vault/pull/23007)] +* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)] +* ui: Fixes old pki's filter and search roles page bug [[GH-22810](https://github.com/hashicorp/vault/pull/22810)] +* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)] + +## 1.13.7 +### September 13, 2023 + +SECURITY: + +* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)] + +CHANGES: + +* core: Bump Go version to 1.20.8. +* database/snowflake: Update plugin to v0.7.3 [[GH-22591](https://github.com/hashicorp/vault/pull/22591)] + +FEATURES: + +* **Merkle Tree Corruption Detection (enterprise)**: Add a new endpoint to check merkle tree corruption. + +IMPROVEMENTS: + +* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)] +* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)] +* kmip (enterprise): reduce latency of KMIP operation handling + +BUG FIXES: + +* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)] +* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)] +* core/quotas: Reduce overhead for role calculation when using cloud auth methods. 
[[GH-22583](https://github.com/hashicorp/vault/pull/22583)] +* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. [[GH-22760](https://github.com/hashicorp/vault/pull/22760)] +* core: All subloggers now reflect configured log level on reload. [[GH-22038](https://github.com/hashicorp/vault/pull/22038)] +* kmip (enterprise): fix date handling error with some re-key operations +* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)] +* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable + +## 1.13.6 +### August 30, 2023 + +CHANGES: + +* core: Bump Go version to 1.20.7. + +IMPROVEMENTS: + +* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)] +* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. +* secrets/database: Improves error logging for static role rotations by including the database and role names. [[GH-22253](https://github.com/hashicorp/vault/pull/22253)] +* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)] +* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)] +* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] + +BUG FIXES: + +* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)] +* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. 
[[GH-22523](https://github.com/hashicorp/vault/pull/22523)] +* core (enterprise): Remove MFA Configuration for namespace when deleting namespace +* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. +Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] +* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] +* core: Fix bug where background thread to update locked user entries runs on DR secondaries. [[GH-22355](https://github.com/hashicorp/vault/pull/22355)] +* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] +* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] +* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node is observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)] +* replication (enterprise): Fix bug sync invalidate CoreReplicatedClusterInfoPath +* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs +* replication (enterprise): Fixing a bug by which the atomicity of a merkle diff result could be affected. This means it could be a source of a merkle-diff & sync process failing to switch into stream-wal mode afterwards. 
+* sdk/ldaputil: Properly escape user filters when using UPN domains +sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)] +* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. [[GH-22331](https://github.com/hashicorp/vault/pull/22331)] +* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute +* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] +* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] +* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)] + +## 1.13.5 +### July 25, 2023 + +SECURITY: + +* auth/ldap: Normalize HTTP response codes when invalid credentials are provided to prevent user enumeration. This vulnerability, CVE-2023-3462, is fixed in Vault 1.14.1 and 1.13.5. [[GH-21282](https://github.com/hashicorp/vault/pull/21282), [HSEC-2023-24](https://discuss.hashicorp.com/t/hcsec-2023-24-vaults-ldap-auth-method-allows-for-user-enumeration/56714)] +* core/namespace (enterprise): An unhandled error in Vault Enterprise’s namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. [[HSEC-2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)] + +CHANGES: + +* core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace), +which will have access to some system backend paths that were previously only accessible in the root namespace. 
[[GH-21215](https://github.com/hashicorp/vault/pull/21215)] +* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests, a warning will be returned if max_ttl was applied. + +IMPROVEMENTS: + +* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise. +* core: Add a new periodic metric to track the number of available policies, `vault.policy.configured.count`. [[GH-21010](https://github.com/hashicorp/vault/pull/21010)] +* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary +* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling +* sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. [[GH-21681](https://github.com/hashicorp/vault/pull/21681)] + +BUG FIXES: + +* auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21799](https://github.com/hashicorp/vault/pull/21799)] +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-24170](https://github.com/hashicorp/vault/pull/24170)] +* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. [[GH-20965](https://github.com/hashicorp/vault/pull/20965)] +* replication (enterprise): update primary cluster address after DR failover +* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. 
[[GH-21632](https://github.com/hashicorp/vault/pull/21632)] +* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`. [[GH-21316](https://github.com/hashicorp/vault/pull/21316)] +* secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. [[GH-20664](https://github.com/hashicorp/vault/pull/20664)] +* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present +* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required +* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)] +* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)] +* ui: Surface DOMException error when browser settings prevent localStorage. [[GH-21503](https://github.com/hashicorp/vault/pull/21503)] + +## 1.13.4 +### June 21, 2023 +BREAKING CHANGES: + +* secrets/pki: Maintaining running count of certificates will be turned off by default. +To re-enable keeping these metrics available on the tidy status endpoint, enable +maintain_stored_certificate_counts on tidy-config, to also publish them to the +metrics consumer, enable publish_stored_certificate_count_metrics. [[GH-18186](https://github.com/hashicorp/vault/pull/18186)] + +CHANGES: + +* core: Bump Go version to 1.20.5. 
+ +FEATURES: + +* **Automated License Utilization Reporting**: Added automated license +utilization reporting, which sends minimal product-license [metering +data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) +to HashiCorp without requiring you to manually collect and report them. +* core (enterprise): Add background worker for automatic reporting of billing +information. [[GH-19625](https://github.com/hashicorp/vault/pull/19625)] + +IMPROVEMENTS: + +* api: GET ... /sys/internal/counters/activity?current_billing_period=true now +results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)] +* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] +* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): vault server command now allows for opt-out of automated +reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. 
[[GH-3939](https://github.com/hashicorp/vault/pull/3939)] +* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] +* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] +* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] + +BUG FIXES: + +* agent: Fix bug with 'cache' stanza validation [[GH-20934](https://github.com/hashicorp/vault/pull/20934)] +* core (enterprise): Don't delete backend stored data that appears to be filterable +on this secondary if we don't have a corresponding mount entry. +* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will +have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. [[GH-21260](https://github.com/hashicorp/vault/pull/21260)] +* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] +* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] +* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] +* identity: Fixes duplicate groups creation with the same name but unique IDs. 
[[GH-20964](https://github.com/hashicorp/vault/pull/20964)] +* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover +* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs +* replication (enterprise): Fix regression causing token creation against a role +with a new entity alias to be incorrectly forwarded from perf standbys. [[GH-21100](https://github.com/hashicorp/vault/pull/21100)] +* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)] + +## 1.13.3 +### June 08, 2023 + +CHANGES: + +* core: Bump Go version to 1.20.4. +* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] +* replication (enterprise): Add a new parameter for the update-primary API call +that allows for setting of the primary cluster addresses directly, instead of +via a token. +* storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. [[GH-20825](https://github.com/hashicorp/vault/pull/20825)] + +IMPROVEMENTS: + +* Add debug symbols back to builds to fix Dynatrace support [[GH-20519](https://github.com/hashicorp/vault/pull/20519)] +* audit: add a `mount_point` field to audit requests and response entries [[GH-20411](https://github.com/hashicorp/vault/pull/20411)] +* autopilot: Update version to v0.2.0 to add better support for respecting min quorum [[GH-19472](https://github.com/hashicorp/vault/pull/19472)] +* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when +`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. 
[[GH-20609](https://github.com/hashicorp/vault/pull/20609)] +* core: Add possibility to decode a generated encoded root token via the rest API [[GH-20595](https://github.com/hashicorp/vault/pull/20595)] +* core: include namespace path in granting_policies block of audit log +* core: report intermediate error messages during request forwarding [[GH-20643](https://github.com/hashicorp/vault/pull/20643)] +* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)] +* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)] +* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] + +BUG FIXES: + +* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] +* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] +* cli: CLI should take days as a unit of time for ttl like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)] +* cli: disable printing flags warnings messages for the ssh command [[GH-20502](https://github.com/hashicorp/vault/pull/20502)] +* command/server: fixes panic in Vault server command when running in recovery mode [[GH-20418](https://github.com/hashicorp/vault/pull/20418)] +* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. +* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. +* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace +* core/identity: Allow updates of only the custom-metadata for entity alias. 
[[GH-20368](https://github.com/hashicorp/vault/pull/20368)] +* core: Fix Forwarded Writer construction to correctly find active nodes, allowing PKI cross-cluster functionality to succeed on existing mounts. +* core: Fix writes to readonly storage on performance standbys when user lockout feature is enabled. [[GH-20783](https://github.com/hashicorp/vault/pull/20783)] +* core: prevent panic on login after namespace is deleted that had mfa enforcement [[GH-20375](https://github.com/hashicorp/vault/pull/20375)] +* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. +* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. +* secrets/pki: Include per-issuer enable_aia_url_templating in issuer read endpoint. [[GH-20354](https://github.com/hashicorp/vault/pull/20354)] +* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation +* secrets/transform: Added importing of keys and key versions into the Transform secrets engine using the command 'vault transform import' and 'vault transform import-version'. [[GH-20668](https://github.com/hashicorp/vault/pull/20668)] +* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. +secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. +sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. 
[[GH-20864](https://github.com/hashicorp/vault/pull/20864)] +* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)] +* ui: fixes issue creating mfa login enforcement from method enforcements tab [[GH-20603](https://github.com/hashicorp/vault/pull/20603)] +* ui: fixes key_bits and signature_bits reverting to default values when editing a pki role [[GH-20907](https://github.com/hashicorp/vault/pull/20907)] + +## 1.13.2 +### April 26, 2023 + +CHANGES: + +* core: Bump Go version to 1.20.3. + +SECURITY: + +* core/seal: Fix handling of HMACing of seal-wrapped storage entries from HSMs using CKM_AES_CBC or CKM_AES_CBC_PAD which may have allowed an attacker to conduct a padding oracle attack. This vulnerability, CVE-2023-2197, affects Vault from 1.13.0 up to 1.13.1 and was fixed in 1.13.2. [[HCSEC-2023-14](https://discuss.hashicorp.com/t/hcsec-2023-14-vault-enterprise-vulnerable-to-padding-oracle-attacks-when-using-a-cbc-based-encryption-mechanism-with-a-hsm/53322)] + +IMPROVEMENTS: + +* Add debug symbols back to builds to fix Dynatrace support [[GH-20294](https://github.com/hashicorp/vault/pull/20294)] +* cli/namespace: Add detailed flag to output additional namespace information +such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] +* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] +* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the +`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] +* core: include reason for ErrReadOnly on PBPWF writing failures +* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration +for all LDAP plugins. 
[[GH-20144](https://github.com/hashicorp/vault/pull/20144)] +* secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. [[GH-20201](https://github.com/hashicorp/vault/pull/20201)] +* sys/wrapping: Add example how to unwrap without authentication in Vault [[GH-20109](https://github.com/hashicorp/vault/pull/20109)] +* ui: Allows license-banners to be dismissed. Saves preferences in localStorage. [[GH-19116](https://github.com/hashicorp/vault/pull/19116)] + +BUG FIXES: + +* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] +* command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows [[GH-20257](https://github.com/hashicorp/vault/pull/20257)] +* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. +* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur +* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` +resulting in 412 errors. +* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)] +* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] +* kmip (enterprise): Fix a problem decrypting with keys that have no Process Start Date attribute. 
+* pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it [[GH-20220](https://github.com/hashicorp/vault/pull/20220)] +* replication (enterprise): Fix a caching issue when replicating filtered data to +a performance secondary. This resulted in the data being set to nil in the cache +and a "invalid value" error being returned from the API. +* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil +* sdk/helper/ocsp: Workaround bug in Go's ocsp.ParseResponse(...), causing validation to fail with embedded CA certificates. +auth/cert: Fix OCSP validation against Vault's PKI engine. [[GH-20181](https://github.com/hashicorp/vault/pull/20181)] +* secrets/aws: Revert changes that removed the lease on STS credentials, while leaving the new ttl field in place. [[GH-20034](https://github.com/hashicorp/vault/pull/20034)] +* secrets/pki: Ensure cross-cluster delta WAL write failure only logs to avoid unattended forwarding. [[GH-20057](https://github.com/hashicorp/vault/pull/20057)] +* secrets/pki: Fix building of unified delta CRLs and recovery during unified delta WAL write failures. [[GH-20058](https://github.com/hashicorp/vault/pull/20058)] +* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. 
[[GH-20341](https://github.com/hashicorp/vault/pull/20341)] +* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens +* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] +* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] +* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] +* ui: fixes remaining doc links to include /vault in path [[GH-20070](https://github.com/hashicorp/vault/pull/20070)] +* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] +* website/docs: Fix Kubernetes Auth Code Example to use the correct whitespace in import. [[GH-20216](https://github.com/hashicorp/vault/pull/20216)] + +## 1.13.1 +### March 29, 2023 + +SECURITY: + +* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] +* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. 
[[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] +* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] + +IMPROVEMENTS: + +* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id +website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)] +* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch +option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)] +* core: validate name identifiers in mssql physical storage backend prior use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)] +* database/elasticsearch: Update error messages resulting from Elasticsearch API errors [[GH-19545](https://github.com/hashicorp/vault/pull/19545)] +* events: Suppress log warnings triggered when events are sent but the events system is not enabled. [[GH-19593](https://github.com/hashicorp/vault/pull/19593)] + +BUG FIXES: + +* agent: Fix panic when SIGHUP is issued to Agent while it has a non-TLS listener. 
[[GH-19483](https://github.com/hashicorp/vault/pull/19483)] +* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error. +* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. [[GH-19585](https://github.com/hashicorp/vault/pull/19585)] +* kmip (enterprise): Do not require attribute Cryptographic Usage Mask when registering Secret Data managed objects. +* kmip (enterprise): Fix a problem forwarding some requests to the active node. +* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)] +* secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. [[GH-19640](https://github.com/hashicorp/vault/pull/19640)] +* secrets/pki: Fix PKI revocation request forwarding from standby nodes due to an error wrapping bug [[GH-19624](https://github.com/hashicorp/vault/pull/19624)] +* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions +* ui: Fixes crypto.randomUUID error in unsecure contexts from third party ember-data library [[GH-19428](https://github.com/hashicorp/vault/pull/19428)] +* ui: fixes SSH engine config deletion [[GH-19448](https://github.com/hashicorp/vault/pull/19448)] +* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] +* ui: fixes oidc tabs in auth form submitting with the root's default_role value after a namespace has been inputted [[GH-19541](https://github.com/hashicorp/vault/pull/19541)] +* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] +* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. 
ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] + +## 1.13.0 +### March 01, 2023 + +SECURITY: + +* secrets/ssh: removal of the deprecated dynamic keys mode. **When any remaining dynamic key leases expire**, an error stating `secret is unsupported by this backend` will be thrown by the lease manager. [[GH-18874](https://github.com/hashicorp/vault/pull/18874)] +* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999, has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HCSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] + +CHANGES: + +* auth/alicloud: require the `role` field on login [[GH-19005](https://github.com/hashicorp/vault/pull/19005)] +* auth/approle: Add maximum length of 4096 for approle role_names, as this value results in HMAC calculation [[GH-17768](https://github.com/hashicorp/vault/pull/17768)] +* auth: Returns invalid credentials for ldap, userpass and approle when wrong credentials are provided for existent users. +This will only be used internally for implementing user lockout. [[GH-17104](https://github.com/hashicorp/vault/pull/17104)] +* core: Bump Go version to 1.20.1. +* core: Vault version has been moved out of sdk and into main vault module. +Plugins using sdk/useragent.String must instead use sdk/useragent.PluginString. 
[[GH-14229](https://github.com/hashicorp/vault/pull/14229)] +* logging: Removed legacy environment variable for log format ('LOGXI_FORMAT'), should use 'VAULT_LOG_FORMAT' instead [[GH-17822](https://github.com/hashicorp/vault/pull/17822)] +* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] +* plugins: `GET /database/config/:name` endpoint now returns an additional `plugin_version` field in the response data. [[GH-16982](https://github.com/hashicorp/vault/pull/16982)] +* plugins: `GET /sys/auth/:path/tune` and `GET /sys/mounts/:path/tune` endpoints may now return an additional `plugin_version` field in the response data if set. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)] +* plugins: `GET` for `/sys/auth`, `/sys/auth/:path`, `/sys/mounts`, and `/sys/mounts/:path` paths now return additional `plugin_version`, `running_plugin_version` and `running_sha256` fields in the response data for each mount. [[GH-17167](https://github.com/hashicorp/vault/pull/17167)] +* sdk: Remove version package, make useragent.String versionless. [[GH-19068](https://github.com/hashicorp/vault/pull/19068)] +* secrets/aws: do not create leases for non-renewable/non-revocable STS credentials to reduce storage calls [[GH-15869](https://github.com/hashicorp/vault/pull/15869)] +* secrets/gcpkms: Updated plugin from v0.13.0 to v0.14.0 [[GH-19063](https://github.com/hashicorp/vault/pull/19063)] +* sys/internal/inspect: Turns off this endpoint by default. A SIGHUP can now be used to reload the configs and turn this endpoint on. 
+* ui: Upgrade Ember to version 4.4.0 [[GH-17086](https://github.com/hashicorp/vault/pull/17086)] + +FEATURES: + +* **User lockout**: Ignore repeated bad credentials from the same user for a configured period of time. Enabled by default. +* **Azure Auth Managed Identities**: Allow any Azure resource that supports managed identities to authenticate with Vault [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] +* **Azure Auth Rotate Root**: Add support for rotate root in Azure Auth engine [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] +* **Event System (Alpha)**: Vault has a new opt-in experimental event system. Not yet suitable for production use. Events are currently only generated on writes to the KV secrets engine, but external plugins can also be updated to start generating events. [[GH-19194](https://github.com/hashicorp/vault/pull/19194)] +* **GCP Secrets Impersonated Account Support**: Add support for GCP service account impersonation, allowing callers to generate a GCP access token without requiring Vault to store or retrieve a GCP service account key for each role. [[GH-19018](https://github.com/hashicorp/vault/pull/19018)] +* **Kubernetes Secrets Engine UI**: Kubernetes is now available in the UI as a supported secrets engine. [[GH-17893](https://github.com/hashicorp/vault/pull/17893)] +* **New PKI UI**: Add beta support for new and improved PKI UI [[GH-18842](https://github.com/hashicorp/vault/pull/18842)] +* **PKI Cross-Cluster Revocations**: Revocation information can now be +synchronized across primary and performance replica clusters offering +a unified CRL/OCSP view of revocations across cluster boundaries. 
[[GH-19196](https://github.com/hashicorp/vault/pull/19196)] +* **Server UDS Listener**: Adding listener to Vault server to serve http request via unix domain socket [[GH-18227](https://github.com/hashicorp/vault/pull/18227)] +* **Transit managed keys**: The transit secrets engine now supports configuring and using managed keys +* **User Lockout**: Adds support to configure the user-lockout behaviour for failed logins to prevent +brute force attacks for userpass, approle and ldap auth methods. [[GH-19230](https://github.com/hashicorp/vault/pull/19230)] +* **VMSS Flex Authentication**: Adds support for Virtual Machine Scale Set Flex Authentication [[GH-19077](https://github.com/hashicorp/vault/pull/19077)] +* **Namespaces (enterprise)**: Added the ability to allow access to secrets and more to be shared across namespaces that do not share a namespace hierarchy. Using the new `sys/config/group-policy-application` API, policies can be configured to apply outside of namespace hierarchy, allowing this kind of cross-namespace sharing. +* **OpenAPI-based Go & .NET Client Libraries (Beta)**: We have now made available two new [[OpenAPI-based Go](https://github.com/hashicorp/vault-client-go/)] & [[OpenAPI-based .NET](https://github.com/hashicorp/vault-client-dotnet/)] Client libraries (beta). You can use them to perform various secret management operations easily from your applications. + +IMPROVEMENTS: + +* **Redis ElastiCache DB Engine**: Renamed configuration parameters for disambiguation; old parameters still supported for compatibility. [[GH-18752](https://github.com/hashicorp/vault/pull/18752)] +* Bump github.com/hashicorp/go-plugin version from 1.4.5 to 1.4.8 [[GH-19100](https://github.com/hashicorp/vault/pull/19100)] +* Reduced binary size [[GH-17678](https://github.com/hashicorp/vault/pull/17678)] +* agent/config: Allow config directories to be specified with -config, and allow multiple -configs to be supplied. 
[[GH-18403](https://github.com/hashicorp/vault/pull/18403)] +* agent: Add note in logs when starting Vault Agent indicating if the version differs to the Vault Server. [[GH-18684](https://github.com/hashicorp/vault/pull/18684)] +* agent: Added `token_file` auto-auth configuration to allow using a pre-existing token for Vault Agent. [[GH-18740](https://github.com/hashicorp/vault/pull/18740)] +* agent: Agent listeners can now be configured to be the `metrics_only` role, serving only metrics, as part of the listener's new top level `role` option. [[GH-18101](https://github.com/hashicorp/vault/pull/18101)] +* agent: Configured Vault Agent listeners now listen without the need for caching to be configured. [[GH-18137](https://github.com/hashicorp/vault/pull/18137)] +* agent: allows some parts of config to be reloaded without requiring a restart. [[GH-18638](https://github.com/hashicorp/vault/pull/18638)] +* agent: fix incorrectly used loop variables in parallel tests and when finalizing seals [[GH-16872](https://github.com/hashicorp/vault/pull/16872)] +* api: Remove dependency on sdk module. [[GH-18962](https://github.com/hashicorp/vault/pull/18962)] +* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)] +* audit: Add `elide_list_responses` option, providing a countermeasure for a common source of oversized audit log entries [[GH-18128](https://github.com/hashicorp/vault/pull/18128)] +* audit: Include stack trace when audit logging recovers from a panic. [[GH-18121](https://github.com/hashicorp/vault/pull/18121)] +* auth/alicloud: upgrades dependencies [[GH-18021](https://github.com/hashicorp/vault/pull/18021)] +* auth/azure: Adds support for authentication with Managed Service Identity (MSI) from a +Virtual Machine Scale Set (VMSS) in flexible orchestration mode. 
[[GH-17540](https://github.com/hashicorp/vault/pull/17540)] +* auth/azure: upgrades dependencies [[GH-17857](https://github.com/hashicorp/vault/pull/17857)] +* auth/cert: Add configurable support for validating client certs with OCSP. [[GH-17093](https://github.com/hashicorp/vault/pull/17093)] +* auth/cert: Support listing provisioned CRLs within the mount. [[GH-18043](https://github.com/hashicorp/vault/pull/18043)] +* auth/cf: Remove incorrect usage of CreateOperation from path_config [[GH-19098](https://github.com/hashicorp/vault/pull/19098)] +* auth/gcp: Upgrades dependencies [[GH-17858](https://github.com/hashicorp/vault/pull/17858)] +* auth/oidc: Adds `abort_on_error` parameter to CLI login command to help in non-interactive contexts [[GH-19076](https://github.com/hashicorp/vault/pull/19076)] +* auth/oidc: Adds ability to set Google Workspace domain for groups search [[GH-19076](https://github.com/hashicorp/vault/pull/19076)] +* auth/token (enterprise): Allow batch token creation in perfStandby nodes +* auth: Allow naming login MFA methods and using those names instead of IDs in satisfying MFA requirement for requests. +Make passcode arguments consistent across login MFA method types. [[GH-18610](https://github.com/hashicorp/vault/pull/18610)] +* auth: Provide an IP address of the requests from Vault to a Duo challenge after successful authentication. [[GH-18811](https://github.com/hashicorp/vault/pull/18811)] +* autopilot: Update version to v.0.2.0 to add better support for respecting min quorum +* cli/kv: improve kv CLI to remove data or custom metadata using kv patch [[GH-18067](https://github.com/hashicorp/vault/pull/18067)] +* cli/pki: Add List-Intermediates functionality to pki client. [[GH-18463](https://github.com/hashicorp/vault/pull/18463)] +* cli/pki: Add health-check subcommand to evaluate the health of a PKI instance. 
[[GH-17750](https://github.com/hashicorp/vault/pull/17750)] +* cli/pki: Add pki issue command, which creates a CSR, has a vault mount sign it, then reimports it. [[GH-18467](https://github.com/hashicorp/vault/pull/18467)] +* cli/pki: Added "Reissue" command which allows extracting fields from an existing certificate to create a new certificate. [[GH-18499](https://github.com/hashicorp/vault/pull/18499)] +* cli/pki: Change the pki health-check --list default config output to JSON so it's a usable configuration file [[GH-19269](https://github.com/hashicorp/vault/pull/19269)] +* cli: Add support for creating requests to existing non-KVv2 PATCH-capable endpoints. [[GH-17650](https://github.com/hashicorp/vault/pull/17650)] +* cli: Add transit import key helper commands for BYOK to Transit/Transform. [[GH-18887](https://github.com/hashicorp/vault/pull/18887)] +* cli: Support the -format=raw option, to read non-JSON Vault endpoints and original response bodies. [[GH-14945](https://github.com/hashicorp/vault/pull/14945)] +* cli: updated `vault operator rekey` prompts to describe recovery keys when `-target=recovery` [[GH-18892](https://github.com/hashicorp/vault/pull/18892)] +* client/pki: Add a new command verify-sign which checks the relationship between two certificates. [[GH-18437](https://github.com/hashicorp/vault/pull/18437)] +* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] +* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. +* core/identity: Add machine-readable output to body of response upon alias clash during entity merge [[GH-17459](https://github.com/hashicorp/vault/pull/17459)] +* core/server: Added an environment variable to write goroutine stacktraces to a +temporary file for SIGUSR2 signals. 
[[GH-17929](https://github.com/hashicorp/vault/pull/17929)] +* core: Add RPCs to read and update userFailedLoginInfo map +* core: Add experiments system and `events.alpha1` experiment. [[GH-18682](https://github.com/hashicorp/vault/pull/18682)] +* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)] +* core: Add user lockout field to config and configuring this for auth mount using auth tune to prevent brute forcing in auth methods [[GH-17338](https://github.com/hashicorp/vault/pull/17338)] +* core: Add vault.core.locked_users telemetry metric to emit information about total number of locked users. [[GH-18718](https://github.com/hashicorp/vault/pull/18718)] +* core: Added sys/locked-users endpoint to list locked users. Changed api endpoint from +sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] to sys/locked-users/[mount_accessor]/unlock/[alias_identifier]. [[GH-18675](https://github.com/hashicorp/vault/pull/18675)] +* core: Added sys/lockedusers/[mount_accessor]/unlock/[alias_identifier] endpoint to unlock a user +with given mount_accessor and alias_identifier if locked [[GH-18279](https://github.com/hashicorp/vault/pull/18279)] +* core: Added warning to /sys/seal-status and vault status command if potentially dangerous behaviour overrides are being used. [[GH-17855](https://github.com/hashicorp/vault/pull/17855)] +* core: Implemented background thread to update locked user entries every 15 minutes to prevent brute forcing in auth methods. [[GH-18673](https://github.com/hashicorp/vault/pull/18673)] +* core: License location is no longer cache exempt, meaning sys/health will not contribute as greatly to storage load when using consul as a storage backend. 
[[GH-17265](https://github.com/hashicorp/vault/pull/17265)] +* core: Update protoc from 3.21.5 to 3.21.7 [[GH-17499](https://github.com/hashicorp/vault/pull/17499)] +* core: add `detect_deadlocks` config to optionally detect core state deadlocks [[GH-18604](https://github.com/hashicorp/vault/pull/18604)] +* core: added changes for user lockout workflow. [[GH-17951](https://github.com/hashicorp/vault/pull/17951)] +* core: parallelize backend initialization to improve startup time for large numbers of mounts. [[GH-18244](https://github.com/hashicorp/vault/pull/18244)] +* database/postgres: Support multiline strings for revocation statements. [[GH-18632](https://github.com/hashicorp/vault/pull/18632)] +* database/redis-elasticache: changed config argument names for disambiguation [[GH-19044](https://github.com/hashicorp/vault/pull/19044)] +* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)] +* hcp/connectivity: Add foundational OSS support for opt-in secure communication between self-managed Vault nodes and [HashiCorp Cloud Platform](https://cloud.hashicorp.com) [[GH-18228](https://github.com/hashicorp/vault/pull/18228)] +* hcp/connectivity: Include HCP organization, project, and resource ID in server startup logs [[GH-18315](https://github.com/hashicorp/vault/pull/18315)] +* hcp/connectivity: Only update SCADA session metadata if status changes [[GH-18585](https://github.com/hashicorp/vault/pull/18585)] +* hcp/status: Add cluster-level status information [[GH-18351](https://github.com/hashicorp/vault/pull/18351)] +* hcp/status: Expand node-level status information [[GH-18302](https://github.com/hashicorp/vault/pull/18302)] +* logging: Vault Agent supports logging to a specified file path via environment variable, CLI or config [[GH-17841](https://github.com/hashicorp/vault/pull/17841)] +* logging: Vault agent and server commands support log file and log rotation. 
[[GH-18031](https://github.com/hashicorp/vault/pull/18031)] +* migration: allow parallelization of key migration for `vault operator migrate` in order to speed up a migration. [[GH-18817](https://github.com/hashicorp/vault/pull/18817)] +* namespaces (enterprise): Add new API, `sys/config/group-policy-application`, to allow group policies to be configurable +to apply to a group in `any` namespace. The default, `within_namespace_hierarchy`, is the current behaviour. +* openapi: Add default values to thing_mount_path parameters [[GH-18935](https://github.com/hashicorp/vault/pull/18935)] +* openapi: Add logic to generate openapi response structures [[GH-18192](https://github.com/hashicorp/vault/pull/18192)] +* openapi: Add openapi response definitions to approle/path_login.go & approle/path_tidy_user_id.go [[GH-18772](https://github.com/hashicorp/vault/pull/18772)] +* openapi: Add openapi response definitions to approle/path_role.go [[GH-18198](https://github.com/hashicorp/vault/pull/18198)] +* openapi: Change gen_openapi.sh to generate schema with generic mount paths [[GH-18934](https://github.com/hashicorp/vault/pull/18934)] +* openapi: Mark request body objects as required [[GH-17909](https://github.com/hashicorp/vault/pull/17909)] +* openapi: add openapi response definitions to /sys/audit endpoints [[GH-18456](https://github.com/hashicorp/vault/pull/18456)] +* openapi: generic_mount_paths: Move implementation fully into server, rather than partially in plugin framework; recognize all 4 singleton mounts (auth/token, cubbyhole, identity, system) rather than just 2; change parameter from `{mountPath}` to `{_mount_path}` [[GH-18663](https://github.com/hashicorp/vault/pull/18663)] +* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)] +* plugins: Allow selecting builtin plugins by their reported semantic version of the form `vX.Y.Z+builtin` or `vX.Y.Z+builtin.vault`. 
[[GH-17289](https://github.com/hashicorp/vault/pull/17289)] +* plugins: Let Vault unseal and mount deprecated builtin plugins in a +deactivated state if this is not the first unseal after an upgrade. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] +* plugins: Mark app-id auth method Removed and remove the plugin code. [[GH-18039](https://github.com/hashicorp/vault/pull/18039)] +* plugins: Mark logical database plugins Removed and remove the plugin code. [[GH-18039](https://github.com/hashicorp/vault/pull/18039)] +* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] +* sdk: Add response schema validation method framework/FieldData.ValidateStrict and two test helpers (ValidateResponse, ValidateResponseData) [[GH-18635](https://github.com/hashicorp/vault/pull/18635)] +* sdk: Adding FindResponseSchema test helper to assist with response schema validation in tests [[GH-18636](https://github.com/hashicorp/vault/pull/18636)] +* secrets/aws: Update dependencies [[PR-17747](https://github.com/hashicorp/vault/pull/17747)] [[GH-17747](https://github.com/hashicorp/vault/pull/17747)] +* secrets/azure: Adds ability to persist an application for the lifetime of a role. 
[[GH-19096](https://github.com/hashicorp/vault/pull/19096)] +* secrets/azure: upgrades dependencies [[GH-17964](https://github.com/hashicorp/vault/pull/17964)] +* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] +* secrets/gcp: Upgrades dependencies [[GH-17871](https://github.com/hashicorp/vault/pull/17871)] +* secrets/kubernetes: Add /check endpoint to determine if environment variables are set [[GH-18](https://github.com/hashicorp/vault-plugin-secrets-kubernetes/pull/18)] [[GH-18587](https://github.com/hashicorp/vault/pull/18587)] +* secrets/kubernetes: add /check endpoint to determine if environment variables are set [[GH-19084](https://github.com/hashicorp/vault/pull/19084)] +* secrets/kv: Emit events on write if events system enabled [[GH-19145](https://github.com/hashicorp/vault/pull/19145)] +* secrets/kv: make upgrade synchronous when no keys to upgrade [[GH-19056](https://github.com/hashicorp/vault/pull/19056)] +* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)] +* secrets/pki: Add a new API that returns the serial numbers of revoked certificates on the local cluster [[GH-17779](https://github.com/hashicorp/vault/pull/17779)] +* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)] +* secrets/pki: Added a new API that allows external actors to craft a CRL through JSON parameters [[GH-18040](https://github.com/hashicorp/vault/pull/18040)] +* secrets/pki: Allow UserID Field (https://www.rfc-editor.org/rfc/rfc1274#section-9.3.1) to be set on Certificates when +allowed by role [[GH-18397](https://github.com/hashicorp/vault/pull/18397)] +* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. 
[[GH-17824](https://github.com/hashicorp/vault/pull/17824)] +* secrets/pki: Allow templating performance replication cluster- and issuer-specific AIA URLs. [[GH-18199](https://github.com/hashicorp/vault/pull/18199)] +* secrets/pki: Allow tidying of expired issuer certificates. [[GH-17823](https://github.com/hashicorp/vault/pull/17823)] +* secrets/pki: Allow tidying of the legacy ca_bundle, improving startup on post-migrated, seal-wrapped PKI mounts. [[GH-18645](https://github.com/hashicorp/vault/pull/18645)] +* secrets/pki: Respond with written data to `config/auto-tidy`, `config/crl`, and `roles/:role`. [[GH-18222](https://github.com/hashicorp/vault/pull/18222)] +* secrets/pki: Return issuer_id and issuer_name on /issuer/:issuer_ref/json endpoint. [[GH-18482](https://github.com/hashicorp/vault/pull/18482)] +* secrets/pki: Return new fields revocation_time_rfc3339 and issuer_id to existing certificate serial lookup api if it is revoked [[GH-17774](https://github.com/hashicorp/vault/pull/17774)] +* secrets/ssh: Allow removing SSH host keys from the dynamic keys feature. [[GH-18939](https://github.com/hashicorp/vault/pull/18939)] +* secrets/ssh: Evaluate ssh validprincipals user template before splitting [[GH-16622](https://github.com/hashicorp/vault/pull/16622)] +* secrets/transit: Add an optional reference field to batch operation items +which is repeated on batch responses to help more easily correlate inputs with outputs. [[GH-18243](https://github.com/hashicorp/vault/pull/18243)] +* secrets/transit: Add associated_data parameter for additional authenticated data in AEAD ciphers [[GH-17638](https://github.com/hashicorp/vault/pull/17638)] +* secrets/transit: Add support for PKCSv1_5_NoOID RSA signatures [[GH-17636](https://github.com/hashicorp/vault/pull/17636)] +* secrets/transit: Allow configuring whether upsert of keys is allowed. [[GH-18272](https://github.com/hashicorp/vault/pull/18272)] +* storage/raft: Add `retry_join_as_non_voter` config option. 
[[GH-18030](https://github.com/hashicorp/vault/pull/18030)] +* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] +* sys/internal/inspect: Creates an endpoint to look to inspect internal subsystems. [[GH-17789](https://github.com/hashicorp/vault/pull/17789)] +* sys/internal/inspect: Creates an endpoint to look to inspect internal subsystems. +* ui: Add algorithm-signer as a SSH Secrets Engine UI field [[GH-10299](https://github.com/hashicorp/vault/pull/10299)] +* ui: Add inline policy creation when creating an identity entity or group [[GH-17749](https://github.com/hashicorp/vault/pull/17749)] +* ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. [[GH-18787](https://github.com/hashicorp/vault/pull/18787)] +* ui: Enable typescript for future development [[GH-17927](https://github.com/hashicorp/vault/pull/17927)] +* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] +* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] +* ui: adds allowed_response_headers as param for secret engine mount config [[GH-19216](https://github.com/hashicorp/vault/pull/19216)] +* ui: consolidate all tag usage [[GH-17866](https://github.com/hashicorp/vault/pull/17866)] +* ui: mfa: use proper request id generation [[GH-17835](https://github.com/hashicorp/vault/pull/17835)] +* ui: remove wizard [[GH-19220](https://github.com/hashicorp/vault/pull/19220)] +* ui: update DocLink component to use new host url: developer.hashicorp.com [[GH-18374](https://github.com/hashicorp/vault/pull/18374)] +* ui: update TTL picker for consistency 
[[GH-18114](https://github.com/hashicorp/vault/pull/18114)] +* ui: use the combined activity log (partial + historic) API for client count dashboard and remove use of monthly endpoint [[GH-17575](https://github.com/hashicorp/vault/pull/17575)] +* vault/diagnose: Upgrade `go.opentelemetry.io/otel`, `go.opentelemetry.io/otel/sdk`, `go.opentelemetry.io/otel/trace` to v1.11.2 [[GH-18589](https://github.com/hashicorp/vault/pull/18589)] + +DEPRECATIONS: + +* secrets/ad: Marks the Active Directory (AD) secrets engine as deprecated. [[GH-19334](https://github.com/hashicorp/vault/pull/19334)] + +BUG FIXES: + +* api: Remove timeout logic from ReadRaw functions and add ReadRawWithContext [[GH-18708](https://github.com/hashicorp/vault/pull/18708)] +* auth/alicloud: fix regression in vault login command that caused login to fail [[GH-19005](https://github.com/hashicorp/vault/pull/19005)] +* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] +* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] +* auth/cert: Address a race condition accessing the loaded crls without a lock [[GH-18945](https://github.com/hashicorp/vault/pull/18945)] +* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] [[GH-18716](https://github.com/hashicorp/vault/pull/18716)] +* auth/kubernetes: fixes and dep updates for the auth-kubernetes plugin (see plugin changelog for details) [[GH-19094](https://github.com/hashicorp/vault/pull/19094)] +* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] +* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] +* cli/kv: 
skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] +* cli/pki: Decode integer values properly in health-check configuration file [[GH-19265](https://github.com/hashicorp/vault/pull/19265)] +* cli/pki: Fix path for role health-check warning messages [[GH-19274](https://github.com/hashicorp/vault/pull/19274)] +* cli/pki: Properly report permission issues within health-check mount tune checks [[GH-19276](https://github.com/hashicorp/vault/pull/19276)] +* cli/transit: Fix import, import-version command invocation [[GH-19373](https://github.com/hashicorp/vault/pull/19373)] +* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] +* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)] +* cli: Remove empty table heading for `vault secrets list -detailed` output. [[GH-17577](https://github.com/hashicorp/vault/pull/17577)] +* command/namespace: Fix vault cli namespace patch examples in help text. [[GH-18143](https://github.com/hashicorp/vault/pull/18143)] +* core (enterprise): Fix missing quotation mark in error message +* core (enterprise): Fix panic that could occur with SSCT alongside invoking external plugins for revocation. +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core (enterprise): Fix perf standby WAL streaming silently failures when replication setup happens at a bad time. +* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use storage backend other than `raft` or `consul`. 
+* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. [[GH-18766](https://github.com/hashicorp/vault/pull/18766)] +* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)] +* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)] +* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)] +* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)] +* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)] +* core/auth: Return a 403 instead of a 500 for wrapping requests when token is not provided [[GH-18859](https://github.com/hashicorp/vault/pull/18859)] +* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace +* core/managed-keys (enterprise): Return better error messages when encountering key creation failures +* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility +* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. +* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. 
+* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] +* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)] +* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)] +* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] +* core: Fix spurious `permission denied` for all HelpOperations on sudo-protected paths [[GH-18568](https://github.com/hashicorp/vault/pull/18568)] +* core: Fix vault operator init command to show the right curl string with -output-curl-string and right policy hcl with -output-policy [[GH-17514](https://github.com/hashicorp/vault/pull/17514)] +* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)] +* core: Linux packages now have vendor label and set the default label to HashiCorp. +This fix is implemented for any future releases, but will not be updated for historical releases. +* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] +* core: Refactor lock grabbing code to simplify stateLock deadlock investigations [[GH-17187](https://github.com/hashicorp/vault/pull/17187)] +* core: fix GPG encryption to support subkeys. [[GH-16224](https://github.com/hashicorp/vault/pull/16224)] +* core: fix a start up race condition where performance standbys could go into a +mount loop if default policies are not yet synced from the active node. 
[[GH-17801](https://github.com/hashicorp/vault/pull/17801)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. +* core: fix race when using SystemView.ReplicationState outside of a request context [[GH-17186](https://github.com/hashicorp/vault/pull/17186)] +* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] +* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] +* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* core: trying to unseal with the wrong key now returns HTTP 400 [[GH-17836](https://github.com/hashicorp/vault/pull/17836)] +* credential/cert: adds error message if no tls connection is found during the AliasLookahead operation [[GH-17904](https://github.com/hashicorp/vault/pull/17904)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. [[GH-18401](https://github.com/hashicorp/vault/pull/18401)] +* kmip (enterprise): Fix a problem with some multi-part MAC Verify operations. +* kmip (enterprise): Only require data to be full blocks on encrypt/decrypt operations using CBC and ECB block cipher modes. +* license (enterprise): Fix bug where license would update even if the license didn't change. 
+* licensing (enterprise): update autoloaded license cache after reload +* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] +* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] +* openapi: fix gen_openapi.sh script to correctly load vault plugins [[GH-17752](https://github.com/hashicorp/vault/pull/17752)] +* plugins/kv: KV v2 returns 404 instead of 500 for request paths that incorrectly include a trailing slash. [[GH-17339](https://github.com/hashicorp/vault/pull/17339)] +* plugins: Allow running external plugins which override deprecated builtins. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] +* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)] +* plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error. [[GH-18173](https://github.com/hashicorp/vault/pull/18173)] +* plugins: Only report deprecation status for builtin plugins. [[GH-17816](https://github.com/hashicorp/vault/pull/17816)] +* plugins: Skip loading but still mount data associated with missing plugins on unseal. [[GH-18189](https://github.com/hashicorp/vault/pull/18189)] +* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] +* replication (enterprise): Fix bug where reloading external plugin on a secondary would +break replication. +* sdk: Don't panic if system view or storage methods called during plugin setup. 
[[GH-18210](https://github.com/hashicorp/vault/pull/18210)] +* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] +* secrets/ad: Fix bug where updates to config would fail if password isn't provided [[GH-19061](https://github.com/hashicorp/vault/pull/19061)] +* secrets/gcp: fix issue where IAM bindings were not preserved during policy update [[GH-19018](https://github.com/hashicorp/vault/pull/19018)] +* secrets/mongodb-atlas: Fix a bug that did not allow WAL rollback to handle partial failures when creating API keys [[GH-19111](https://github.com/hashicorp/vault/pull/19111)] +* secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler [[GH-18184](https://github.com/hashicorp/vault/pull/18184)] +* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] +* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)] +* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. [[GH-17693](https://github.com/hashicorp/vault/pull/17693)] +* secrets/pki: Fixes duplicate otherName in certificates created by the sign-verbatim endpoint. [[GH-16700](https://github.com/hashicorp/vault/pull/16700)] +* secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. [[GH-18938](https://github.com/hashicorp/vault/pull/18938)] +* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. 
[[GH-17497](https://github.com/hashicorp/vault/pull/17497)] +* secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) [[GH-19037](https://github.com/hashicorp/vault/pull/19037)] +* secrets/pki: consistently use UTC for CA's notAfter exceeded error message [[GH-18984](https://github.com/hashicorp/vault/pull/18984)] +* secrets/pki: fix race between tidy's cert counting and tidy status reporting. [[GH-18899](https://github.com/hashicorp/vault/pull/18899)] +* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] +* secrets/transit: Honor `partial_success_response_code` on decryption failures. [[GH-18310](https://github.com/hashicorp/vault/pull/18310)] +* server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled [[GH-19311](https://github.com/hashicorp/vault/pull/19311)] +* storage/raft (enterprise): An already joined node can rejoin by wiping storage +and re-issuing a join request, but in doing so could transiently become a +non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)] +* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] +* storage/raft: Fix race with follower heartbeat tracker during teardown. [[GH-18704](https://github.com/hashicorp/vault/pull/18704)] +* ui/keymgmt: Sets the defaultValue for type when creating a key. 
[[GH-17407](https://github.com/hashicorp/vault/pull/17407)] +* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] +* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] +* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] +* ui: Remove `default` and add `default-service` and `default-batch` to UI token_type for auth mount and tuning. [[GH-19290](https://github.com/hashicorp/vault/pull/19290)] +* ui: Remove default value of 30 to TtlPicker2 if no value is passed in. [[GH-17376](https://github.com/hashicorp/vault/pull/17376)] +* ui: allow selection of "default" for ssh algorithm_signer in web interface [[GH-17894](https://github.com/hashicorp/vault/pull/17894)] +* ui: cleanup unsaved auth method ember data record when navigating away from mount backend form [[GH-18651](https://github.com/hashicorp/vault/pull/18651)] +* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] +* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] +* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19403](https://github.com/hashicorp/vault/pull/19403)] +* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] + +## 1.12.11 +### September 13, 2023 + +SECURITY: + +* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. 
[[GH-22852](https://github.com/hashicorp/vault/pull/22852)] + +IMPROVEMENTS: + +* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)] +* kmip (enterprise): reduce latency of KMIP operation handling + +BUG FIXES: + +* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)] +* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)] +* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. [[GH-22760](https://github.com/hashicorp/vault/pull/22760)] +* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)] +* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable + +## 1.12.10 +### August 30, 2023 + +CHANGES: + +* core: Bump Go version to 1.19.12. + +IMPROVEMENTS: + +* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)] +* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase. +* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)] +* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)] + +BUG FIXES: + +* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. 
[[GH-22523](https://github.com/hashicorp/vault/pull/22523)] +* core (enterprise): Remove MFA Configuration for namespace when deleting namespace +* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context. +Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)] +* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)] +* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)] +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)] +* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)] +* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node is observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)] +* replication (enterprise): Fix bug sync invalidate CoreReplicatedClusterInfoPath +* replication (enterprise): Fixing a bug by which the atomicity of a merkle diff result could be affected. This means it could be a source of a merkle-diff & sync process failing to switch into stream-wal mode afterwards. +* sdk/ldaputil: Properly escape user filters when using UPN domains +sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)] +* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. 
[[GH-22332](https://github.com/hashicorp/vault/pull/22332)] +* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute +* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)] +* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)] + +## 1.12.9 +### July 25, 2023 + +SECURITY: + +* core/namespace (enterprise): An unhandled error in Vault Enterprise’s namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. [[HSEC_2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)] + +CHANGES: + +* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests, a warning will be returned if max_ttl was applied. + +IMPROVEMENTS: + +* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise. +* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary +* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling + +BUG FIXES: + +* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-24170](https://github.com/hashicorp/vault/pull/24170)] +* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. 
[[GH-20965](https://github.com/hashicorp/vault/pull/20965)] +* replication (enterprise): update primary cluster address after DR failover +* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21633](https://github.com/hashicorp/vault/pull/21633)] +* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`. [[GH-21316](https://github.com/hashicorp/vault/pull/21316)] +* secrets/pki: Support setting both maintain_stored_certificate_counts=false and publish_stored_certificate_count_metrics=false explicitly in tidy config. [[GH-20664](https://github.com/hashicorp/vault/pull/20664)] +* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present +* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required +* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)] +* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)] + +## 1.12.8 +### June 21, 2023 +BREAKING CHANGES: + +* secrets/pki: Maintaining running count of certificates will be turned off by default. +To re-enable keeping these metrics available on the tidy status endpoint, enable +maintain_stored_certificate_counts on tidy-config, to also publish them to the +metrics consumer, enable publish_stored_certificate_count_metrics . 
[[GH-18186](https://github.com/hashicorp/vault/pull/18186)] + +CHANGES: + +* core: Bump Go version to 1.19.10. + +FEATURES: + +* **Automated License Utilization Reporting**: Added automated license +utilization reporting, which sends minimal product-license [metering +data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) +to HashiCorp without requiring you to manually collect and report them. +* core (enterprise): Add background worker for automatic reporting of billing +information. [[GH-19625](https://github.com/hashicorp/vault/pull/19625)] + +IMPROVEMENTS: + +* api: GET ... /sys/internal/counters/activity?current_billing_period=true now +results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)] +* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] +* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): vault server command now allows for opt-out of automated +reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. 
[[GH-3939](https://github.com/hashicorp/vault/pull/3939)] +* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] +* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] +* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] + +BUG FIXES: + +* core (enterprise): Don't delete backend stored data that appears to be filterable +on this secondary if we don't have a corresponding mount entry. +* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. [[GH-18766](https://github.com/hashicorp/vault/pull/18766)] +* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)] +* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)] +* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)] +* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)] +* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)] +* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will +have its own changelog entry. Fix wrong lock used in ListAuths link meta interface implementation. 
[[GH-21260](https://github.com/hashicorp/vault/pull/21260)] +* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] +* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] +* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] +* identity: Fixes duplicate groups creation with the same name but unique IDs. [[GH-20964](https://github.com/hashicorp/vault/pull/20964)] +* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover +* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs +* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)] + +## 1.12.7 +### June 08, 2023 + +SECURITY: + +* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. [[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)] + +CHANGES: + +* core: Bump Go version to 1.19.9. +* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] + +IMPROVEMENTS: + +* audit: add a `mount_point` field to audit requests and response entries [[GH-20411](https://github.com/hashicorp/vault/pull/20411)] +* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when +`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. 
[[GH-20609](https://github.com/hashicorp/vault/pull/20609)] +* core: include namespace path in granting_policies block of audit log +* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)] +* sdk/framework: Fix non-deterministic ordering of 'required' fields in OpenAPI spec [[GH-20881](https://github.com/hashicorp/vault/pull/20881)] +* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] +* ui: update TTL picker for consistency [[GH-18114](https://github.com/hashicorp/vault/pull/18114)] + +BUG FIXES: + +* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] +* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] +* cli: CLI should take days as a unit of time for ttl like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)] +* cli: disable printing flags warnings messages for the ssh command [[GH-20502](https://github.com/hashicorp/vault/pull/20502)] +* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. +* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. +* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace +* core: prevent panic on login after namespace is deleted that had mfa enforcement [[GH-20375](https://github.com/hashicorp/vault/pull/20375)] +* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. +* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. 
+* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation +* secrets/transit: Fix export of HMAC-only key, correctly exporting the key used for sign operations. For consumers of the previously incorrect key, use the plaintext export to retrieve these incorrect keys and import them as new versions. +secrets/transit: Fix bug related to shorter dedicated HMAC key sizing. +sdk/helper/keysutil: New HMAC type policies will have HMACKey equal to Key and be copied over on import. [[GH-20864](https://github.com/hashicorp/vault/pull/20864)] +* ui: Fixes issue unsealing cluster for seal types other than shamir [[GH-20897](https://github.com/hashicorp/vault/pull/20897)] + +## 1.12.6 +### April 26, 2023 + +CHANGES: + +* core: Bump Go version to 1.19.8. + +IMPROVEMENTS: + +* cli/namespace: Add detailed flag to output additional namespace information +such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] +* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] +* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the +`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] +* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration +for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)] +* secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. 
[[GH-20201](https://github.com/hashicorp/vault/pull/20201)] + +BUG FIXES: + +* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] +* command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows [[GH-20257](https://github.com/hashicorp/vault/pull/20257)] +* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. +* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur +* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` +resulting in 412 errors. +* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)] +* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] +* kmip (enterprise): Fix a problem decrypting with keys that have no Process Start Date attribute. +* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] +* pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it [[GH-20220](https://github.com/hashicorp/vault/pull/20220)] +* replication (enterprise): Fix a caching issue when replicating filtered data to +a performance secondary. This resulted in the data being set to nil in the cache +and a "invalid value" error being returned from the API. 
+* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil +* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. [[GH-20341](https://github.com/hashicorp/vault/pull/20341)] +* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens +* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] +* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] +* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] +* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] + +## 1.12.5 +### March 29, 2023 + +SECURITY: + +* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] +* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. 
[[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] +* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] + +IMPROVEMENTS: + +* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id +website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)] +* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch +option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)] +* core: validate name identifiers in mssql physical storage backend prior use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)] + +BUG FIXES: + +* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)] +* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error. +* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. 
[[GH-19585](https://github.com/hashicorp/vault/pull/19585)] +* kmip (enterprise): Do not require attribute Cryptographic Usage Mask when registering Secret Data managed objects. +* kmip (enterprise): Fix a problem forwarding some requests to the active node. +* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)] +* secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. [[GH-19641](https://github.com/hashicorp/vault/pull/19641)] +* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions +* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] +* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] +* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] + +## 1.12.4 +### March 01, 2023 + +SECURITY: +* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999 has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HCSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] + +CHANGES: + +* core: Bump Go version to 1.19.6. + +IMPROVEMENTS: + +* secrets/database: Adds error message requiring password on root credential rotation. 
[[GH-19103](https://github.com/hashicorp/vault/pull/19103)] +* ui: remove wizard [[GH-19220](https://github.com/hashicorp/vault/pull/19220)] + +BUG FIXES: + +* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core (enterprise): Fix perf standby WAL streaming silently failures when replication setup happens at a bad time. +* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] +* license (enterprise): Fix bug where license would update even if the license didn't change. +* replication (enterprise): Fix bug where reloading external plugin on a secondary would +break replication. +* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18207](https://github.com/hashicorp/vault/pull/18207)] +* secrets/pki: Revert fix for PR [18938](https://github.com/hashicorp/vault/pull/18938) [[GH-19037](https://github.com/hashicorp/vault/pull/19037)] +* server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled [[GH-19311](https://github.com/hashicorp/vault/pull/19311)] +* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] +* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] +* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19410](https://github.com/hashicorp/vault/pull/19410)] +* ui: show Get credentials button for static roles detail page when a user has the proper permissions. 
[[GH-19190](https://github.com/hashicorp/vault/pull/19190)] + +## 1.12.3 +### February 6, 2023 + +CHANGES: + +* core: Bump Go version to 1.19.4. + +IMPROVEMENTS: + +* audit: Include stack trace when audit logging recovers from a panic. [[GH-18121](https://github.com/hashicorp/vault/pull/18121)] +* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] +* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. +* core: Add read support to `sys/loggers` and `sys/loggers/:name` endpoints [[GH-17979](https://github.com/hashicorp/vault/pull/17979)] +* plugins: Let Vault unseal and mount deprecated builtin plugins in a +deactivated state if this is not the first unseal after an upgrade. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] +* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] +* secrets/kv: new KVv2 mounts and KVv1 mounts without any keys will upgrade synchronously, allowing for instant use [[GH-17406](https://github.com/hashicorp/vault/pull/17406)] +* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] +* ui: Added JWT authentication warning message about blocked pop-up windows and web browser settings. 
[[GH-18787](https://github.com/hashicorp/vault/pull/18787)] +* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] +* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] + +BUG FIXES: + +* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] +* auth/cert: Address a race condition accessing the loaded crls without a lock [[GH-18945](https://github.com/hashicorp/vault/pull/18945)] +* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#173](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/173)] [[GH-18716](https://github.com/hashicorp/vault/pull/18716)] +* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] +* command/namespace: Fix vault cli namespace patch examples in help text. [[GH-18143](https://github.com/hashicorp/vault/pull/18143)] +* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. +* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace +* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. +* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] +* core/seal: Fix regression handling of the key_id parameter in seal configuration HCL. [[GH-17612](https://github.com/hashicorp/vault/pull/17612)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. 
+* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* expiration: Prevent panics on perf standbys when an irrevocable lease gets deleted. [[GH-18401](https://github.com/hashicorp/vault/pull/18401)] +* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. +* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. +* kmip (enterprise): Fix Query operation response that omitted streaming capability and supported profiles. +* licensing (enterprise): update autoloaded license cache after reload +* plugins: Allow running external plugins which override deprecated builtins. [[GH-17879](https://github.com/hashicorp/vault/pull/17879)] +* plugins: Listing all plugins while audit logging is enabled will no longer result in an internal server error. [[GH-18173](https://github.com/hashicorp/vault/pull/18173)] +* plugins: Skip loading but still mount data associated with missing plugins on unseal. [[GH-18189](https://github.com/hashicorp/vault/pull/18189)] +* sdk: Don't panic if system view or storage methods called during plugin setup. [[GH-18210](https://github.com/hashicorp/vault/pull/18210)] +* secrets/pki: Address nil panic when an empty POST request is sent to the OCSP handler [[GH-18184](https://github.com/hashicorp/vault/pull/18184)] +* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] +* secrets/pki: OCSP GET request parameter was not being URL unescaped before processing. [[GH-18938](https://github.com/hashicorp/vault/pull/18938)] +* secrets/pki: fix race between tidy's cert counting and tidy status reporting. 
[[GH-18899](https://github.com/hashicorp/vault/pull/18899)] +* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] +* secrets/transit: Honor `partial_success_response_code` on decryption failures. [[GH-18310](https://github.com/hashicorp/vault/pull/18310)] +* storage/raft (enterprise): An already joined node can rejoin by wiping storage +and re-issuing a join request, but in doing so could transiently become a +non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)] +* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] +* ui: cleanup unsaved auth method ember data record when navigating away from mount backend form [[GH-18651](https://github.com/hashicorp/vault/pull/18651)] +* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] +## 1.12.2 +### November 30, 2022 + +CHANGES: + +* core: Bump Go version to 1.19.3. +* plugins: Mounts can no longer be pinned to a specific _builtin_ version. Mounts previously pinned to a specific builtin version will now automatically upgrade to the latest builtin version, and may now be overridden if an unversioned plugin of the same name and type is registered. Mounts using plugin versions without `builtin` in their metadata remain unaffected. [[GH-18051](https://github.com/hashicorp/vault/pull/18051)] + +IMPROVEMENTS: + +* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] +* storage/raft: Add `retry_join_as_non_voter` config option. 
[[GH-18030](https://github.com/hashicorp/vault/pull/18030)] + +BUG FIXES: + +* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] +* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] +* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] +* core (enterprise): Supported storage check in `vault server` command will no longer prevent startup. Instead, a warning will be logged if configured to use storage backend other than `raft` or `consul`. +* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. +* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] +* core: fix a start up race condition where performance standbys could go into a + mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] +* plugins: Only report deprecation status for builtin plugins. [[GH-17816](https://github.com/hashicorp/vault/pull/17816)] +* plugins: Vault upgrades will no longer fail if a mount has been created using an explicit builtin plugin version. 
[[GH-18051](https://github.com/hashicorp/vault/pull/18051)] +* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] +* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18086](https://github.com/hashicorp/vault/pull/18086)] +* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18111](https://github.com/hashicorp/vault/pull/18111)] +* secrets/pki: Fix upgrade of missing expiry, delta_rebuild_interval by setting them to the default. [[GH-17693](https://github.com/hashicorp/vault/pull/17693)] +* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] +* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] + +## 1.12.1 +### November 2, 2022 + +IMPROVEMENTS: + +* api: Support VAULT_DISABLE_REDIRECTS environment variable (and --disable-redirects flag) to disable default client behavior and prevent the client following any redirection responses. [[GH-17352](https://github.com/hashicorp/vault/pull/17352)] +* database/snowflake: Allow parallel requests to Snowflake [[GH-17593](https://github.com/hashicorp/vault/pull/17593)] +* plugins: Add plugin version information to key plugin lifecycle log lines. [[GH-17430](https://github.com/hashicorp/vault/pull/17430)] +* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] + +BUG FIXES: + +* cli: Remove empty table heading for `vault secrets list -detailed` output. 
[[GH-17577](https://github.com/hashicorp/vault/pull/17577)] +* core/managed-keys (enterprise): Return better error messages when encountering key creation failures +* core/managed-keys (enterprise): Switch to using hash length as PSS Salt length within the test/sign api for better PKCS#11 compatibility +* core: Fix panic caused in Vault Agent when rendering certificate templates [[GH-17419](https://github.com/hashicorp/vault/pull/17419)] +* core: Fixes spurious warnings being emitted relating to "unknown or unsupported fields" for JSON config [[GH-17660](https://github.com/hashicorp/vault/pull/17660)] +* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] +* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] +* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail. +* kmip (enterprise): Fix selection of Cryptographic Parameters for Encrypt/Decrypt operations. +* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] +* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] +* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] + +## 1.12.0 +### October 13, 2022 + +SECURITY: + +* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. 
[[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] + +CHANGES: + +* api: Exclusively use `GET /sys/plugins/catalog` endpoint for listing plugins, and add `details` field to list responses. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)] +* auth: `GET /sys/auth/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] +* auth: `GET /sys/auth` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] +* auth: `POST /sys/auth/:type` endpoint response contains a warning for `Deprecated` auth methods. [[GH-17058](https://github.com/hashicorp/vault/pull/17058)] +* auth: `auth enable` returns an error and `POST /sys/auth/:type` endpoint reports an error for `Pending Removal` auth methods. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)] +* core/entities: Fixed stranding of aliases upon entity merge, and require explicit selection of which aliases should be kept when some must be deleted [[GH-16539](https://github.com/hashicorp/vault/pull/16539)] +* core: Bump Go version to 1.19.2. +* core: Validate input parameters for vault operator init command. Vault 1.12 CLI version is needed to run operator init now. [[GH-16379](https://github.com/hashicorp/vault/pull/16379)] +* identity: a request to `/identity/group` that includes `member_group_ids` that contains a cycle will now be responded to with a 400 rather than 500 [[GH-15912](https://github.com/hashicorp/vault/pull/15912)] +* licensing (enterprise): Terminated licenses will no longer result in shutdown. Instead, upgrades will not be allowed if the license expiration time is before the build date of the binary. 
+* plugins: Add plugin version to auth register, list, and mount table [[GH-16856](https://github.com/hashicorp/vault/pull/16856)] +* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint contains deprecation status for builtin plugins. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] +* plugins: `GET /sys/plugins/catalog/:type/:name` endpoint now returns an additional `version` field in the response data. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] +* plugins: `GET /sys/plugins/catalog/` endpoint contains deprecation status in `detailed` list. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] +* plugins: `GET /sys/plugins/catalog` endpoint now returns an additional `detailed` field in the response data with a list of additional plugin metadata. [[GH-16688](https://github.com/hashicorp/vault/pull/16688)] +* plugins: `plugin info` displays deprecation status for builtin plugins. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] +* plugins: `plugin list` now accepts a `-detailed` flag, which display deprecation status and version info. [[GH-17077](https://github.com/hashicorp/vault/pull/17077)] +* secrets/azure: Removed deprecated AAD graph API support from the secrets engine. [[GH-17180](https://github.com/hashicorp/vault/pull/17180)] +* secrets: All database-specific (standalone DB) secrets engines are now marked `Pending Removal`. [[GH-17038](https://github.com/hashicorp/vault/pull/17038)] +* secrets: `GET /sys/mounts/:name` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] +* secrets: `GET /sys/mounts` endpoint now returns an additional `deprecation_status` field in the response data for builtins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] +* secrets: `POST /sys/mounts/:type` endpoint response contains a warning for `Deprecated` secrets engines. 
[[GH-17058](https://github.com/hashicorp/vault/pull/17058)] +* secrets: `secrets enable` returns an error and `POST /sys/mount/:type` endpoint reports an error for `Pending Removal` secrets engines. [[GH-17005](https://github.com/hashicorp/vault/pull/17005)] + +FEATURES: + +* **GCP Cloud KMS support for managed keys**: Managed keys now support using GCP Cloud KMS keys +* **LDAP Secrets Engine**: Adds the `ldap` secrets engine with service account check-out functionality for all supported schemas. [[GH-17152](https://github.com/hashicorp/vault/pull/17152)] +* **OCSP Responder**: PKI mounts now have an OCSP responder that implements a subset of RFC6960, answering single serial number OCSP requests for a specific cluster's revoked certificates in a mount. [[GH-16723](https://github.com/hashicorp/vault/pull/16723)] +* **Redis DB Engine**: Adding the new Redis database engine that supports the generation of static and dynamic user roles and root credential rotation on a stand alone Redis server. [[GH-17070](https://github.com/hashicorp/vault/pull/17070)] +* **Redis ElastiCache DB Plugin**: Added Redis ElastiCache as a built-in plugin. [[GH-17075](https://github.com/hashicorp/vault/pull/17075)] +* **Secrets/auth plugin multiplexing**: manage multiple plugin configurations with a single plugin process [[GH-14946](https://github.com/hashicorp/vault/pull/14946)] +* **Transform Key Import (BYOK)**: The transform secrets engine now supports importing keys for tokenization and FPE transformations +* HCP (enterprise): Adding foundational support for self-managed vault nodes to securely communicate with [HashiCorp Cloud Platform](https://cloud.hashicorp.com) as an opt-in feature +* ui: UI support for Okta Number Challenge. [[GH-15998](https://github.com/hashicorp/vault/pull/15998)] +* **Plugin Versioning**: Vault supports registering, managing, and running plugins with semantic versions specified. 
+ +IMPROVEMENTS: + +* core/managed-keys (enterprise): Allow operators to specify PSS signatures and/or hash algorithm for the test/sign api +* activity (enterprise): Added new clients unit tests to test accuracy of estimates +* agent/auto-auth: Add `exit_on_err` which when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)] +* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. [[GH-15986](https://github.com/hashicorp/vault/pull/15986)] +* agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)] +* agent: JWT auto auth now supports a `remove_jwt_after_reading` config option which defaults to true. [[GH-11969](https://github.com/hashicorp/vault/pull/11969)] +* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)] +* api/mfa: Add namespace path to the MFA read/list endpoint [[GH-16911](https://github.com/hashicorp/vault/pull/16911)] +* api: Add a sentinel error for missing KV secrets [[GH-16699](https://github.com/hashicorp/vault/pull/16699)] +* auth/alicloud: Enables AliCloud roles to be compatible with Vault's role based quotas. [[GH-17251](https://github.com/hashicorp/vault/pull/17251)] +* auth/approle: SecretIDs can now be generated with an per-request specified TTL and num_uses. +When either the ttl and num_uses fields are not specified, the role's configuration is used. [[GH-14474](https://github.com/hashicorp/vault/pull/14474)] +* auth/aws: PKCS7 signatures will now use SHA256 by default in prep for Go 1.18 [[GH-16455](https://github.com/hashicorp/vault/pull/16455)] +* auth/azure: Enables Azure roles to be compatible with Vault's role based quotas. 
[[GH-17194](https://github.com/hashicorp/vault/pull/17194)] +* auth/cert: Add metadata to identity-alias [[GH-14751](https://github.com/hashicorp/vault/pull/14751)] +* auth/cert: Operators can now specify a CRL distribution point URL, in which case the cert auth engine will fetch and use the CRL from that location rather than needing to push CRLs directly to auth/cert. [[GH-17136](https://github.com/hashicorp/vault/pull/17136)] +* auth/cf: Enables CF roles to be compatible with Vault's role based quotas. [[GH-17196](https://github.com/hashicorp/vault/pull/17196)] +* auth/gcp: Add support for GCE regional instance groups [[GH-16435](https://github.com/hashicorp/vault/pull/16435)] +* auth/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17160](https://github.com/hashicorp/vault/pull/17160)] +* auth/jwt: Adds support for Microsoft US Gov L4 to the Azure provider for groups fetching. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)] +* auth/jwt: Improves detection of Windows Subsystem for Linux (WSL) for CLI-based logins. [[GH-16525](https://github.com/hashicorp/vault/pull/16525)] +* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)] +* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the Kerberos config in Vault. This removes any instance names found in the keytab service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] +* auth/kubernetes: Role resolution for K8S Auth [[GH-156](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/156)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)] +* auth/oci: Add support for role resolution. [[GH-17212](https://github.com/hashicorp/vault/pull/17212)] +* auth/oidc: Adds support for group membership parsing when using SecureAuth as an OIDC provider. 
[[GH-16274](https://github.com/hashicorp/vault/pull/16274)] +* cli: CLI commands will print a warning if flags will be ignored because they are passed after positional arguments. [[GH-16441](https://github.com/hashicorp/vault/pull/16441)] +* cli: `auth` and `secrets` list `-detailed` commands now show Deprecation Status for builtin plugins. [[GH-16849](https://github.com/hashicorp/vault/pull/16849)] +* cli: `vault plugin list` now has a `details` field in JSON format, and version and type information in table format. [[GH-17347](https://github.com/hashicorp/vault/pull/17347)] +* command/audit: Improve missing type error message [[GH-16409](https://github.com/hashicorp/vault/pull/16409)] +* command/server: add `-dev-tls` and `-dev-tls-cert-dir` subcommands to create a Vault dev server with generated certificates and private key. [[GH-16421](https://github.com/hashicorp/vault/pull/16421)] +* command: Fix shell completion for KV v2 mounts [[GH-16553](https://github.com/hashicorp/vault/pull/16553)] +* core (enterprise): Add HTTP PATCH support for namespaces with an associated `namespace patch` CLI command +* core (enterprise): Add check to `vault server` command to ensure configured storage backend is supported. 
+* core (enterprise): Add custom metadata support for namespaces +* core/activity: generate hyperloglogs containing clientIds for each month during precomputation [[GH-16146](https://github.com/hashicorp/vault/pull/16146)] +* core/activity: refactor activity log api to reuse partial api functions in activity endpoint when current month is specified [[GH-16162](https://github.com/hashicorp/vault/pull/16162)] +* core/activity: use monthly hyperloglogs to calculate new clients approximation for current month [[GH-16184](https://github.com/hashicorp/vault/pull/16184)] +* core/quotas (enterprise): Added ability to add path suffixes for lease-count resource quotas +* core/quotas (enterprise): Added ability to add role information for lease-count resource quotas, to limit login requests on auth mounts made using that role +* core/quotas: Added ability to add path suffixes for rate-limit resource quotas [[GH-15989](https://github.com/hashicorp/vault/pull/15989)] +* core/quotas: Added ability to add role information for rate-limit resource quotas, to limit login requests on auth mounts made using that role [[GH-16115](https://github.com/hashicorp/vault/pull/16115)] +* core: Activity log goroutine management improvements to allow tests to be more deterministic. [[GH-17028](https://github.com/hashicorp/vault/pull/17028)] +* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] +* core: Handle and log deprecated builtin mounts. Introduces `VAULT_ALLOW_PENDING_REMOVAL_MOUNTS` to override shutdown and error when attempting to mount `Pending Removal` builtin plugins. 
[[GH-17005](https://github.com/hashicorp/vault/pull/17005)] +* core: Limit activity log client count usage by namespaces [[GH-16000](https://github.com/hashicorp/vault/pull/16000)] +* core: Upgrade github.com/hashicorp/raft [[GH-16609](https://github.com/hashicorp/vault/pull/16609)] +* core: remove gox [[GH-16353](https://github.com/hashicorp/vault/pull/16353)] +* docs: Clarify the behaviour of local mounts in the context of DR replication [[GH-16218](https://github.com/hashicorp/vault/pull/16218)] +* identity/oidc: Adds support for detailed listing of clients and providers. [[GH-16567](https://github.com/hashicorp/vault/pull/16567)] +* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] +* identity/oidc: allows filtering the list providers response by an allowed_client_id [[GH-16181](https://github.com/hashicorp/vault/pull/16181)] +* identity: Prevent possibility of data races on entity creation. [[GH-16487](https://github.com/hashicorp/vault/pull/16487)] +* physical/postgresql: pass context to queries to propagate timeouts and cancellations on requests. [[GH-15866](https://github.com/hashicorp/vault/pull/15866)] +* plugins/multiplexing: Added multiplexing support to database plugins if run as external plugins [[GH-16995](https://github.com/hashicorp/vault/pull/16995)] +* plugins: Add Deprecation Status method to builtinregistry. [[GH-16846](https://github.com/hashicorp/vault/pull/16846)] +* plugins: Added environment variable flag to opt-out specific plugins from multiplexing [[GH-16972](https://github.com/hashicorp/vault/pull/16972)] +* plugins: Adding version to plugin GRPC interface [[GH-17088](https://github.com/hashicorp/vault/pull/17088)] +* plugins: Plugin catalog supports registering and managing plugins with semantic version information. 
[[GH-16688](https://github.com/hashicorp/vault/pull/16688)] +* replication (enterprise): Fix race in merkle sync that can prevent streaming by returning key value matching provided hash if found in log shipper buffer. +* secret/nomad: allow reading CA and client auth certificate from /nomad/config/access [[GH-15809](https://github.com/hashicorp/vault/pull/15809)] +* secret/pki: Add RSA PSS signature support for issuing certificates, signing CRLs [[GH-16519](https://github.com/hashicorp/vault/pull/16519)] +* secret/pki: Add signature_bits to sign-intermediate, sign-verbatim endpoints [[GH-16124](https://github.com/hashicorp/vault/pull/16124)] +* secret/pki: Allow issuing certificates with non-domain, non-email Common Names from roles, sign-verbatim, and as issuers (`cn_validations`). [[GH-15996](https://github.com/hashicorp/vault/pull/15996)] +* secret/pki: Allow specifying SKID for cross-signed issuance from older Vault versions. [[GH-16494](https://github.com/hashicorp/vault/pull/16494)] +* secret/transit: Allow importing Ed25519 keys from PKCS#8 with inner RFC 5915 ECPrivateKey blobs (NSS-wrapped keys). [[GH-15742](https://github.com/hashicorp/vault/pull/15742)] +* secrets/ad: set config default length only if password_policy is missing [[GH-16140](https://github.com/hashicorp/vault/pull/16140)] +* secrets/azure: Adds option to permanently delete AzureAD objects created by Vault. [[GH-17045](https://github.com/hashicorp/vault/pull/17045)] +* secrets/database/hana: Add ability to customize dynamic usernames [[GH-16631](https://github.com/hashicorp/vault/pull/16631)] +* secrets/database/snowflake: Add multiplexing support [[GH-17159](https://github.com/hashicorp/vault/pull/17159)] +* secrets/gcp: Updates dependencies: `google.golang.org/api@v0.83.0`, `github.com/hashicorp/go-gcp-common@v0.8.0`. [[GH-17174](https://github.com/hashicorp/vault/pull/17174)] +* secrets/gcpkms: Update dependencies: google.golang.org/api@v0.83.0. 
[[GH-17199](https://github.com/hashicorp/vault/pull/17199)] +* secrets/kubernetes: upgrade to v0.2.0 [[GH-17164](https://github.com/hashicorp/vault/pull/17164)] +* secrets/pki/tidy: Add another pair of metrics counting certificates not deleted by the tidy operation. [[GH-16702](https://github.com/hashicorp/vault/pull/16702)] +* secrets/pki: Add a new flag to issue/sign APIs which can filter out root CAs from the returned ca_chain field [[GH-16935](https://github.com/hashicorp/vault/pull/16935)] +* secrets/pki: Add a warning to any successful response when the requested TTL is overwritten by MaxTTL [[GH-17073](https://github.com/hashicorp/vault/pull/17073)] +* secrets/pki: Add ability to cancel tidy operations, control tidy resource usage. [[GH-16958](https://github.com/hashicorp/vault/pull/16958)] +* secrets/pki: Add ability to periodically rebuild CRL before expiry [[GH-16762](https://github.com/hashicorp/vault/pull/16762)] +* secrets/pki: Add ability to periodically run tidy operations to remove expired certificates. [[GH-16900](https://github.com/hashicorp/vault/pull/16900)] +* secrets/pki: Add support for per-issuer Authority Information Access (AIA) URLs [[GH-16563](https://github.com/hashicorp/vault/pull/16563)] +* secrets/pki: Add support to specify signature bits when generating CSRs through intermediate/generate apis [[GH-17388](https://github.com/hashicorp/vault/pull/17388)] +* secrets/pki: Added gauge metrics "secrets.pki.total_revoked_certificates_stored" and "secrets.pki.total_certificates_stored" to track the number of certificates in storage. [[GH-16676](https://github.com/hashicorp/vault/pull/16676)] +* secrets/pki: Allow revocation of certificates with explicitly provided certificate (bring your own certificate / BYOC). 
[[GH-16564](https://github.com/hashicorp/vault/pull/16564)] +* secrets/pki: Allow revocation via proving possession of certificate's private key [[GH-16566](https://github.com/hashicorp/vault/pull/16566)] +* secrets/pki: Allow tidy to associate revoked certs with their issuers for OCSP performance [[GH-16871](https://github.com/hashicorp/vault/pull/16871)] +* secrets/pki: Honor If-Modified-Since header on CA, CRL fetch; requires passthrough_request_headers modification on the mount point. [[GH-16249](https://github.com/hashicorp/vault/pull/16249)] +* secrets/pki: Improve stability of association of revoked cert with its parent issuer; when an issuer loses crl-signing usage, do not place certs on default issuer's CRL. [[GH-16874](https://github.com/hashicorp/vault/pull/16874)] +* secrets/pki: Support generating delta CRLs for up-to-date CRLs when auto-building is enabled. [[GH-16773](https://github.com/hashicorp/vault/pull/16773)] +* secrets/ssh: Add allowed_domains_template to allow templating of allowed_domains. [[GH-16056](https://github.com/hashicorp/vault/pull/16056)] +* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] +* secrets/ssh: Allow the use of Identity templates in the `default_user` field [[GH-16351](https://github.com/hashicorp/vault/pull/16351)] +* secrets/transit: Add a dedicated HMAC key type, which can be used with key import. [[GH-16668](https://github.com/hashicorp/vault/pull/16668)] +* secrets/transit: Added a parameter to encrypt/decrypt batch operations to allow the caller to override the HTTP response code in case of partial user-input failures. [[GH-17118](https://github.com/hashicorp/vault/pull/17118)] +* secrets/transit: Allow configuring the possible salt lengths for RSA PSS signatures. 
[[GH-16549](https://github.com/hashicorp/vault/pull/16549)] +* ssh: Addition of an endpoint `ssh/issue/:role` to allow the creation of signed key pairs [[GH-15561](https://github.com/hashicorp/vault/pull/15561)] +* storage/cassandra: tuning parameters for clustered environments `connection_timeout`, `initial_connection_timeout`, `simple_retry_policy_retries`. [[GH-10467](https://github.com/hashicorp/vault/pull/10467)] +* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)] +* ui: Changed the tokenBoundCidrs tooltip content to clarify that comma separated values are not accepted in this field. [[GH-15852](https://github.com/hashicorp/vault/pull/15852)] +* ui: Prevents requests to /sys/internal/ui/resultant-acl endpoint when unauthenticated [[GH-17139](https://github.com/hashicorp/vault/pull/17139)] +* ui: Removed deprecated version of core-js 2.6.11 [[GH-15898](https://github.com/hashicorp/vault/pull/15898)] +* ui: Renamed labels under Tools for wrap, lookup, rewrap and unwrap with description. [[GH-16489](https://github.com/hashicorp/vault/pull/16489)] +* ui: Replaces non-inclusive terms [[GH-17116](https://github.com/hashicorp/vault/pull/17116)] +* ui: redirect_to param forwards from auth route when authenticated [[GH-16821](https://github.com/hashicorp/vault/pull/16821)] +* website/docs: API generate-recovery-token documentation. 
[[GH-16213](https://github.com/hashicorp/vault/pull/16213)] +* website/docs: Add documentation around the expensiveness of making lots of lease count quotas in a short period [[GH-16950](https://github.com/hashicorp/vault/pull/16950)] +* website/docs: Removes mentions of unauthenticated from internal ui resultant-acl doc [[GH-17139](https://github.com/hashicorp/vault/pull/17139)] +* website/docs: Update replication docs to mention Integrated Storage [[GH-16063](https://github.com/hashicorp/vault/pull/16063)] +* website/docs: changed to echo for all string examples instead of (<<<) here-string. [[GH-9081](https://github.com/hashicorp/vault/pull/9081)] + +BUG FIXES: + +* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] +* agent: Agent will now respect `max_retries` retry configuration even when caching is set. [[GH-16970](https://github.com/hashicorp/vault/pull/16970)] +* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)] +* api/sys/internal/specs/openapi: support a new "dynamic" query parameter to generate generic mountpaths [[GH-15835](https://github.com/hashicorp/vault/pull/15835)] +* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)] +* api: Fixed issue with internal/ui/mounts and internal/ui/mounts/(?P.+) endpoints where it was not properly handling /auth/ [[GH-15552](https://github.com/hashicorp/vault/pull/15552)] +* api: properly handle switching to/from unix domain socket when changing client address [[GH-11904](https://github.com/hashicorp/vault/pull/11904)] +* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. 
[[GH-17138](https://github.com/hashicorp/vault/pull/17138)] +* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)] +* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17161](https://github.com/hashicorp/vault/pull/17161)] +* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] +* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] +* core (enterprise): Fix bug where wrapping token lookup does not work within namespaces. [[GH-15583](https://github.com/hashicorp/vault/pull/15583)] +* core (enterprise): Fix creation of duplicate entities via alias metadata changes on local auth mounts. +* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] +* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] +* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails +* core/managed-keys (enterprise): fix panic when having `cache_disable` true +* core/quotas (enterprise): Fixed issue with improper counting of leases if lease count quota created after leases +* core/quotas: Added globbing functionality on the end of path suffix quota paths [[GH-16386](https://github.com/hashicorp/vault/pull/16386)] +* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] +* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty +* core/seal: Fix possible keyring truncation when using the file backend. 
[[GH-15946](https://github.com/hashicorp/vault/pull/15946)] +* core: Fix panic when the plugin catalog returns neither a plugin nor an error. [[GH-17204](https://github.com/hashicorp/vault/pull/17204)] +* core: Fixes parsing boolean values for ha_storage backends in config [[GH-15900](https://github.com/hashicorp/vault/pull/15900)] +* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] +* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] +* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] +* debug: Fix panic when capturing debug bundle on Windows [[GH-14399](https://github.com/hashicorp/vault/pull/14399)] +* debug: Remove extra empty lines from vault.log when debug command is run [[GH-16714](https://github.com/hashicorp/vault/pull/16714)] +* identity (enterprise): Fix a data race when creating an entity for a local alias. +* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] +* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] +* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] +* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. 
[[GH-16600](https://github.com/hashicorp/vault/pull/16600)] +* openapi: Fixed issue where information about /auth/token endpoints was not present with explicit policy permissions [[GH-15552](https://github.com/hashicorp/vault/pull/15552)] +* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] +* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)] +* plugins: Corrected the path to check permissions on when the registered plugin name does not match the plugin binary's filename. [[GH-17340](https://github.com/hashicorp/vault/pull/17340)] +* quotas/lease-count: Fix lease-count quotas on mounts not properly being enforced when the lease generating request is a read [[GH-15735](https://github.com/hashicorp/vault/pull/15735)] +* replication (enterprise): Fix data race in SaveCheckpoint() +* replication (enterprise): Fix data race in saveCheckpoint. +* replication (enterprise): Fix possible data race during merkle diff/sync +* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] +* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] +* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. 
[[GH-16534](https://github.com/hashicorp/vault/pull/16534)] +* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)] +* secrets/pki: Allow import of issuers without CRLSign KeyUsage; prohibit setting crl-signing usage on such issuers [[GH-16865](https://github.com/hashicorp/vault/pull/16865)] +* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] +* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17385](https://github.com/hashicorp/vault/pull/17385)] +* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)] +* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)] +* secrets/pki: LIST issuers endpoint is now unauthenticated. [[GH-16830](https://github.com/hashicorp/vault/pull/16830)] +* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. +* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. +* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. +* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] +* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] +* storage/raft: Nodes no longer get demoted to nonvoter if we don't know their version due to missing heartbeats. 
[[GH-17019](https://github.com/hashicorp/vault/pull/17019)] +* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)] +* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] +* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] +* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] +* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] +* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] +* ui: Fixed bug where red spellcheck underline appears in sensitive/secret kv values when it should not appear [[GH-15681](https://github.com/hashicorp/vault/pull/15681)] +* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] +* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)] +* vault: Fix a bug where duplicate policies could be added to an identity group. [[GH-15638](https://github.com/hashicorp/vault/pull/15638)] + +## 1.11.12 +### June 21, 2023 + +CHANGES: + +* core: Bump Go version to 1.19.10. +* licensing (enterprise): Terminated licenses will no longer result in shutdown. Instead, upgrades +will not be allowed if the license termination time is before the build date of the binary. + +FEATURES: + +* **Automated License Utilization Reporting**: Added automated license +utilization reporting, which sends minimal product-license [metering +data](https://developer.hashicorp.com/vault/docs/enterprise/license/utilization-reporting) +to HashiCorp without requiring you to manually collect and report them. 
+* core (enterprise): Add background worker for automatic reporting of billing +information. [[GH-19625](https://github.com/hashicorp/vault/pull/19625)] + +IMPROVEMENTS: + +* api: GET ... /sys/internal/counters/activity?current_billing_period=true now +results in a response which contains the full billing period [[GH-20694](https://github.com/hashicorp/vault/pull/20694)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`minimum_retention_months`. [[GH-20150](https://github.com/hashicorp/vault/pull/20150)] +* api: `/sys/internal/counters/config` endpoint now contains read-only +`reporting_enabled` and `billing_start_timestamp` fields. [[GH-20086](https://github.com/hashicorp/vault/pull/20086)] +* core (enterprise): add configuration for license reporting [[GH-19891](https://github.com/hashicorp/vault/pull/19891)] +* core (enterprise): license updates trigger a reload of reporting and the activity log [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): support reloading configuration for automated reporting via SIGHUP [[GH-20680](https://github.com/hashicorp/vault/pull/20680)] +* core (enterprise): vault server command now allows for opt-out of automated +reporting via the `OPTOUT_LICENSE_REPORTING` environment variable. 
[[GH-3939](https://github.com/hashicorp/vault/pull/3939)] +* core/activity: error when attempting to update retention configuration below the minimum [[GH-20078](https://github.com/hashicorp/vault/pull/20078)] +* core/activity: generate hyperloglogs containing clientIds for each month during precomputation [[GH-16146](https://github.com/hashicorp/vault/pull/16146)] +* core/activity: refactor activity log api to reuse partial api functions in activity endpoint when current month is specified [[GH-16162](https://github.com/hashicorp/vault/pull/16162)] +* core/activity: refactor the activity log's generation of precomputed queries [[GH-20073](https://github.com/hashicorp/vault/pull/20073)] +* core/activity: use monthly hyperloglogs to calculate new clients approximation for current month [[GH-16184](https://github.com/hashicorp/vault/pull/16184)] +* core: Activity log goroutine management improvements to allow tests to be more deterministic. [[GH-17028](https://github.com/hashicorp/vault/pull/17028)] +* core: Limit activity log client count usage by namespaces [[GH-16000](https://github.com/hashicorp/vault/pull/16000)] +* storage/raft: add additional raft metrics relating to applied index and heartbeating; also ensure OSS standbys emit periodic metrics. [[GH-12166](https://github.com/hashicorp/vault/pull/12166)] +* ui: updates clients configuration edit form state based on census reporting configuration [[GH-20125](https://github.com/hashicorp/vault/pull/20125)] + +BUG FIXES: + +* core/activity: add namespace breakdown for new clients when date range spans multiple months, including the current month. 
[[GH-18766](https://github.com/hashicorp/vault/pull/18766)] +* core/activity: de-duplicate namespaces when historical and current month data are mixed [[GH-18452](https://github.com/hashicorp/vault/pull/18452)] +* core/activity: fix the end_date returned from the activity log endpoint when partial counts are computed [[GH-17856](https://github.com/hashicorp/vault/pull/17856)] +* core/activity: include mount counts when de-duplicating current and historical month data [[GH-18598](https://github.com/hashicorp/vault/pull/18598)] +* core/activity: report mount paths (rather than mount accessors) in current month activity log counts and include deleted mount paths in precomputed queries. [[GH-18916](https://github.com/hashicorp/vault/pull/18916)] +* core/activity: return partial month counts when querying a historical date range and no historical data exists. [[GH-17935](https://github.com/hashicorp/vault/pull/17935)] +* core: Change where we evaluate filtered paths as part of mount operations; this is part of an enterprise bugfix that will +have its own changelog entry. [[GH-21260](https://github.com/hashicorp/vault/pull/21260)] +* core: Do not cache seal configuration to fix a bug that resulted in sporadic auto unseal failures. [[GH-21223](https://github.com/hashicorp/vault/pull/21223)] +* core: Don't exit just because we think there's a potential deadlock. [[GH-21342](https://github.com/hashicorp/vault/pull/21342)] +* core: Fix panic in sealed nodes using raft storage trying to emit raft metrics [[GH-21249](https://github.com/hashicorp/vault/pull/21249)] +* identity: Fixes duplicate groups creation with the same name but unique IDs. 
[[GH-20964](https://github.com/hashicorp/vault/pull/20964)] +* replication (enterprise): Fix a race condition with update-primary that could result in data loss after a DR failover +* replication (enterprise): Fix path filters deleting data right after it's written by backend Initialize funcs + +## 1.11.11 +### June 08, 2023 + +SECURITY: + +* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. [[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)] + +CHANGES: + +* core: Bump Go version to 1.19.9. +* core: Revert #19676 (VAULT_GRPC_MIN_CONNECT_TIMEOUT env var) as we decided it was unnecessary. [[GH-20826](https://github.com/hashicorp/vault/pull/20826)] + +IMPROVEMENTS: + +* command/server: Add support for dumping pprof files to the filesystem via SIGUSR2 when +`VAULT_PPROF_WRITE_TO_FILE=true` is set on the server. [[GH-20609](https://github.com/hashicorp/vault/pull/20609)] +* secrets/pki: add subject key identifier to read key response [[GH-20642](https://github.com/hashicorp/vault/pull/20642)] +* ui: update TTL picker for consistency [[GH-18114](https://github.com/hashicorp/vault/pull/18114)] + +BUG FIXES: + +* api: Properly Handle nil identity_policies in Secret Data [[GH-20636](https://github.com/hashicorp/vault/pull/20636)] +* auth/ldap: Set default value for `max_page_size` properly [[GH-20453](https://github.com/hashicorp/vault/pull/20453)] +* cli: CLI should take days as a unit of time for ttl like flags [[GH-20477](https://github.com/hashicorp/vault/pull/20477)] +* core (enterprise): Fix log shipper buffer size overflow issue for 32 bit architecture. +* core (enterprise): Fix logshipper buffer size to default to DefaultBufferSize only when reported system memory is zero. 
+* core (enterprise): Remove MFA Enforcement configuration for namespace when deleting namespace +* core: prevent panic on login after namespace is deleted that had mfa enforcement [[GH-20375](https://github.com/hashicorp/vault/pull/20375)] +* replication (enterprise): Fix a race condition with invalid tokens during WAL streaming that was causing Secondary clusters to be unable to connect to a Primary. +* replication (enterprise): fix bug where secondary grpc connections would timeout when connecting to a primary host that no longer exists. +* secrets/transform (enterprise): Fix a caching bug affecting secondary nodes after a tokenization key rotation + +## 1.11.10 +### April 26, 2023 + +CHANGES: + +* core: Bump Go version to 1.19.8. + +IMPROVEMENTS: + +* cli/namespace: Add detailed flag to output additional namespace information +such as namespace IDs and custom metadata. [[GH-20243](https://github.com/hashicorp/vault/pull/20243)] +* core/activity: add an endpoint to write test activity log data, guarded by a build flag [[GH-20019](https://github.com/hashicorp/vault/pull/20019)] +* core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the +`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. [[GH-20044](https://github.com/hashicorp/vault/pull/20044)] +* sdk/ldaputil: added `connection_timeout` to tune connection timeout duration +for all LDAP plugins. [[GH-20144](https://github.com/hashicorp/vault/pull/20144)] + +BUG FIXES: + +* auth/ldap: Add max_page_size configurable to LDAP configuration [[GH-19032](https://github.com/hashicorp/vault/pull/19032)] +* core (enterprise): Fix intermittent issue with token entries sometimes not being found when using a newly created token in a request to a secondary, even when SSCT `new_token` forwarding is set. 
When this occurred, this would result in the following error to the client: `error performing token check: no lease entry found for token that ought to have one, possible eventual consistency issue`. +* core (enterprise): Fix read on perf standbys failing with 412 after leadership change, unseal, restores or restarts when no writes occur +* core/ssct (enterprise): Fixed race condition where a newly promoted DR may revert `sscGenCounter` +resulting in 412 errors. +* core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. [[GH-19721](https://github.com/hashicorp/vault/pull/19721)] +* helper/random: Fix race condition in string generator helper [[GH-19875](https://github.com/hashicorp/vault/pull/19875)] +* openapi: Fix many incorrect details in generated API spec, by using better techniques to parse path regexps [[GH-18554](https://github.com/hashicorp/vault/pull/18554)] +* replication (enterprise): Fix replication status for Primary clusters showing its primary cluster's information (in case of DR) in secondaries field when known_secondaries field is nil +* secrets/pki: Fix patching of leaf_not_after_behavior on issuers. 
[[GH-20341](https://github.com/hashicorp/vault/pull/20341)] +* secrets/transform (enterprise): Address SQL connection leak when cleaning expired tokens +* ui: Fix OIDC provider logo showing when domain doesn't match [[GH-20263](https://github.com/hashicorp/vault/pull/20263)] +* ui: Fix bad link to namespace when namespace name includes `.` [[GH-19799](https://github.com/hashicorp/vault/pull/19799)] +* ui: fixes browser console formatting for help command output [[GH-20064](https://github.com/hashicorp/vault/pull/20064)] +* ui: remove use of htmlSafe except when first sanitized [[GH-20235](https://github.com/hashicorp/vault/pull/20235)] + +## 1.11.9 +### March 29, 2023 + +SECURITY: + +* storage/mssql: When using Vault’s community-supported Microsoft SQL (MSSQL) database storage backend, a privileged attacker with the ability to write arbitrary data to Vault’s configuration may be able to perform arbitrary SQL commands on the underlying database server through Vault. This vulnerability, CVE-2023-0620, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-12](https://discuss.hashicorp.com/t/hcsec-2023-12-vault-s-microsoft-sql-database-storage-backend-vulnerable-to-sql-injection-via-configuration-file/52080)] +* secrets/pki: Vault’s PKI mount issuer endpoints did not correctly authorize access to remove an issuer or modify issuer metadata, potentially resulting in denial of service of the PKI mount. This bug did not affect public or private key material, trust chains or certificate issuance. This vulnerability, CVE-2023-0665, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-11](https://discuss.hashicorp.com/t/hcsec-2023-11-vault-s-pki-issuer-endpoint-did-not-correctly-authorize-access-to-issuer-metadata/52079)] +* core: HashiCorp Vault’s implementation of Shamir’s secret sharing used precomputed table lookups, and was vulnerable to cache-timing attacks. 
An attacker with access to, and the ability to observe a large number of unseal operations on the host through a side channel may reduce the search space of a brute force effort to recover the Shamir shares. This vulnerability, CVE-2023-25000, is fixed in Vault 1.13.1, 1.12.5, and 1.11.9. [[HCSEC-2023-10](https://discuss.hashicorp.com/t/hcsec-2023-10-vault-vulnerable-to-cache-timing-attacks-during-seal-and-unseal-operations/52078)] + +IMPROVEMENTS: + +* auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id +website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config [[GH-19244](https://github.com/hashicorp/vault/pull/19244)] +* core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch +option in case all else fails for some replication issues we may not have fully reproduced. [[GH-19676](https://github.com/hashicorp/vault/pull/19676)] +* core: validate name identifiers in mssql physical storage backend prior use [[GH-19591](https://github.com/hashicorp/vault/pull/19591)] + +BUG FIXES: + +* auth/kubernetes: Ensure a consistent TLS configuration for all k8s API requests [[#190](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/190)] [[GH-19720](https://github.com/hashicorp/vault/pull/19720)] +* cli: Fix vault read handling to return raw data as secret.Data when there is no top-level data object from api response. [[GH-17913](https://github.com/hashicorp/vault/pull/17913)] +* core (enterprise): Attempt to reconnect to a PKCS#11 HSM if we retrieve a CKR_FUNCTION_FAILED error. +* core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. 
[[GH-19585](https://github.com/hashicorp/vault/pull/19585)] +* openapi: Fix logic for labeling unauthenticated/sudo paths. [[GH-19600](https://github.com/hashicorp/vault/pull/19600)] +* secrets/transform (enterprise): Fix persistence problem with rotated tokenization key versions +* ui: fixes issue navigating back a level using the breadcrumb from secret metadata view [[GH-19703](https://github.com/hashicorp/vault/pull/19703)] +* ui: pass encodeBase64 param to HMAC transit-key-actions. [[GH-19429](https://github.com/hashicorp/vault/pull/19429)] +* ui: use URLSearchParams interface to capture namespace param from SSOs (ex. ADFS) with decoded state param in callback url [[GH-19460](https://github.com/hashicorp/vault/pull/19460)] + +## 1.11.8 +### March 01, 2023 + +SECURITY: + +* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999 has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. [[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] + +CHANGES: + +* core: Bump Go version to 1.19.6. + +IMPROVEMENTS: + +* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] + +BUG FIXES: + +* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core (enterprise): Fix perf standby WAL streaming silent failures when replication setup happens at a bad time. 
+* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] +* license (enterprise): Fix bug where license would update even if the license didn't change. +* replication (enterprise): Fix bug where reloading external plugin on a secondary would +break replication. +* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18208](https://github.com/hashicorp/vault/pull/18208)] +* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] +* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] +* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)] + +## 1.11.7 +### February 6, 2023 + +CHANGES: + +* core: Bump Go version to 1.19.4. + +IMPROVEMENTS: + +* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] +* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. 
+* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] +* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] +* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] + +BUG FIXES: + +* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] +* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] +* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. +* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace +* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. +* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. +* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* identity (enterprise): Fix a data race when creating an entity for a local alias. +* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. +* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. 
+* kmip (enterprise): Fix Query operation response that omitted streaming capability and supported profiles. +* licensing (enterprise): update autoloaded license cache after reload +* secrets/pki: Allow patching issuer to set an empty issuer name. [[GH-18466](https://github.com/hashicorp/vault/pull/18466)] +* secrets/transit: Do not warn about unrecognized parameter 'batch_input' [[GH-18299](https://github.com/hashicorp/vault/pull/18299)] +* storage/raft (enterprise): An already joined node can rejoin by wiping storage +and re-issuing a join request, but in doing so could transiently become a +non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https://github.com/hashicorp/vault/pull/18263)] +* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. +* storage/raft: Don't panic on unknown raft ops [[GH-17732](https://github.com/hashicorp/vault/pull/17732)] +* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] + +## 1.11.6 +### November 30, 2022 + +IMPROVEMENTS: + +* secrets/pki: Allow issuer creation, import to change default issuer via `default_follows_latest_issuer`. [[GH-17824](https://github.com/hashicorp/vault/pull/17824)] + +BUG FIXES: + +* auth/okta: fix a panic for AuthRenew in Okta [[GH-18011](https://github.com/hashicorp/vault/pull/18011)] +* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] +* cli: Fix issue preventing kv commands from executing properly when the mount path provided by `-mount` flag and secret key path are the same. [[GH-17679](https://github.com/hashicorp/vault/pull/17679)] +* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. 
+* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] +* core: fix a start up race condition where performance standbys could go into a + mount loop if default policies are not yet synced from the active node. [[GH-17801](https://github.com/hashicorp/vault/pull/17801)] +* secret/pki: fix bug with initial legacy bundle migration (from < 1.11 into 1.11+) and missing issuers from ca_chain [[GH-17772](https://github.com/hashicorp/vault/pull/17772)] +* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18085](https://github.com/hashicorp/vault/pull/18085)] +* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18110](https://github.com/hashicorp/vault/pull/18110)] +* ui: Fixes issue with not being able to download raft snapshot via service worker [[GH-17769](https://github.com/hashicorp/vault/pull/17769)] +* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] + +## 1.11.5 +### November 2, 2022 + +IMPROVEMENTS: + +* database/snowflake: Allow parallel requests to Snowflake [[GH-17594](https://github.com/hashicorp/vault/pull/17594)] +* sdk/ldap: Added support for paging when searching for groups using group filters [[GH-17640](https://github.com/hashicorp/vault/pull/17640)] + +BUG FIXES: + +* core/managed-keys (enterprise): Return better error messages when encountering key creation failures +* core/managed-keys (enterprise): fix panic when having `cache_disable` true +* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] +* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] +* kmip (enterprise): Fix a problem in the handling of attributes that caused Import operations to fail. 
+* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] +* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] +* secrets/pki: Do not read revoked certificates from backend when CRL is disabled [[GH-17384](https://github.com/hashicorp/vault/pull/17384)] +* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] +* ui/keymgmt: Sets the defaultValue for type when creating a key. [[GH-17407](https://github.com/hashicorp/vault/pull/17407)] +* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] + +## 1.11.4 +### September 30, 2022 + +SECURITY: + +* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. [[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] + +IMPROVEMENTS: + +* agent/auto-auth: Add `exit_on_err` which when set to true, will cause Agent to exit if any errors are encountered during authentication. [[GH-17091](https://github.com/hashicorp/vault/pull/17091)] +* agent: Send notifications to systemd on start and stop. [[GH-9802](https://github.com/hashicorp/vault/pull/9802)] + +BUG FIXES: + +* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. 
[[GH-17138](https://github.com/hashicorp/vault/pull/17138)] +* auth/kubernetes: Restore support for JWT signature algorithm ES384 [[GH-160](https://github.com/hashicorp/vault-plugin-auth-kubernetes/pull/160)] [[GH-17162](https://github.com/hashicorp/vault/pull/17162)] +* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] +* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] +* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] +* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] +* replication (enterprise): Fix data race in SaveCheckpoint() +* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. +* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. +* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] +* ui: Fixes secret version and status menu links transitioning to auth screen [[GH-16983](https://github.com/hashicorp/vault/pull/16983)] + +## 1.11.3 +### August 31, 2022 + +SECURITY: + +* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. 
[[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] + +CHANGES: + +* core: Bump Go version to 1.17.13. + +IMPROVEMENTS: + +* auth/kerberos: add `add_group_aliases` config to include LDAP groups in Vault group aliases [[GH-16890](https://github.com/hashicorp/vault/pull/16890)] +* auth/kerberos: add `remove_instance_name` parameter to the login CLI and the +Kerberos config in Vault. This removes any instance names found in the keytab +service principal name. [[GH-16594](https://github.com/hashicorp/vault/pull/16594)] +* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. [[GH-16598](https://github.com/hashicorp/vault/pull/16598)] +* storage/gcs: Add documentation explaining how to configure the gcs backend using environment variables instead of options in the configuration stanza [[GH-14455](https://github.com/hashicorp/vault/pull/14455)] + +BUG FIXES: + +* api: Fixed erroneous warnings of unrecognized parameters when unwrapping data. [[GH-16794](https://github.com/hashicorp/vault/pull/16794)] +* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. [[GH-16523](https://github.com/hashicorp/vault/pull/16523)] +* auth/kerberos: Maintain headers set by the client [[GH-16636](https://github.com/hashicorp/vault/pull/16636)] +* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] +* core/license (enterprise): Always remove stored license and allow unseal to complete when license cleanup fails +* database/elasticsearch: Fixes a bug in boolean parsing for initialize [[GH-16526](https://github.com/hashicorp/vault/pull/16526)] +* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. 
[[GH-16599](https://github.com/hashicorp/vault/pull/16599)] +* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the +Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] +* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] +* plugin/secrets/auth: Fix a bug with aliased backends such as aws-ec2 or generic [[GH-16673](https://github.com/hashicorp/vault/pull/16673)] +* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] +* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. [[GH-16534](https://github.com/hashicorp/vault/pull/16534)] +* secrets/pki: Fix migration to properly handle mounts that contain only keys, no certificates [[GH-16813](https://github.com/hashicorp/vault/pull/16813)] +* secrets/pki: Ignore EC PARAMETER PEM blocks during issuer import (/config/ca, /issuers/import/*, and /intermediate/set-signed) [[GH-16721](https://github.com/hashicorp/vault/pull/16721)] +* secrets/pki: LIST issuers endpoint is now unauthenticated. [[GH-16830](https://github.com/hashicorp/vault/pull/16830)] +* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] +* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] +* ui: Fix info tooltip submitting form [[GH-16659](https://github.com/hashicorp/vault/pull/16659)] +* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). 
[[GH-16739](https://github.com/hashicorp/vault/pull/16739)] + +SECURITY: + +* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] + +## 1.11.2 +### August 2, 2022 + +IMPROVEMENTS: + +* agent: Added `disable_keep_alives` configuration to disable keep alives in auto-auth, caching and templating. [[GH-16479](https://github.com/hashicorp/vault/pull/16479)] + +BUG FIXES: + +* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] +* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] +* secrets/kv: Fix `kv get` issue preventing the ability to read a secret when providing a leading slash [[GH-16443](https://github.com/hashicorp/vault/pull/16443)] +* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] + +## 1.11.1 +### July 21, 2022 + +SECURITY: + +* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] + +CHANGES: + +* core: Bump Go version to 1.17.12. 
+ +IMPROVEMENTS: + +* agent: Added `disable_idle_connections` configuration to disable leaving idle connections open in auto-auth, caching and templating. [[GH-15986](https://github.com/hashicorp/vault/pull/15986)] +* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] +* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] + +BUG FIXES: + +* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] +* agent: Update consul-template for pkiCert bug fixes [[GH-16087](https://github.com/hashicorp/vault/pull/16087)] +* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] +* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty +* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] +* kmip (enterprise): Return SecretData as supported Object Type. +* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] +* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] +* storage/raft (enterprise): Prevent unauthenticated voter status change with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] +* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. 
+* ui: OIDC login type uses localStorage instead of sessionStorage [[GH-16170](https://github.com/hashicorp/vault/pull/16170)] + +SECURITY: + +* storage/raft (enterprise): Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HCSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] + +## 1.11.0 +### June 20, 2022 + +CHANGES: + +* auth/aws: Add RoleSession to DisplayName when using assumeRole for authentication [[GH-14954](https://github.com/hashicorp/vault/pull/14954)] +* auth/kubernetes: If `kubernetes_ca_cert` is unset, and there is no pod-local CA available, an error will be surfaced when writing config instead of waiting for login. [[GH-15584](https://github.com/hashicorp/vault/pull/15584)] +* auth: Remove support for legacy MFA +(https://www.vaultproject.io/docs/v1.10.x/auth/mfa) [[GH-14869](https://github.com/hashicorp/vault/pull/14869)] +* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)] +* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] +* core: Bump Go version to 1.17.11. [[GH-go-ver-1110](https://github.com/hashicorp/vault/pull/go-ver-1110)] +* database & storage: Change underlying driver library from [lib/pq](https://github.com/lib/pq) to [pgx](https://github.com/jackc/pgx). 
This change affects Redshift & Postgres database secrets engines, and CockroachDB & Postgres storage engines [[GH-15343](https://github.com/hashicorp/vault/pull/15343)] +* licensing (enterprise): Remove support for stored licenses and associated `sys/license` and `sys/license/signed` +endpoints in favor of [autoloaded licenses](https://www.vaultproject.io/docs/enterprise/license/autoloading). +* replication (enterprise): The `/sys/replication/performance/primary/mount-filter` endpoint has been removed. Please use [Paths Filter](https://www.vaultproject.io/api-docs/system/replication/replication-performance#create-paths-filter) instead. +* secret/pki: Remove unused signature_bits parameter from intermediate CSR generation; this parameter doesn't control the final certificate's signature algorithm selection as that is up to the signing CA [[GH-15478](https://github.com/hashicorp/vault/pull/15478)] +* secrets/kubernetes: Split `additional_metadata` into `extra_annotations` and `extra_labels` parameters [[GH-15655](https://github.com/hashicorp/vault/pull/15655)] +* secrets/pki: A new aliased api path (/pki/issuer/:issuer_ref/sign-self-issued) +providing the same functionality as the existing API(/pki/root/sign-self-issued) +does not require sudo capabilities but the latter still requires it in an +effort to maintain backwards compatibility. [[GH-15211](https://github.com/hashicorp/vault/pull/15211)] +* secrets/pki: Err on unknown role during sign-verbatim. [[GH-15543](https://github.com/hashicorp/vault/pull/15543)] +* secrets/pki: Existing CRL API (/pki/crl) now returns an X.509 v2 CRL instead +of a v1 CRL. [[GH-15100](https://github.com/hashicorp/vault/pull/15100)] +* secrets/pki: The `ca_chain` response field within issuing (/pki/issue/:role) +and signing APIs will now include the root CA certificate if the mount is +aware of it. 
[[GH-15155](https://github.com/hashicorp/vault/pull/15155)] +* secrets/pki: existing Delete Root API (pki/root) will now delete all issuers +and keys within the mount path. [[GH-15004](https://github.com/hashicorp/vault/pull/15004)] +* secrets/pki: existing Generate Root (pki/root/generate/:type), +Set Signed Intermediate (/pki/intermediate/set-signed) APIs will +add new issuers/keys to a mount instead of warning that an existing CA exists [[GH-14975](https://github.com/hashicorp/vault/pull/14975)] +* secrets/pki: the signed CA certificate from the sign-intermediate api will now appear within the ca_chain +response field along with the issuer's ca chain. [[GH-15524](https://github.com/hashicorp/vault/pull/15524)] +* ui: Upgrade Ember to version 3.28 [[GH-14763](https://github.com/hashicorp/vault/pull/14763)] + +FEATURES: + +* **Autopilot Improvements (Enterprise)**: Autopilot on Vault Enterprise now supports automated upgrades and redundancy zones when using integrated storage. +* **KeyMgmt UI**: Add UI support for managing the Key Management Secrets Engine [[GH-15523](https://github.com/hashicorp/vault/pull/15523)] +* **Kubernetes Secrets Engine**: This new secrets engine generates Kubernetes service account tokens, service accounts, role bindings, and roles dynamically. [[GH-15551](https://github.com/hashicorp/vault/pull/15551)] +* **Non-Disruptive Intermediate/Root Certificate Rotation**: This allows +import, generation and configuration of any number of keys and/or issuers +within a PKI mount, providing operators the ability to rotate certificates +in place without affecting existing client configurations. [[GH-15277](https://github.com/hashicorp/vault/pull/15277)] +* **Print minimum required policy for any command**: The global CLI flag `-output-policy` can now be used with any command to print out the minimum required policy HCL for that operation, including whether the given path requires the "sudo" capability. 
[[GH-14899](https://github.com/hashicorp/vault/pull/14899)] +* **Snowflake Database Plugin**: Adds ability to manage RSA key pair credentials for dynamic and static Snowflake users. [[GH-15376](https://github.com/hashicorp/vault/pull/15376)] +* **Transit BYOK**: Allow import of externally-generated keys into the Transit secrets engine. [[GH-15414](https://github.com/hashicorp/vault/pull/15414)] +* nomad: Bootstrap Nomad ACL system if no token is provided [[GH-12451](https://github.com/hashicorp/vault/pull/12451)] +* storage/dynamodb: Added `AWS_DYNAMODB_REGION` environment variable. [[GH-15054](https://github.com/hashicorp/vault/pull/15054)] + +IMPROVEMENTS: + +* activity: return nil response months in activity log API when no month data exists [[GH-15420](https://github.com/hashicorp/vault/pull/15420)] +* agent/auto-auth: Add `min_backoff` to the method stanza for configuring initial backoff duration. [[GH-15204](https://github.com/hashicorp/vault/pull/15204)] +* agent: Update consul-template to v0.29.0 [[GH-15293](https://github.com/hashicorp/vault/pull/15293)] +* agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)] +* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] +* api: Add ability to pass certificate as PEM bytes to api.Client. [[GH-14753](https://github.com/hashicorp/vault/pull/14753)] +* api: Add context-aware functions to vault/api for each API wrapper function. [[GH-14388](https://github.com/hashicorp/vault/pull/14388)] +* api: Added MFALogin() for handling MFA flow when using login helpers. 
[[GH-14900](https://github.com/hashicorp/vault/pull/14900)] +* api: If the parameters supplied over the API payload are ignored due to not +being what the endpoints were expecting, or if the parameters supplied get +replaced by the values in the endpoint's path itself, warnings will be added to +the non-empty responses listing all the ignored and replaced parameters. [[GH-14962](https://github.com/hashicorp/vault/pull/14962)] +* api: KV helper methods to simplify the common use case of reading and writing KV secrets [[GH-15305](https://github.com/hashicorp/vault/pull/15305)] +* api: Provide a helper method WithNamespace to create a cloned client with a new NS [[GH-14963](https://github.com/hashicorp/vault/pull/14963)] +* api: Support VAULT_PROXY_ADDR environment variable to allow overriding the Vault client's HTTP proxy. [[GH-15377](https://github.com/hashicorp/vault/pull/15377)] +* api: Use the context passed to the api/auth Login helpers. [[GH-14775](https://github.com/hashicorp/vault/pull/14775)] +* api: make ListPlugins parse only known plugin types [[GH-15434](https://github.com/hashicorp/vault/pull/15434)] +* audit: Add a policy_results block into the audit log that contains the set of +policies that granted this request access. 
[[GH-15457](https://github.com/hashicorp/vault/pull/15457)] +* audit: Include mount_accessor in audit request and response logs [[GH-15342](https://github.com/hashicorp/vault/pull/15342)] +* audit: added entity_created boolean to audit log, set when login operations create an entity [[GH-15487](https://github.com/hashicorp/vault/pull/15487)] +* auth/aws: Add rsa2048 signature type to API [[GH-15719](https://github.com/hashicorp/vault/pull/15719)] +* auth/gcp: Enable the Google service endpoints used by the underlying client to be customized [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] +* auth/gcp: Vault CLI now infers the service account email when running on Google Cloud [[GH-15592](https://github.com/hashicorp/vault/pull/15592)] +* auth/jwt: Adds ability to use JSON pointer syntax for the `user_claim` value. [[GH-15593](https://github.com/hashicorp/vault/pull/15593)] +* auth/okta: Add support for Google provider TOTP type in the Okta auth method [[GH-14985](https://github.com/hashicorp/vault/pull/14985)] +* auth/okta: Add support for performing [the number +challenge](https://help.okta.com/en-us/Content/Topics/Mobile/ov-admin-config.htm?cshid=csh-okta-verify-number-challenge-v1#enable-number-challenge) +during an Okta Verify push challenge [[GH-15361](https://github.com/hashicorp/vault/pull/15361)] +* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] +* auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)] +* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] +* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] +* cli/vault: warn when policy name contains upper-case letter 
[[GH-14670](https://github.com/hashicorp/vault/pull/14670)] +* cli: Alternative flag-based syntax for KV to mitigate confusion from automatically appended /data [[GH-14807](https://github.com/hashicorp/vault/pull/14807)] +* cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)] +* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] +* command: Support optional '-log-level' flag to be passed to 'operator migrate' command (defaults to info). Also support VAULT_LOG_LEVEL env var. [[GH-15405](https://github.com/hashicorp/vault/pull/15405)] +* command: Support the optional '-detailed' flag to be passed to 'vault list' command to show ListResponseWithInfo data. Also supports the VAULT_DETAILED env var. [[GH-15417](https://github.com/hashicorp/vault/pull/15417)] +* core (enterprise): Include `termination_time` in `sys/license/status` response +* core (enterprise): Include termination time in `license inspect` command output +* core,transit: Allow callers to choose random byte source including entropy augmentation sources for the sys/tools/random and transit/random endpoints. [[GH-15213](https://github.com/hashicorp/vault/pull/15213)] +* core/activity: Order month data in ascending order of timestamps [[GH-15259](https://github.com/hashicorp/vault/pull/15259)] +* core/activity: allow client counts to be precomputed and queried on non-contiguous chunks of data [[GH-15352](https://github.com/hashicorp/vault/pull/15352)] +* core/managed-keys (enterprise): Allow configuring the number of parallel operations to PKCS#11 managed keys. +* core: Add an export API for historical activity log data [[GH-15586](https://github.com/hashicorp/vault/pull/15586)] +* core: Add new DB methods that do not prepare statements. 
[[GH-15166](https://github.com/hashicorp/vault/pull/15166)] +* core: check uid and permissions of config dir, config file, plugin dir and plugin binaries [[GH-14817](https://github.com/hashicorp/vault/pull/14817)] +* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)] +* core: Include build date in `sys/seal-status` and `sys/version-history` endpoints. [[GH-14957](https://github.com/hashicorp/vault/pull/14957)] +* core: Upgrade github.org/x/crypto/ssh [[GH-15125](https://github.com/hashicorp/vault/pull/15125)] +* kmip (enterprise): Implement operations Query, Import, Encrypt and Decrypt. Improve operations Locate, Add Attribute, Get Attributes and Get Attribute List to handle most supported attributes. +* mfa/okta: migrate to use official Okta SDK [[GH-15355](https://github.com/hashicorp/vault/pull/15355)] +* sdk: Change OpenAPI code generator to extract request objects into /components/schemas and reference them by name. [[GH-14217](https://github.com/hashicorp/vault/pull/14217)] +* secrets/consul: Add support for Consul node-identities and service-identities [[GH-15295](https://github.com/hashicorp/vault/pull/15295)] +* secrets/consul: Vault is now able to automatically bootstrap the Consul ACL system. [[GH-10751](https://github.com/hashicorp/vault/pull/10751)] +* secrets/database/elasticsearch: Use the new /_security base API path instead of /_xpack/security when managing elasticsearch. [[GH-15614](https://github.com/hashicorp/vault/pull/15614)] +* secrets/pki: Add not_before_duration to root CA generation, intermediate CA signing paths. 
[[GH-14178](https://github.com/hashicorp/vault/pull/14178)] +* secrets/pki: Add support for CPS URLs and User Notice to Policy Information [[GH-15751](https://github.com/hashicorp/vault/pull/15751)] +* secrets/pki: Allow operators to control the issuing certificate behavior when +the requested TTL is beyond the NotAfter value of the signing certificate [[GH-15152](https://github.com/hashicorp/vault/pull/15152)] +* secrets/pki: Always return CRLs, URLs configurations, even if using the default value. [[GH-15470](https://github.com/hashicorp/vault/pull/15470)] +* secrets/pki: Enable Patch Functionality for Roles and Issuers (API only) [[GH-15510](https://github.com/hashicorp/vault/pull/15510)] +* secrets/pki: Have pki/sign-verbatim use the not_before_duration field defined in the role [[GH-15429](https://github.com/hashicorp/vault/pull/15429)] +* secrets/pki: Warn on empty Subject field during issuer generation (root/generate and root/sign-intermediate). [[GH-15494](https://github.com/hashicorp/vault/pull/15494)] +* secrets/pki: Warn on missing AIA access information when generating issuers (config/urls). [[GH-15509](https://github.com/hashicorp/vault/pull/15509)] +* secrets/pki: Warn when `generate_lease` and `no_store` are both set to `true` on requests. [[GH-14292](https://github.com/hashicorp/vault/pull/14292)] +* secrets/ssh: Add connection timeout of 1 minute for outbound SSH connection in deprecated Dynamic SSH Keys mode. [[GH-15440](https://github.com/hashicorp/vault/pull/15440)] +* secrets/ssh: Support for `add_before_duration` in SSH [[GH-15250](https://github.com/hashicorp/vault/pull/15250)] +* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer +* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections. 
[[GH-15042](https://github.com/hashicorp/vault/pull/15042)] +* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] +* ui: Default auto-rotation period in transit is 30 days [[GH-15474](https://github.com/hashicorp/vault/pull/15474)] +* ui: Parse schema refs from OpenAPI [[GH-14508](https://github.com/hashicorp/vault/pull/14508)] +* ui: Remove stored license references [[GH-15513](https://github.com/hashicorp/vault/pull/15513)] +* ui: Remove storybook. [[GH-15074](https://github.com/hashicorp/vault/pull/15074)] +* ui: Replaces the IvyCodemirror wrapper with a custom ember modifier. [[GH-14659](https://github.com/hashicorp/vault/pull/14659)] +* website/docs: Add usage documentation for Kubernetes Secrets Engine [[GH-15527](https://github.com/hashicorp/vault/pull/15527)] +* website/docs: added a link to an Enigma secret plugin. [[GH-14389](https://github.com/hashicorp/vault/pull/14389)] + +DEPRECATIONS: + +* docs: Document removal of X.509 certificates with signatures who use SHA-1 in Vault 1.12 [[GH-15581](https://github.com/hashicorp/vault/pull/15581)] +* secrets/consul: Deprecate old parameters "token_type" and "policy" [[GH-15550](https://github.com/hashicorp/vault/pull/15550)] +* secrets/consul: Deprecate parameter "policies" in favor of "consul_policies" for consistency [[GH-15400](https://github.com/hashicorp/vault/pull/15400)] + +BUG FIXES: + +* Fixed panic when adding or modifying a Duo MFA Method in Enterprise +* agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)] +* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] +* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed [[GH-14269](https://github.com/hashicorp/vault/pull/14269)] +* api: Fixes bug where OutputCurlString field was unintentionally being 
copied over during client cloning [[GH-14968](https://github.com/hashicorp/vault/pull/14968)] +* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] +* auth/approle: Add maximum length for input values that result in SHA56 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] +* auth/kubernetes: Fix error code when using the wrong service account [[GH-15584](https://github.com/hashicorp/vault/pull/15584)] +* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set +has been fixed. The previous behavior would make a request to the LDAP server to +get `user_attr` before discarding it and using the username instead. This would +make it impossible for a user to connect if this attribute was missing or had +multiple values, even though it would not be used anyway. This has been fixed +and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] +* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)] +* auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)] +* auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)] +* cassandra: Update gocql Cassandra client to fix "no hosts 
available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] +* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] +* cli: kv get command now honors trailing spaces to retrieve secrets [[GH-15188](https://github.com/hashicorp/vault/pull/15188)] +* command: do not report listener and storage types as key not found warnings [[GH-15383](https://github.com/hashicorp/vault/pull/15383)] +* core (enterprise): Allow local alias create RPCs to persist alias metadata +* core (enterprise): Fix overcounting of lease count quota usage at startup. +* core (enterprise): Fix some races in merkle index flushing code found in testing +* core (enterprise): Handle additional edge cases reinitializing PKCS#11 libraries after login errors. +* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... }} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] +* core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number +* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] +* core: Fix double counting for "route" metrics [[GH-12763](https://github.com/hashicorp/vault/pull/12763)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] +* core: Fix panic caused by parsing policies with empty slice values. 
[[GH-14501](https://github.com/hashicorp/vault/pull/14501)] +* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] +* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] +* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] +* core: Prevent metrics generation from causing deadlocks. [[GH-15693](https://github.com/hashicorp/vault/pull/15693)] +* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] +* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] +* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] +* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] +* core: renaming the environment variable VAULT_DISABLE_FILE_PERMISSIONS_CHECK to VAULT_ENABLE_FILE_PERMISSIONS_CHECK and adjusting the logic [[GH-15452](https://github.com/hashicorp/vault/pull/15452)] +* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] +* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] +* identity: deduplicate policies when creating/updating identity groups [[GH-15055](https://github.com/hashicorp/vault/pull/15055)] +* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] +* plugin: Fix a bug where plugin reload would falsely report success in certain scenarios. 
[[GH-15579](https://github.com/hashicorp/vault/pull/15579)] +* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] +* raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)] +* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] +* sdk/cidrutil: Only check if cidr contains remote address for IP addresses [[GH-14487](https://github.com/hashicorp/vault/pull/14487)] +* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] +* sdk: Fix OpenApi spec generator to remove duplicate sha_256 parameter [[GH-15163](https://github.com/hashicorp/vault/pull/15163)] +* secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)] +* secrets/kv: Fix issue preventing the ability to reset the `delete_version_after` key metadata field to 0s via HTTP `PATCH`. [[GH-15792](https://github.com/hashicorp/vault/pull/15792)] +* secrets/pki: CRLs on performance secondary clusters are now automatically +rebuilt upon changes to the list of issuers. [[GH-15179](https://github.com/hashicorp/vault/pull/15179)] +* secrets/pki: Fix handling of "any" key type with default zero signature bits value. 
[[GH-14875](https://github.com/hashicorp/vault/pull/14875)] +* secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)] +* secrets/ssh: Convert role field not_before_duration to seconds before returning it [[GH-15559](https://github.com/hashicorp/vault/pull/15559)] +* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. +* storage/raft: Forward autopilot state requests on perf standbys to active node. [[GH-15493](https://github.com/hashicorp/vault/pull/15493)] +* storage/raft: joining a node to a cluster now ignores any VAULT_NAMESPACE environment variable set on the server process [[GH-15519](https://github.com/hashicorp/vault/pull/15519)] +* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. [[GH-15046](https://github.com/hashicorp/vault/pull/15046)] +* ui: Fix KV secret showing in the edit form after a user creates a new version but doesn't have read capabilities [[GH-14794](https://github.com/hashicorp/vault/pull/14794)] +* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)] +* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] +* ui: Fix issue with KV not recomputing model when you changed versions. 
[[GH-14941](https://github.com/hashicorp/vault/pull/14941)] +* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)] +* ui: Fixed unsupported revocation statements field for DB roles [[GH-15573](https://github.com/hashicorp/vault/pull/15573)] +* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] +* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)] +* ui: Revert using localStorage in favor of sessionStorage [[GH-15769](https://github.com/hashicorp/vault/pull/15769)] +* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] +* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)] +* ui: fix form validations ignoring default values and disabling submit button [[GH-15560](https://github.com/hashicorp/vault/pull/15560)] +* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] +* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] + +## 1.10.11 +### March 01, 2023 + +SECURITY: + +* auth/approle: When using the Vault and Vault Enterprise (Vault) approle auth method, any authenticated user with access to the /auth/approle/role/:role_name/secret-id-accessor/destroy endpoint can destroy the secret ID of any other role by providing the secret ID accessor. This vulnerability, CVE-2023-24999 has been fixed in Vault 1.13.0, 1.12.4, 1.11.8, 1.10.11 and above. 
[[HSEC-2023-07](https://discuss.hashicorp.com/t/hcsec-2023-07-vault-fails-to-verify-if-approle-secretid-belongs-to-role-during-a-destroy-operation/51305)] + +CHANGES: + +* core: Bump Go version to 1.19.6. + +IMPROVEMENTS: + +* secrets/database: Adds error message requiring password on root credential rotation. [[GH-19103](https://github.com/hashicorp/vault/pull/19103)] + +BUG FIXES: + +* auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion [[GH-19186](https://github.com/hashicorp/vault/pull/19186)] +* core (enterprise): Fix panic when using invalid accessor for control-group request +* core: Prevent panics in `sys/leases/lookup`, `sys/leases/revoke`, and `sys/leases/renew` endpoints if provided `lease_id` is null [[GH-18951](https://github.com/hashicorp/vault/pull/18951)] +* replication (enterprise): Fix bug where reloading external plugin on a secondary would +break replication. +* secrets/ad: Fix bug where config couldn't be updated unless binddn/bindpass were included in the update. [[GH-18209](https://github.com/hashicorp/vault/pull/18209)] +* ui (enterprise): Fix cancel button from transform engine role creation page [[GH-19135](https://github.com/hashicorp/vault/pull/19135)] +* ui: Fix bug where logging in via OIDC fails if browser is in fullscreen mode [[GH-19071](https://github.com/hashicorp/vault/pull/19071)] + +## 1.10.10 +### February 6, 2023 + +CHANGES: + +* core: Bump Go version to 1.19.4. + +IMPROVEMENTS: + +* command/server: Environment variable keys are now logged at startup. [[GH-18125](https://github.com/hashicorp/vault/pull/18125)] +* core/fips: use upstream toolchain for FIPS 140-2 compliance again; this will appear as X=boringcrypto on the Go version in Vault server logs. 
+* secrets/db/mysql: Add `tls_server_name` and `tls_skip_verify` parameters [[GH-18799](https://github.com/hashicorp/vault/pull/18799)] +* ui: Prepends "passcode=" if not provided in user input for duo totp mfa method authentication [[GH-18342](https://github.com/hashicorp/vault/pull/18342)] +* ui: Update language on database role to "Connection name" [[GH-18261](https://github.com/hashicorp/vault/issues/18261)] [[GH-18350](https://github.com/hashicorp/vault/pull/18350)] + +BUG FIXES: + +* auth/approle: Fix `token_bound_cidrs` validation when using /32 blocks for role and secret ID [[GH-18145](https://github.com/hashicorp/vault/pull/18145)] +* auth/token: Fix ignored parameter warnings for valid parameters on token create [[GH-16938](https://github.com/hashicorp/vault/pull/16938)] +* cli/kv: skip formatting of nil secrets for patch and put with field parameter set [[GH-18163](https://github.com/hashicorp/vault/pull/18163)] +* core (enterprise): Fix a race condition resulting in login errors to PKCS#11 modules under high concurrency. +* core/managed-keys (enterprise): Limit verification checks to mounts in a key's namespace +* core/quotas (enterprise): Fix a potential deadlock that could occur when using lease count quotas. +* core/quotas: Fix issue with improper application of default rate limit quota exempt paths [[GH-18273](https://github.com/hashicorp/vault/pull/18273)] +* core: fix bug where context cancellations weren't forwarded to active node from performance standbys. +* core: prevent panic in login mfa enforcement delete after enforcement's namespace is deleted [[GH-18923](https://github.com/hashicorp/vault/pull/18923)] +* database/mongodb: Fix writeConcern set to be applied to any query made on the database [[GH-18546](https://github.com/hashicorp/vault/pull/18546)] +* identity (enterprise): Fix a data race when creating an entity for a local alias. 
+* kmip (enterprise): Fix Destroy operation response that omitted Unique Identifier on some batched responses. +* kmip (enterprise): Fix Locate operation response incompatibility with clients using KMIP versions prior to 1.3. +* licensing (enterprise): update autoloaded license cache after reload +* storage/raft (enterprise): Fix some storage-modifying RPCs used by perf standbys that weren't returning the resulting WAL state. +* ui: fixes query parameters not passed in api explorer test requests [[GH-18743](https://github.com/hashicorp/vault/pull/18743)] + +## 1.10.9 +### November 30, 2022 + +BUG FIXES: + +* auth: Deduplicate policies prior to ACL generation [[GH-17914](https://github.com/hashicorp/vault/pull/17914)] +* core/quotas (enterprise): Fix a lock contention issue that could occur and cause Vault to become unresponsive when creating, changing, or deleting lease count quotas. +* core: Fix potential deadlock if barrier ciphertext is less than 4 bytes. [[GH-17944](https://github.com/hashicorp/vault/pull/17944)] +* core: fix a start up race condition where performance standbys could go into a + mount loop if default policies are not yet synced from the active node. 
[[GH-17801](https://github.com/hashicorp/vault/pull/17801)] +* secrets/azure: add WAL to clean up role assignments if errors occur [[GH-18084](https://github.com/hashicorp/vault/pull/18084)] +* secrets/gcp: Fixes duplicate service account key for rotate root on standby or secondary [[GH-18109](https://github.com/hashicorp/vault/pull/18109)] +* ui: fix entity policies list link to policy show page [[GH-17950](https://github.com/hashicorp/vault/pull/17950)] + +## 1.10.8 +### November 2, 2022 + +BUG FIXES: + +* core/managed-keys (enterprise): Return better error messages when encountering key creation failures +* core/managed-keys (enterprise): fix panic when having `cache_disable` true +* core: prevent memory leak when using control group factors in a policy [[GH-17532](https://github.com/hashicorp/vault/pull/17532)] +* core: prevent panic during mfa after enforcement's namespace is deleted [[GH-17562](https://github.com/hashicorp/vault/pull/17562)] +* login: Store token in tokenhelper for interactive login MFA [[GH-17040](https://github.com/hashicorp/vault/pull/17040)] +* secrets/pki: Do not ignore provided signature bits value when signing intermediate and leaf certificates with a managed key [[GH-17328](https://github.com/hashicorp/vault/pull/17328)] +* secrets/pki: Respond to tidy-status, tidy-cancel on PR Secondary clusters. [[GH-17497](https://github.com/hashicorp/vault/pull/17497)] +* ui: Fixes oidc/jwt login issue with alternate mount path and jwt login via mount path tab [[GH-17661](https://github.com/hashicorp/vault/pull/17661)] + +## 1.10.7 +### September 30, 2022 + +SECURITY: + +* secrets/pki: Vault’s TLS certificate auth method did not initially load the optionally-configured CRL issued by the role’s CA into memory on startup, resulting in the revocation list not being checked, if the CRL has not yet been retrieved. This vulnerability, CVE-2022-41316, is fixed in Vault 1.12.0, 1.11.4, 1.10.7, and 1.9.10. 
[[HSEC-2022-24](https://discuss.hashicorp.com/t/hcsec-2022-24-vaults-tls-cert-auth-method-only-loaded-crl-after-first-request/45483)] + +BUG FIXES: + +* auth/cert: Vault does not initially load the CRLs in cert auth unless the read/write CRL endpoint is hit. [[GH-17138](https://github.com/hashicorp/vault/pull/17138)] +* core/quotas: Fix goroutine leak caused by the seal process not fully cleaning up Rate Limit Quotas. [[GH-17281](https://github.com/hashicorp/vault/pull/17281)] +* core: Prevent two or more DR failovers from invalidating SSCT tokens generated on the previous primaries. [[GH-16956](https://github.com/hashicorp/vault/pull/16956)] +* identity/oidc: Adds `claims_supported` to discovery document. [[GH-16992](https://github.com/hashicorp/vault/pull/16992)] +* replication (enterprise): Fix data race in SaveCheckpoint() +* secrets/transform (enterprise): Fix an issue loading tokenization transform configuration after a specific sequence of reconfigurations. +* secrets/transform (enterprise): Fix persistence problem with tokenization store credentials. +* ui: Fix lease force revoke action [[GH-16930](https://github.com/hashicorp/vault/pull/16930)] + +## 1.10.6 +### August 31, 2022 + +SECURITY: + +* core: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. This vulnerability, CVE-2022-40186, is fixed in 1.11.3, 1.10.6, and 1.9.9. [[HSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] + +CHANGES: + +* core: Bump Go version to 1.17.13. + +IMPROVEMENTS: + +* identity/oidc: Adds the `client_secret_post` token endpoint authentication method. 
[[GH-16598](https://github.com/hashicorp/vault/pull/16598)] + +BUG FIXES: + +* auth/gcp: Fixes the ability to reset the configuration's credentials to use application default credentials. [[GH-16524](https://github.com/hashicorp/vault/pull/16524)] +* command/debug: fix bug where monitor was not honoring configured duration [[GH-16834](https://github.com/hashicorp/vault/pull/16834)] +* core/auth: Return a 403 instead of a 500 for a malformed SSCT [[GH-16112](https://github.com/hashicorp/vault/pull/16112)] +* core: Increase the allowed concurrent gRPC streams over the cluster port. [[GH-16327](https://github.com/hashicorp/vault/pull/16327)] +* database: Invalidate queue should cancel context first to avoid deadlock [[GH-15933](https://github.com/hashicorp/vault/pull/15933)] +* identity/oidc: Change the `state` parameter of the Authorization Endpoint to optional. [[GH-16599](https://github.com/hashicorp/vault/pull/16599)] +* identity/oidc: Detect invalid `redirect_uri` values sooner in validation of the +Authorization Endpoint. [[GH-16601](https://github.com/hashicorp/vault/pull/16601)] +* identity/oidc: Fixes validation of the `request` and `request_uri` parameters. [[GH-16600](https://github.com/hashicorp/vault/pull/16600)] +* secrets/database: Fix a bug where the secret engine would queue up a lot of WAL deletes during startup. [[GH-16686](https://github.com/hashicorp/vault/pull/16686)] +* secrets/gcp: Fixes duplicate static account key creation from performance secondary clusters. 
[[GH-16534](https://github.com/hashicorp/vault/pull/16534)] +* storage/raft: Fix retry_join initialization failure [[GH-16550](https://github.com/hashicorp/vault/pull/16550)] +* ui: Fix OIDC callback to accept namespace flag in different formats [[GH-16886](https://github.com/hashicorp/vault/pull/16886)] +* ui: Fix issue logging in with JWT auth method [[GH-16466](https://github.com/hashicorp/vault/pull/16466)] +* ui: Fix naming of permitted_dns_domains form parameter on CA creation (root generation and sign intermediate). [[GH-16739](https://github.com/hashicorp/vault/pull/16739)] + +SECURITY: + +* identity/entity: When entity aliases mapped to a single entity share the same alias name, but have different mount accessors, Vault can leak metadata between the aliases. This metadata leak may result in unexpected access if templated policies are using alias metadata for path names. [[HCSEC-2022-18](https://discuss.hashicorp.com/t/hcsec-2022-18-vault-entity-alias-metadata-may-leak-between-aliases-with-the-same-name-assigned-to-the-same-entity/44550)] + +## 1.10.5 +### July 21, 2022 + +SECURITY: + +* storage/raft: Vault Enterprise (“Vault”) clusters using Integrated Storage expose an unauthenticated API endpoint that could be abused to override the voter status of a node within a Vault HA cluster, introducing potential for future data loss or catastrophic failure. This vulnerability, CVE-2022-36129, was fixed in Vault 1.9.8, 1.10.5, and 1.11.1. [[HSEC-2022-15](https://discuss.hashicorp.com/t/hcsec-2022-15-vault-enterprise-does-not-verify-existing-voter-status-when-joining-an-integrated-storage-ha-node/42420)] + +CHANGES: + +* core/fips: Disable and warn about entropy augmentation in FIPS 140-2 Inside mode [[GH-15858](https://github.com/hashicorp/vault/pull/15858)] +* core: Bump Go version to 1.17.12. 
+ +IMPROVEMENTS: + +* core: Add `sys/loggers` and `sys/loggers/:name` endpoints to provide ability to modify logging verbosity [[GH-16111](https://github.com/hashicorp/vault/pull/16111)] +* secrets/ssh: Allow additional text along with a template definition in defaultExtension value fields. [[GH-16018](https://github.com/hashicorp/vault/pull/16018)] + +BUG FIXES: + +* agent/template: Fix parsing error for the exec stanza [[GH-16231](https://github.com/hashicorp/vault/pull/16231)] +* core/identity: Replicate member_entity_ids and policies in identity/group across nodes identically [[GH-16088](https://github.com/hashicorp/vault/pull/16088)] +* core/replication (enterprise): Don't flush merkle tree pages to disk after losing active duty +* core/seal: Fix possible keyring truncation when using the file backend. [[GH-15946](https://github.com/hashicorp/vault/pull/15946)] +* core: Limit SSCT WAL checks on perf standbys to raft backends only [[GH-15879](https://github.com/hashicorp/vault/pull/15879)] +* plugin/multiplexing: Fix panic when id doesn't exist in connection map [[GH-16094](https://github.com/hashicorp/vault/pull/16094)] +* secret/pki: Do not fail validation with a legacy key_bits default value and key_type=any when signing CSRs [[GH-16246](https://github.com/hashicorp/vault/pull/16246)] +* storage/raft (enterprise): Prevent unauthenticated voter status with rejoin [[GH-16324](https://github.com/hashicorp/vault/pull/16324)] +* transform (enterprise): Fix a bug in the handling of nested or unmatched capture groups in FPE transformations. 
+* ui: Fix issue where metadata tab is hidden even though policy grants access [[GH-15824](https://github.com/hashicorp/vault/pull/15824)] +* ui: Revert using localStorage in favor of sessionStorage [[GH-16169](https://github.com/hashicorp/vault/pull/16169)] +* ui: Updated `leasId` to `leaseId` in the "Copy Credentials" section of "Generate AWS Credentials" [[GH-15685](https://github.com/hashicorp/vault/pull/15685)] + +## 1.10.4 +### June 10, 2022 + +CHANGES: + +* core: Bump Go version to 1.17.11. [[GH-go-ver-1104](https://github.com/hashicorp/vault/pull/go-ver-1104)] + +IMPROVEMENTS: + +* api/monitor: Add log_format option to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] +* auth: Globally scoped Login MFA method Get/List endpoints [[GH-15248](https://github.com/hashicorp/vault/pull/15248)] +* auth: forward cached MFA auth response to the leader using RPC instead of forwarding all login requests [[GH-15469](https://github.com/hashicorp/vault/pull/15469)] +* cli/debug: added support for retrieving metrics from DR clusters if `unauthenticated_metrics_access` is enabled [[GH-15316](https://github.com/hashicorp/vault/pull/15316)] +* command/debug: Add log_format flag to allow for logs to be emitted in JSON format [[GH-15536](https://github.com/hashicorp/vault/pull/15536)] +* core: Fix some identity data races found by Go race detector (no known impact yet). [[GH-15123](https://github.com/hashicorp/vault/pull/15123)] +* storage/raft: Use larger timeouts at startup to reduce likelihood of inducing elections. 
[[GH-15042](https://github.com/hashicorp/vault/pull/15042)] +* ui: Allow namespace param to be parsed from state queryParam [[GH-15378](https://github.com/hashicorp/vault/pull/15378)] + +BUG FIXES: + +* agent: Redact auto auth token from renew endpoints [[GH-15380](https://github.com/hashicorp/vault/pull/15380)] +* auth/kubernetes: Fix error code when using the wrong service account [[GH-15585](https://github.com/hashicorp/vault/pull/15585)] +* auth/ldap: The logic for setting the entity alias when `username_as_alias` is set +has been fixed. The previous behavior would make a request to the LDAP server to +get `user_attr` before discarding it and using the username instead. This would +make it impossible for a user to connect if this attribute was missing or had +multiple values, even though it would not be used anyway. This has been fixed +and the username is now used without making superfluous LDAP searches. [[GH-15525](https://github.com/hashicorp/vault/pull/15525)] +* auth: Fixed erroneous success message when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Fixed erroneous token information being displayed when using vault login in case of two-phase MFA [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Fixed two-phase MFA information missing from table format when using vault login [[GH-15428](https://github.com/hashicorp/vault/pull/15428)] +* auth: Prevent deleting a valid MFA method ID using the endpoint for a different MFA method type [[GH-15482](https://github.com/hashicorp/vault/pull/15482)] +* core (enterprise): Fix overcounting of lease count quota usage at startup. +* core: Prevent changing file permissions of audit logs when mode 0000 is used. [[GH-15759](https://github.com/hashicorp/vault/pull/15759)] +* core: Prevent metrics generation from causing deadlocks. 
[[GH-15693](https://github.com/hashicorp/vault/pull/15693)] +* core: fixed systemd reloading notification [[GH-15041](https://github.com/hashicorp/vault/pull/15041)] +* mfa/okta: disable client side rate limiting causing delays in push notifications [[GH-15369](https://github.com/hashicorp/vault/pull/15369)] +* storage/raft (enterprise): Auto-snapshot configuration now forbids slashes in file prefixes for all types, and "/" in path prefix for local storage type. Strip leading prefix in path prefix for AWS. Improve error handling/reporting. +* transform (enterprise): Fix non-overridable column default value causing tokenization tokens to expire prematurely when using the MySQL storage backend. +* ui: Fix inconsistent behavior in client count calendar widget [[GH-15789](https://github.com/hashicorp/vault/pull/15789)] +* ui: Fixed client count timezone for start and end months [[GH-15167](https://github.com/hashicorp/vault/pull/15167)] +* ui: fix firefox inability to recognize file format of client count csv export [[GH-15364](https://github.com/hashicorp/vault/pull/15364)] + +## 1.10.3 +### May 11, 2022 + +SECURITY: +* auth: A vulnerability was identified in Vault and Vault Enterprise (“Vault”) from 1.10.0 to 1.10.2 where MFA may not be enforced on user logins after a server restart. This vulnerability, CVE-2022-30689, was fixed in Vault 1.10.3. + +BUG FIXES: + +* auth: load login MFA configuration upon restart [[GH-15261](https://github.com/hashicorp/vault/pull/15261)] +* core/config: Only ask the system about network interfaces when address configs contain a template having the format: {{ ... 
}} [[GH-15224](https://github.com/hashicorp/vault/pull/15224)] +* core: pre-calculate namespace specific paths when tainting a route during postUnseal [[GH-15067](https://github.com/hashicorp/vault/pull/15067)] + +## 1.10.2 +### April 29, 2022 + +BUG FIXES: + +* raft: fix Raft TLS key rotation panic that occurs if active key is more than 24 hours old [[GH-15156](https://github.com/hashicorp/vault/pull/15156)] +* sdk: Fix OpenApi spec generator to properly convert TypeInt64 to OAS supported int64 [[GH-15104](https://github.com/hashicorp/vault/pull/15104)] + +## 1.10.1 +### April 22, 2022 + +CHANGES: + +* core: A request that fails path validation due to relative path check will now be responded to with a 400 rather than 500. [[GH-14328](https://github.com/hashicorp/vault/pull/14328)] +* core: Bump Go version to 1.17.9. [[GH-15044](https://github.com/hashicorp/vault/pull/15044)] + +IMPROVEMENTS: + +* agent: Upgrade hashicorp/consul-template version for sprig template functions and improved writeTo function [[GH-15092](https://github.com/hashicorp/vault/pull/15092)] +* auth: enforce a rate limit for TOTP passcode validation attempts [[GH-14864](https://github.com/hashicorp/vault/pull/14864)] +* cli/vault: warn when policy name contains upper-case letter [[GH-14670](https://github.com/hashicorp/vault/pull/14670)] +* cockroachdb: add high-availability support [[GH-12965](https://github.com/hashicorp/vault/pull/12965)] +* sentinel (enterprise): Upgrade sentinel to [v0.18.5](https://docs.hashicorp.com/sentinel/changelog#0-18-5-january-14-2022) to avoid potential naming collisions in the remote installer + +BUG FIXES: + +* Fixed panic when adding or modifying a Duo MFA Method in Enterprise +* agent: Fix log level mismatch between ERR and ERROR [[GH-14424](https://github.com/hashicorp/vault/pull/14424)] +* api/sys/raft: Update RaftSnapshotRestore to use net/http client allowing bodies larger than allocated memory to be streamed 
[[GH-14269](https://github.com/hashicorp/vault/pull/14269)] +* api: Respect increment value in grace period calculations in LifetimeWatcher [[GH-14836](https://github.com/hashicorp/vault/pull/14836)] +* auth/approle: Add maximum length for input values that result in SHA256 HMAC calculation [[GH-14746](https://github.com/hashicorp/vault/pull/14746)] +* auth: forward requests subject to login MFA from perfStandby to Active node [[GH-15009](https://github.com/hashicorp/vault/pull/15009)] +* cassandra: Update gocql Cassandra client to fix "no hosts available in the pool" error [[GH-14973](https://github.com/hashicorp/vault/pull/14973)] +* cli: Fix panic caused by parsing key=value fields whose value is a single backslash [[GH-14523](https://github.com/hashicorp/vault/pull/14523)] +* core (enterprise): Allow local alias create RPCs to persist alias metadata [[GH-changelog:_2747](https://github.com/hashicorp/vault/pull/changelog:_2747)] +* core/managed-keys (enterprise): Allow PKCS#11 managed keys to use 0 as a slot number +* core/metrics: Fix incorrect table size metric for local mounts [[GH-14755](https://github.com/hashicorp/vault/pull/14755)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited integers [[GH-15072](https://github.com/hashicorp/vault/pull/15072)] +* core: Fix panic caused by parsing JSON integers for fields defined as comma-delimited strings [[GH-14522](https://github.com/hashicorp/vault/pull/14522)] +* core: Fix panic caused by parsing policies with empty slice values. 
[[GH-14501](https://github.com/hashicorp/vault/pull/14501)] +* core: Fix panic for help request URL paths without /v1/ prefix [[GH-14704](https://github.com/hashicorp/vault/pull/14704)] +* core: fixing excessive unix file permissions [[GH-14791](https://github.com/hashicorp/vault/pull/14791)] +* core: fixing excessive unix file permissions on dir, files and archive created by vault debug command [[GH-14846](https://github.com/hashicorp/vault/pull/14846)] +* core: report unused or redundant keys in server configuration [[GH-14752](https://github.com/hashicorp/vault/pull/14752)] +* core: time.After() used in a select statement can lead to memory leak [[GH-14814](https://github.com/hashicorp/vault/pull/14814)] +* raft: Ensure initialMmapSize is set to 0 on Windows [[GH-14977](https://github.com/hashicorp/vault/pull/14977)] +* replication (enterprise): fix panic due to missing entity during invalidation of local aliases. [[GH-14622](https://github.com/hashicorp/vault/pull/14622)] +* secrets/database: Ensure that a `connection_url` password is redacted in all cases. [[GH-14744](https://github.com/hashicorp/vault/pull/14744)] +* secrets/pki: Fix handling of "any" key type with default zero signature bits value. [[GH-14875](https://github.com/hashicorp/vault/pull/14875)] +* secrets/pki: Fixed bug where larger SHA-2 hashes were truncated with shorter ECDSA CA certificates [[GH-14943](https://github.com/hashicorp/vault/pull/14943)] +* ui: Fix Generated Token's Policies helpText to clarify that comma separated values are not accepted in this field. 
[[GH-15046](https://github.com/hashicorp/vault/pull/15046)] +* ui: Fixes edit auth method capabilities issue [[GH-14966](https://github.com/hashicorp/vault/pull/14966)] +* ui: Fixes issue logging in with OIDC from a listed auth mounts tab [[GH-14916](https://github.com/hashicorp/vault/pull/14916)] +* ui: fix search-select component showing blank selections when editing group member entity [[GH-15058](https://github.com/hashicorp/vault/pull/15058)] +* ui: masked values no longer give away length or location of special characters [[GH-15025](https://github.com/hashicorp/vault/pull/15025)] + +## 1.10.0 +### March 23, 2022 + +CHANGES: + +* core (enterprise): requests with newly generated tokens to perf standbys which are lagging behind the active node return http 412 instead of 400/403/50x. +* core: Changes the unit of `default_lease_ttl` and `max_lease_ttl` values returned by +the `/sys/config/state/sanitized` endpoint from nanoseconds to seconds. [[GH-14206](https://github.com/hashicorp/vault/pull/14206)] +* core: Bump Go version to 1.17.7. [[GH-14232](https://github.com/hashicorp/vault/pull/14232)] +* plugin/database: The return value from `POST /database/config/:name` has been updated to "204 No Content" [[GH-14033](https://github.com/hashicorp/vault/pull/14033)] +* secrets/azure: Changes the configuration parameter `use_microsoft_graph_api` to use the Microsoft +Graph API by default. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] +* storage/etcd: Remove support for v2. [[GH-14193](https://github.com/hashicorp/vault/pull/14193)] +* ui: Upgrade Ember to version 3.24 [[GH-13443](https://github.com/hashicorp/vault/pull/13443)] + +FEATURES: + +* **Database plugin multiplexing**: manage multiple database connections with a single plugin process [[GH-14033](https://github.com/hashicorp/vault/pull/14033)] +* **Login MFA**: Single and two phase MFA is now available when authenticating to Vault. 
[[GH-14025](https://github.com/hashicorp/vault/pull/14025)] +* **Mount Migration**: Vault supports moving secrets and auth mounts both within and across namespaces. +* **Postgres in the UI**: Postgres DB is now supported by the UI [[GH-12945](https://github.com/hashicorp/vault/pull/12945)] +* **Report in-flight requests**: Adding a trace capability to show in-flight requests, and a new gauge metric to show the total number of in-flight requests [[GH-13024](https://github.com/hashicorp/vault/pull/13024)] +* **Server Side Consistent Tokens**: Service tokens have been updated to be longer (a minimum of 95 bytes) and token prefixes for all token types are updated from s., b., and r. to hvs., hvb., and hvr. for service, batch, and recovery tokens respectively. Vault clusters with integrated storage will now have read-after-write consistency by default. [[GH-14109](https://github.com/hashicorp/vault/pull/14109)] +* **Transit SHA-3 Support**: Add support for SHA-3 in the Transit backend. [[GH-13367](https://github.com/hashicorp/vault/pull/13367)] +* **Transit Time-Based Key Autorotation**: Add support for automatic, time-based key rotation to transit secrets engine, including in the UI. [[GH-13691](https://github.com/hashicorp/vault/pull/13691)] +* **UI Client Count Improvements**: Restructures client count dashboard, making use of billing start date to improve accuracy. Adds mount-level distribution and filtering. [[GH-client-counts](https://github.com/hashicorp/vault/pull/client-counts)] +* **Agent Telemetry**: The Vault Agent can now collect and return telemetry information at the `/agent/v1/metrics` endpoint. + +IMPROVEMENTS: + +* agent: Adds ability to configure specific user-assigned managed identities for Azure auto-auth. 
[[GH-14214](https://github.com/hashicorp/vault/pull/14214)] +* agent: The `agent/v1/quit` endpoint can now be used to stop the Vault Agent remotely [[GH-14223](https://github.com/hashicorp/vault/pull/14223)] +* api: Allow cloning `api.Client` tokens via `api.Config.CloneToken` or `api.Client.SetCloneToken()`. [[GH-13515](https://github.com/hashicorp/vault/pull/13515)] +* api: Define constants for X-Vault-Forward and X-Vault-Inconsistent headers [[GH-14067](https://github.com/hashicorp/vault/pull/14067)] +* api: Implements Login method in Go client libraries for GCP and Azure auth methods [[GH-13022](https://github.com/hashicorp/vault/pull/13022)] +* api: Implements Login method in Go client libraries for LDAP auth methods [[GH-13841](https://github.com/hashicorp/vault/pull/13841)] +* api: Trim newline character from wrapping token in logical.Unwrap from the api package [[GH-13044](https://github.com/hashicorp/vault/pull/13044)] +* api: add api method for modifying raft autopilot configuration [[GH-12428](https://github.com/hashicorp/vault/pull/12428)] +* api: respect WithWrappingToken() option during AppRole login authentication when used with secret ID specified from environment or from string [[GH-13241](https://github.com/hashicorp/vault/pull/13241)] +* audit: The audit logs now contain the port used by the client [[GH-12790](https://github.com/hashicorp/vault/pull/12790)] +* auth/aws: Enable region detection in the CLI by specifying the region as `auto` [[GH-14051](https://github.com/hashicorp/vault/pull/14051)] +* auth/cert: Add certificate extensions as metadata [[GH-13348](https://github.com/hashicorp/vault/pull/13348)] +* auth/jwt: The Authorization Code flow makes use of the Proof Key for Code Exchange (PKCE) extension. 
[[GH-13365](https://github.com/hashicorp/vault/pull/13365)] +* auth/kubernetes: Added support for dynamically reloading short-lived tokens for better Kubernetes 1.21+ compatibility [[GH-13595](https://github.com/hashicorp/vault/pull/13595)] +* auth/ldap: Add a response warning and server log whenever the config is accessed +if `userfilter` doesn't consider `userattr` [[GH-14095](https://github.com/hashicorp/vault/pull/14095)] +* auth/ldap: Add username to alias metadata [[GH-13669](https://github.com/hashicorp/vault/pull/13669)] +* auth/ldap: Add username_as_alias configurable to change how aliases are named [[GH-14324](https://github.com/hashicorp/vault/pull/14324)] +* auth/okta: Update [okta-sdk-golang](https://github.com/okta/okta-sdk-golang) dependency to version v2.9.1 for improved request backoff handling [[GH-13439](https://github.com/hashicorp/vault/pull/13439)] +* auth/token: The `auth/token/revoke-accessor` endpoint is now idempotent and will +not error out if the token has already been revoked. [[GH-13661](https://github.com/hashicorp/vault/pull/13661)] +* auth: reading `sys/auth/:path` now returns the configuration for the auth engine mounted at the given path [[GH-12793](https://github.com/hashicorp/vault/pull/12793)] +* cli: interactive CLI for login mfa [[GH-14131](https://github.com/hashicorp/vault/pull/14131)] +* command (enterprise): "vault license get" now uses non-deprecated endpoint /sys/license/status +* core/ha: Add new mechanism for keeping track of peers talking to active node, and new 'operator members' command to view them. [[GH-13292](https://github.com/hashicorp/vault/pull/13292)] +* core/identity: Support updating an alias' `custom_metadata` to be empty. 
[[GH-13395](https://github.com/hashicorp/vault/pull/13395)] +* core/pki: Support Y10K value in notAfter field to be compliant with IEEE 802.1AR-2018 standard [[GH-12795](https://github.com/hashicorp/vault/pull/12795)] +* core/pki: Support Y10K value in notAfter field when signing non-CA certificates [[GH-13736](https://github.com/hashicorp/vault/pull/13736)] +* core: Add duration and start_time to completed requests log entries [[GH-13682](https://github.com/hashicorp/vault/pull/13682)] +* core: Add support to list password policies at `sys/policies/password` [[GH-12787](https://github.com/hashicorp/vault/pull/12787)] +* core: Add support to list version history via API at `sys/version-history` and via CLI with `vault version-history` [[GH-13766](https://github.com/hashicorp/vault/pull/13766)] +* core: Fixes code scanning alerts [[GH-13667](https://github.com/hashicorp/vault/pull/13667)] +* core: Periodically test the health of connectivity to auto-seal backends [[GH-13078](https://github.com/hashicorp/vault/pull/13078)] +* core: Reading `sys/mounts/:path` now returns the configuration for the secret engine at the given path [[GH-12792](https://github.com/hashicorp/vault/pull/12792)] +* core: Replace "master key" terminology with "root key" [[GH-13324](https://github.com/hashicorp/vault/pull/13324)] +* core: Small changes to ensure goroutines terminate in tests [[GH-14197](https://github.com/hashicorp/vault/pull/14197)] +* core: Systemd unit file included with the Linux packages now sets the service type to notify. [[GH-14385](https://github.com/hashicorp/vault/pull/14385)] +* core: Update github.com/prometheus/client_golang to fix security vulnerability CVE-2022-21698. [[GH-14190](https://github.com/hashicorp/vault/pull/14190)] +* core: Vault now supports the PROXY protocol v2. Support for UNKNOWN connections +has also been added to the PROXY protocol v1. 
[[GH-13540](https://github.com/hashicorp/vault/pull/13540)] +* http (enterprise): Serve /sys/license/status endpoint within namespaces +* identity/oidc: Adds a default OIDC provider [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] +* identity/oidc: Adds a default key for OIDC clients [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] +* identity/oidc: Adds an `allow_all` assignment that permits all entities to authenticate via an OIDC client [[GH-14119](https://github.com/hashicorp/vault/pull/14119)] +* identity/oidc: Adds proof key for code exchange (PKCE) support to OIDC providers. [[GH-13917](https://github.com/hashicorp/vault/pull/13917)] +* sdk: Add helper for decoding root tokens [[GH-10505](https://github.com/hashicorp/vault/pull/10505)] +* secrets/azure: Adds support for rotate-root. [#70](https://github.com/hashicorp/vault-plugin-secrets-azure/pull/70) [[GH-13034](https://github.com/hashicorp/vault/pull/13034)] +* secrets/consul: Add support for consul enterprise namespaces and admin partitions. [[GH-13850](https://github.com/hashicorp/vault/pull/13850)] +* secrets/consul: Add support for consul roles. [[GH-14014](https://github.com/hashicorp/vault/pull/14014)] +* secrets/database/influxdb: Switch/upgrade to the `influxdb1-client` module [[GH-12262](https://github.com/hashicorp/vault/pull/12262)] +* secrets/database: Add database configuration parameter 'disable_escaping' for username and password when connecting to a database. 
[[GH-13414](https://github.com/hashicorp/vault/pull/13414)] +* secrets/kv: add full secret path output to table-formatted responses [[GH-14301](https://github.com/hashicorp/vault/pull/14301)] +* secrets/kv: add patch support for KVv2 key metadata [[GH-13215](https://github.com/hashicorp/vault/pull/13215)] +* secrets/kv: add subkeys endpoint to retrieve a secret's structure without its values [[GH-13893](https://github.com/hashicorp/vault/pull/13893)] +* secrets/pki: Add ability to fetch individual certificate as DER or PEM [[GH-10948](https://github.com/hashicorp/vault/pull/10948)] +* secrets/pki: Add count and duration metrics to PKI issue and revoke calls. [[GH-13889](https://github.com/hashicorp/vault/pull/13889)] +* secrets/pki: Add error handling for error types other than UserError or InternalError [[GH-14195](https://github.com/hashicorp/vault/pull/14195)] +* secrets/pki: Allow URI SAN templates in allowed_uri_sans when allowed_uri_sans_template is set to true. [[GH-10249](https://github.com/hashicorp/vault/pull/10249)] +* secrets/pki: Allow other_sans in sign-intermediate and sign-verbatim [[GH-13958](https://github.com/hashicorp/vault/pull/13958)] +* secrets/pki: Calculate the Subject Key Identifier as suggested in [RFC 5280, Section 4.2.1.2](https://datatracker.ietf.org/doc/html/rfc5280#section-4.2.1.2). [[GH-11218](https://github.com/hashicorp/vault/pull/11218)] +* secrets/pki: Restrict issuance of wildcard certificates via role parameter (`allow_wildcard_certificates`) [[GH-14238](https://github.com/hashicorp/vault/pull/14238)] +* secrets/pki: Return complete chain (in `ca_chain` field) on calls to `pki/cert/ca_chain` [[GH-13935](https://github.com/hashicorp/vault/pull/13935)] +* secrets/pki: Use application/pem-certificate-chain for PEM certificates, application/x-pem-file for PEM CRLs [[GH-13927](https://github.com/hashicorp/vault/pull/13927)] +* secrets/pki: select appropriate signature algorithm for ECDSA signature on certificates. 
[[GH-11216](https://github.com/hashicorp/vault/pull/11216)] +* secrets/ssh: Add support for generating non-RSA SSH CAs [[GH-14008](https://github.com/hashicorp/vault/pull/14008)] +* secrets/ssh: Allow specifying multiple approved key lengths for a single algorithm [[GH-13991](https://github.com/hashicorp/vault/pull/13991)] +* secrets/ssh: Use secure default for algorithm signer (rsa-sha2-256) with RSA SSH CA keys on new roles [[GH-14006](https://github.com/hashicorp/vault/pull/14006)] +* secrets/transit: Don't abort transit encrypt or decrypt batches on single item failure. [[GH-13111](https://github.com/hashicorp/vault/pull/13111)] +* storage/aerospike: Upgrade `aerospike-client-go` to v5.6.0. [[GH-12165](https://github.com/hashicorp/vault/pull/12165)] +* storage/raft: Set InitialMmapSize to 100GB on 64bit architectures [[GH-13178](https://github.com/hashicorp/vault/pull/13178)] +* storage/raft: When using retry_join stanzas, join against all of them in parallel. [[GH-13606](https://github.com/hashicorp/vault/pull/13606)] +* sys/raw: Enhance sys/raw to read and write values that cannot be encoded in json. 
[[GH-13537](https://github.com/hashicorp/vault/pull/13537)] +* ui: Add support for ECDSA and Ed25519 certificate views [[GH-13894](https://github.com/hashicorp/vault/pull/13894)] +* ui: Add version diff view for KV V2 [[GH-13000](https://github.com/hashicorp/vault/pull/13000)] +* ui: Added client side paging for namespace list view [[GH-13195](https://github.com/hashicorp/vault/pull/13195)] +* ui: Adds flight icons to UI [[GH-12976](https://github.com/hashicorp/vault/pull/12976)] +* ui: Adds multi-factor authentication support [[GH-14049](https://github.com/hashicorp/vault/pull/14049)] +* ui: Allow static role credential rotation in Database secrets engines [[GH-14268](https://github.com/hashicorp/vault/pull/14268)] +* ui: Display badge for all versions in secrets engine header [[GH-13015](https://github.com/hashicorp/vault/pull/13015)] +* ui: Swap browser localStorage in favor of sessionStorage [[GH-14054](https://github.com/hashicorp/vault/pull/14054)] +* ui: The integrated web terminal now accepts both `-f` and `--force` as aliases +for `-force` for the `write` command. [[GH-13683](https://github.com/hashicorp/vault/pull/13683)] +* ui: Transform advanced templating with encode/decode format support [[GH-13908](https://github.com/hashicorp/vault/pull/13908)] +* ui: Updates ember blueprints to glimmer components [[GH-13149](https://github.com/hashicorp/vault/pull/13149)] +* ui: customizes empty state messages for transit and transform [[GH-13090](https://github.com/hashicorp/vault/pull/13090)] + +BUG FIXES: + +* Fixed bug where auth method only considers system-identity when multiple identities are available. 
[#50](https://github.com/hashicorp/vault-plugin-auth-azure/pull/50) [[GH-14138](https://github.com/hashicorp/vault/pull/14138)] +* activity log (enterprise): allow partial monthly client count to be accessed from namespaces [[GH-13086](https://github.com/hashicorp/vault/pull/13086)] +* agent: Fixes bug where vault agent is unaware of the namespace in the config when wrapping token +* api/client: Fixes an issue where the `replicateStateStore` was being set to `nil` upon consecutive calls to `client.SetReadYourWrites(true)`. [[GH-13486](https://github.com/hashicorp/vault/pull/13486)] +* auth/approle: Fix regression where unset cidrlist is returned as nil instead of zero-length array. [[GH-13235](https://github.com/hashicorp/vault/pull/13235)] +* auth/approle: Fix wrapping of nil errors in `login` endpoint [[GH-14107](https://github.com/hashicorp/vault/pull/14107)] +* auth/github: Use the Organization ID instead of the Organization name to verify the org membership. [[GH-13332](https://github.com/hashicorp/vault/pull/13332)] +* auth/kubernetes: Properly handle the migration of role storage entries containing an empty `alias_name_source` [[GH-13925](https://github.com/hashicorp/vault/pull/13925)] +* auth/kubernetes: ensure valid entity alias names created for projected volume tokens [[GH-14144](https://github.com/hashicorp/vault/pull/14144)] +* auth/oidc: Fixes OIDC auth from the Vault UI when using the implicit flow and `form_post` response mode. [[GH-13492](https://github.com/hashicorp/vault/pull/13492)] +* cli: Fix using kv patch with older server versions that don't support HTTP PATCH. [[GH-13615](https://github.com/hashicorp/vault/pull/13615)] +* core (enterprise): Fix a data race in logshipper. +* core (enterprise): Workaround AWS CloudHSM v5 SDK issue not allowing read-only sessions +* core/api: Fix overwriting of request headers when using JSONMergePatch. 
[[GH-14222](https://github.com/hashicorp/vault/pull/14222)] +* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13093](https://github.com/hashicorp/vault/pull/13093)] +* core/identity: Address a data race condition between local updates to aliases and invalidations [[GH-13476](https://github.com/hashicorp/vault/pull/13476)] +* core/token: Fix null token panic from 'v1/auth/token/' endpoints and return proper error response. [[GH-13233](https://github.com/hashicorp/vault/pull/13233)] +* core/token: Fix null token_type panic resulting from 'v1/auth/token/roles/{role_name}' endpoint [[GH-13236](https://github.com/hashicorp/vault/pull/13236)] +* core: Fix warnings logged on perf standbys re stored versions [[GH-13042](https://github.com/hashicorp/vault/pull/13042)] +* core: `-output-curl-string` now properly sets cURL options for client and CA +certificates. [[GH-13660](https://github.com/hashicorp/vault/pull/13660)] +* core: add support for go-sockaddr templates in the top-level cluster_addr field [[GH-13678](https://github.com/hashicorp/vault/pull/13678)] +* core: authentication to "login" endpoint for non-existent mount path returns permission denied with status code 403 [[GH-13162](https://github.com/hashicorp/vault/pull/13162)] +* core: revert some unintentionally downgraded dependencies from 1.9.0-rc1 [[GH-13168](https://github.com/hashicorp/vault/pull/13168)] +* ha (enterprise): Prevents performance standby nodes from serving and caching stale data immediately after performance standby election completes +* http (enterprise): Always forward internal/counters endpoints from perf standbys to active node +* http:Fix /sys/monitor endpoint returning streaming not supported [[GH-13200](https://github.com/hashicorp/vault/pull/13200)] +* identity/oidc: Adds support for port-agnostic validation of loopback IP redirect URIs. 
[[GH-13871](https://github.com/hashicorp/vault/pull/13871)] +* identity/oidc: Check for a nil signing key on rotation to prevent panics. [[GH-13716](https://github.com/hashicorp/vault/pull/13716)] +* identity/oidc: Fixes inherited group membership when evaluating client assignments [[GH-14013](https://github.com/hashicorp/vault/pull/14013)] +* identity/oidc: Fixes potential write to readonly storage on performance secondary clusters during key rotation [[GH-14426](https://github.com/hashicorp/vault/pull/14426)] +* identity/oidc: Make the `nonce` parameter optional for the Authorization Endpoint of OIDC providers. [[GH-13231](https://github.com/hashicorp/vault/pull/13231)] +* identity/token: Fixes a bug where duplicate public keys could appear in the .well-known JWKS [[GH-14543](https://github.com/hashicorp/vault/pull/14543)] +* identity: Fix possible nil pointer dereference. [[GH-13318](https://github.com/hashicorp/vault/pull/13318)] +* identity: Fix regression preventing startup when aliases were created pre-1.9. [[GH-13169](https://github.com/hashicorp/vault/pull/13169)] +* identity: Fixes a panic in the OIDC key rotation due to a missing nil check. [[GH-13298](https://github.com/hashicorp/vault/pull/13298)] +* kmip (enterprise): Fix locate by name operations fail to find key after a rekey operation. +* licensing (enterprise): Revert accidental inclusion of the TDE feature from the `prem` build. +* metrics/autosnapshots (enterprise) : Fix bug that could cause +vault.autosnapshots.save.errors to not be incremented when there is an +autosnapshot save error. +* physical/mysql: Create table with wider `vault_key` column when initializing database tables. [[GH-14231](https://github.com/hashicorp/vault/pull/14231)] +* plugin/couchbase: Fix an issue in which the locking patterns did not allow parallel requests. 
[[GH-13033](https://github.com/hashicorp/vault/pull/13033)] +* replication (enterprise): When using encrypted secondary tokens, only clear the +private key after a successful connection to the primary cluster +* sdk/framework: Generate proper OpenAPI specs for path patterns that use an alternation as the root. [[GH-13487](https://github.com/hashicorp/vault/pull/13487)] +* sdk/helper/ldaputil: properly escape a trailing escape character to prevent panics. [[GH-13452](https://github.com/hashicorp/vault/pull/13452)] +* sdk/queue: move lock before length check to prevent panics. [[GH-13146](https://github.com/hashicorp/vault/pull/13146)] +* sdk: Fixes OpenAPI to distinguish between paths that can do only List, or both List and Read. [[GH-13643](https://github.com/hashicorp/vault/pull/13643)] +* secrets/azure: Fixed bug where Azure environment did not change Graph URL [[GH-13973](https://github.com/hashicorp/vault/pull/13973)] +* secrets/azure: Fixes service principal generation when assigning roles that have [DataActions](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-definitions#dataactions). [[GH-13277](https://github.com/hashicorp/vault/pull/13277)] +* secrets/azure: Fixes the [rotate root](https://www.vaultproject.io/api-docs/secret/azure#rotate-root) +operation for upgraded configurations with a `root_password_ttl` of zero. [[GH-14130](https://github.com/hashicorp/vault/pull/14130)] +* secrets/database/cassandra: change connect_timeout to 5s as documentation says [[GH-12443](https://github.com/hashicorp/vault/pull/12443)] +* secrets/database/mssql: Accept a boolean for `contained_db`, rather than just a string. [[GH-13469](https://github.com/hashicorp/vault/pull/13469)] +* secrets/gcp: Fixed bug where error was not reported for invalid bindings [[GH-13974](https://github.com/hashicorp/vault/pull/13974)] +* secrets/gcp: Fixes role bindings for BigQuery dataset resources. 
[[GH-13548](https://github.com/hashicorp/vault/pull/13548)] +* secrets/openldap: Fix panic from nil logger in backend [[GH-14171](https://github.com/hashicorp/vault/pull/14171)] +* secrets/pki: Default value for key_bits changed to 0, enabling key_type=ec key generation with default value [[GH-13080](https://github.com/hashicorp/vault/pull/13080)] +* secrets/pki: Fix issuance of wildcard certificates matching glob patterns [[GH-14235](https://github.com/hashicorp/vault/pull/14235)] +* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-13759](https://github.com/hashicorp/vault/pull/13759)] +* secrets/pki: Fix regression causing performance secondaries to forward certificate generation to the primary. [[GH-2456](https://github.com/hashicorp/vault/pull/2456)] +* secrets/pki: Fixes around NIST P-curve signature hash length, default value for signature_bits changed to 0. [[GH-12872](https://github.com/hashicorp/vault/pull/12872)] +* secrets/pki: Recognize ed25519 when requesting a response in PKCS8 format [[GH-13257](https://github.com/hashicorp/vault/pull/13257)] +* secrets/pki: Skip signature bits validation for ed25519 curve key type [[GH-13254](https://github.com/hashicorp/vault/pull/13254)] +* secrets/transit: Ensure that Vault does not panic for invalid nonce size when we aren't in convergent encryption mode. [[GH-13690](https://github.com/hashicorp/vault/pull/13690)] +* secrets/transit: Return an error if any required parameter is missing. [[GH-14074](https://github.com/hashicorp/vault/pull/14074)] +* storage/raft: Fix a panic when trying to store a key > 32KB in a transaction. [[GH-13286](https://github.com/hashicorp/vault/pull/13286)] +* storage/raft: Fix a panic when trying to write a key > 32KB [[GH-13282](https://github.com/hashicorp/vault/pull/13282)] +* storage/raft: Fix issues allowing invalid nodes to become leadership candidates. 
[[GH-13703](https://github.com/hashicorp/vault/pull/13703)] +* storage/raft: Fix regression in 1.9.0-rc1 that changed how time is represented in Raft logs; this prevented using a raft db created pre-1.9. [[GH-13165](https://github.com/hashicorp/vault/pull/13165)] +* storage/raft: On linux, use map_populate for bolt files to improve startup time. [[GH-13573](https://github.com/hashicorp/vault/pull/13573)] +* storage/raft: Units for bolt metrics now given in milliseconds instead of nanoseconds [[GH-13749](https://github.com/hashicorp/vault/pull/13749)] +* ui: Adds pagination to auth methods list view [[GH-13054](https://github.com/hashicorp/vault/pull/13054)] +* ui: Do not show verify connection value on database connection config page [[GH-13152](https://github.com/hashicorp/vault/pull/13152)] +* ui: Fix client count current month data not showing unless monthly history data exists [[GH-13396](https://github.com/hashicorp/vault/pull/13396)] +* ui: Fix default TTL display and set on database role [[GH-14224](https://github.com/hashicorp/vault/pull/14224)] +* ui: Fix incorrect validity message on transit secrets engine [[GH-14233](https://github.com/hashicorp/vault/pull/14233)] +* ui: Fix issue where UI incorrectly handled API errors when mounting backends [[GH-14551](https://github.com/hashicorp/vault/pull/14551)] +* ui: Fix kv engine access bug [[GH-13872](https://github.com/hashicorp/vault/pull/13872)] +* ui: Fixes breadcrumb bug for secrets navigation [[GH-13604](https://github.com/hashicorp/vault/pull/13604)] +* ui: Fixes caching issue on kv new version create [[GH-14489](https://github.com/hashicorp/vault/pull/14489)] +* ui: Fixes displaying empty masked values in PKI engine [[GH-14400](https://github.com/hashicorp/vault/pull/14400)] +* ui: Fixes horizontal bar chart hover issue when filtering namespaces and mounts [[GH-14493](https://github.com/hashicorp/vault/pull/14493)] +* ui: Fixes issue logging out with wrapped token query parameter 
[[GH-14329](https://github.com/hashicorp/vault/pull/14329)] +* ui: Fixes issue removing raft storage peer via cli not reflected in UI until refresh [[GH-13098](https://github.com/hashicorp/vault/pull/13098)] +* ui: Fixes issue restoring raft storage snapshot [[GH-13107](https://github.com/hashicorp/vault/pull/13107)] +* ui: Fixes issue saving KMIP role correctly [[GH-13585](https://github.com/hashicorp/vault/pull/13585)] +* ui: Fixes issue with OIDC auth workflow when using MetaMask Chrome extension [[GH-13133](https://github.com/hashicorp/vault/pull/13133)] +* ui: Fixes issue with SearchSelect component not holding focus [[GH-13590](https://github.com/hashicorp/vault/pull/13590)] +* ui: Fixes issue with automate secret deletion value not displaying initially if set in secret metadata edit view [[GH-13177](https://github.com/hashicorp/vault/pull/13177)] +* ui: Fixes issue with correct auth method not selected when logging out from OIDC or JWT methods [[GH-14545](https://github.com/hashicorp/vault/pull/14545)] +* ui: Fixes issue with placeholder not displaying for automatically deleted secrets when deletion time has passed [[GH-13166](https://github.com/hashicorp/vault/pull/13166)] +* ui: Fixes issue with the number of PGP Key inputs not matching the key shares number in the initialization form on change [[GH-13038](https://github.com/hashicorp/vault/pull/13038)] +* ui: Fixes long secret key names overlapping masked values [[GH-13032](https://github.com/hashicorp/vault/pull/13032)] +* ui: Fixes node-forge error when parsing EC (elliptical curve) certs [[GH-13238](https://github.com/hashicorp/vault/pull/13238)] +* ui: Redirects to managed namespace if incorrect namespace in URL param [[GH-14422](https://github.com/hashicorp/vault/pull/14422)] +* ui: Removes ability to tune token_type for token auth methods [[GH-12904](https://github.com/hashicorp/vault/pull/12904)] +* ui: trigger token renewal if inactive and half of TTL has passed 
[[GH-13950](https://github.com/hashicorp/vault/pull/13950)] diff --git a/changelog/24010.txt b/changelog/24010.txt new file mode 100644 index 000000000000..2332eea289ce --- /dev/null +++ b/changelog/24010.txt @@ -0,0 +1,87 @@ +# Each line is a file pattern followed by one or more owners. Being an owner +# means those groups or individuals will be added as reviewers to PRs affecting +# those areas of the code. +# +# More on CODEOWNERS files: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners + +# Select Auth engines are owned by Ecosystem +/builtin/credential/aws/ @hashicorp/vault-ecosystem-applications +/builtin/credential/github/ @hashicorp/vault-ecosystem-applications +/builtin/credential/ldap/ @hashicorp/vault-ecosystem-applications +/builtin/credential/okta/ @hashicorp/vault-ecosystem-applications + +# Secrets engines (pki, ssh, totp and transit omitted) +/builtin/logical/aws/ @hashicorp/vault-ecosystem-applications +/builtin/logical/cassandra/ @hashicorp/vault-ecosystem-applications +/builtin/logical/consul/ @hashicorp/vault-ecosystem-applications +/builtin/logical/database/ @hashicorp/vault-ecosystem-applications +/builtin/logical/mongodb/ @hashicorp/vault-ecosystem-applications +/builtin/logical/mssql/ @hashicorp/vault-ecosystem-applications +/builtin/logical/mysql/ @hashicorp/vault-ecosystem-applications +/builtin/logical/nomad/ @hashicorp/vault-ecosystem-applications +/builtin/logical/postgresql/ @hashicorp/vault-ecosystem-applications +/builtin/logical/rabbitmq/ @hashicorp/vault-ecosystem-applications + +# Identity Integrations (OIDC, tokens) +/vault/identity_store_oidc* @hashicorp/vault-ecosystem-applications + +/plugins/ @hashicorp/vault-ecosystem +/vault/plugin_catalog.go @hashicorp/vault-ecosystem + +/website/content/ @hashicorp/vault-education-approvers +/website/content/docs/plugin-portal.mdx @acahn @hashicorp/vault-education-approvers + +# Plugin docs +/website/content/docs/plugins/ 
@hashicorp/vault-ecosystem @hashicorp/vault-education-approvers +/website/content/docs/upgrading/plugins.mdx @hashicorp/vault-ecosystem @hashicorp/vault-education-approvers + +# UI code related to Vault's JWT/OIDC auth method and OIDC provider. +# Changes to these files often require coordination with backend code, +# so stewards of the backend code are added below for notification. +/ui/app/components/auth-jwt.js @hashicorp/vault-ecosystem-applications +/ui/app/routes/vault/cluster/oidc-*.js @hashicorp/vault-ecosystem-applications + +# Release config; service account is required for automation tooling. +/.release/ @hashicorp/github-secure-vault-core @hashicorp/quality-team +/.github/workflows/build.yml @hashicorp/github-secure-vault-core @hashicorp/quality-team + +# Quality engineering +/.github/ @hashicorp/quality-team +/enos/ @hashicorp/quality-team + +# Cryptosec +/builtin/logical/pki/ @hashicorp/vault-crypto +/builtin/logical/pkiext/ @hashicorp/vault-crypto +/website/content/docs/secrets/pki/ @hashicorp/vault-crypto +/website/content/api-docs/secret/pki.mdx @hashicorp/vault-crypto +/builtin/credential/cert/ @hashicorp/vault-crypto +/website/content/docs/auth/cert.mdx @hashicorp/vault-crypto +/website/content/api-docs/auth/cert.mdx @hashicorp/vault-crypto +/builtin/logical/ssh/ @hashicorp/vault-crypto +/website/content/docs/secrets/ssh/ @hashicorp/vault-crypto +/website/content/api-docs/secret/ssh.mdx @hashicorp/vault-crypto +/builtin/logical/transit/ @hashicorp/vault-crypto +/website/content/docs/secrets/transit/ @hashicorp/vault-crypto +/website/content/api-docs/secret/transit.mdx @hashicorp/vault-crypto +/helper/random/ @hashicorp/vault-crypto +/sdk/helper/certutil/ @hashicorp/vault-crypto +/sdk/helper/cryptoutil/ @hashicorp/vault-crypto +/sdk/helper/kdf/ @hashicorp/vault-crypto +/sdk/helper/keysutil/ @hashicorp/vault-crypto +/sdk/helper/ocsp/ @hashicorp/vault-crypto +/sdk/helper/salt/ @hashicorp/vault-crypto +/sdk/helper/tlsutil/ @hashicorp/vault-crypto 
+/shamir/ @hashicorp/vault-crypto +/vault/barrier* @hashicorp/vault-crypto +/vault/managed_key* @hashicorp/vault-crypto +/vault/seal* @hashicorp/vault-crypto +/vault/seal/ @hashicorp/vault-crypto +/website/content/docs/configuration/seal/ @hashicorp/vault-crypto +/website/content/docs/enterprise/sealwrap.mdx @hashicorp/vault-crypto +/website/content/api-docs/system/sealwrap-rewrap.mdx @hashicorp/vault-crypto +/website/content/docs/secrets/transform/ @hashicorp/vault-crypto +/website/content/api-docs/secret/transform.mdx @hashicorp/vault-crypto +/website/content/docs/secrets/kmip-profiles.mdx @hashicorp/vault-crypto +/website/content/docs/secrets/kmip.mdx @hashicorp/vault-crypto +/website/content/api-docs/secret/kmip.mdx @hashicorp/vault-crypto +/website/content/docs/enterprise/fips/ @hashicorp/vault-crypto diff --git a/changelog/24099.txt b/changelog/24099.txt new file mode 100644 index 000000000000..2fd0aa80e20d --- /dev/null +++ b/changelog/24099.txt @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +FROM docker.mirror.hashicorp.services/ubuntu:22.04 + +ARG plugin + +RUN groupadd nonroot && useradd -g nonroot nonroot + +USER nonroot + +COPY ${plugin} /bin/plugin + +ENTRYPOINT [ "/bin/plugin" ] \ No newline at end of file diff --git a/changelog/24168.txt b/changelog/24168.txt new file mode 100644 index 000000000000..0731bd030f72 --- /dev/null +++ b/changelog/24168.txt @@ -0,0 +1,92 @@ +License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved. +"Business Source License" is a trademark of MariaDB Corporation Ab. + +Parameters + +Licensor: HashiCorp, Inc. +Licensed Work: Vault Version 1.15.0 or later. The Licensed Work is (c) 2023 + HashiCorp, Inc. +Additional Use Grant: You may make production use of the Licensed Work, provided + Your use does not include offering the Licensed Work to third + parties on a hosted or embedded basis in order to compete with + HashiCorp's paid version(s) of the Licensed Work. 
For purposes + of this license: + + A "competitive offering" is a Product that is offered to third + parties on a paid basis, including through paid support + arrangements, that significantly overlaps with the capabilities + of HashiCorp's paid version(s) of the Licensed Work. If Your + Product is not a competitive offering when You first make it + generally available, it will not become a competitive offering + later due to HashiCorp releasing a new version of the Licensed + Work with additional capabilities. In addition, Products that + are not provided on a paid basis are not competitive. + + "Product" means software that is offered to end users to manage + in their own environments or offered as a service on a hosted + basis. + + "Embedded" means including the source code or executable code + from the Licensed Work in a competitive offering. "Embedded" + also means packaging the competitive offering in such a way + that the Licensed Work must be accessed or downloaded for the + competitive offering to operate. + + Hosting or using the Licensed Work(s) for internal purposes + within an organization is not considered a competitive + offering. HashiCorp considers your organization to include all + of your affiliates under common control. + + For binding interpretive guidance on using HashiCorp products + under the Business Source License, please visit our FAQ. + (https://www.hashicorp.com/license-faq) +Change Date: Four years from the date the Licensed Work is published. +Change License: MPL 2.0 + +For information about alternative licensing arrangements for the Licensed Work, +please contact licensing@hashicorp.com. + +Notice + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited production use. 
+ +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. 
// ValidateKeyAuthorization validates that the given keyAuthz from a challenge
// matches our expectation, returning (true, nil) if so, or (false, err) if
// not.
//
// A key authorization has the form <token>.<thumbprint>; both halves must
// match the values we expect for this challenge.
func ValidateKeyAuthorization(keyAuthz string, token string, thumbprint string) (bool, error) {
	pieces := strings.Split(keyAuthz, ".")
	if count := len(pieces); count != 2 {
		return false, fmt.Errorf("invalid authorization: got %v parts, expected 2", count)
	}

	// Both the token and the account key thumbprint must match exactly.
	if pieces[0] != token || pieces[1] != thumbprint {
		return false, fmt.Errorf("key authorization was invalid")
	}

	return true, nil
}
+func ValidateSHA256KeyAuthorization(keyAuthz string, token string, thumbprint string) (bool, error) { + authzContents := token + "." + thumbprint + checksum := sha256.Sum256([]byte(authzContents)) + expectedAuthz := base64.RawURLEncoding.EncodeToString(checksum[:]) + + if keyAuthz != expectedAuthz { + return false, fmt.Errorf("sha256 key authorization was invalid") + } + + return true, nil +} + +// ValidateRawSHA256KeyAuthorization validates that the given keyAuthz from a +// challenge matches our expectation, returning (true, nil) if so, or +// (false, err) if not. +// +// This is for use with TLS challenges, which require the raw hash output. +func ValidateRawSHA256KeyAuthorization(keyAuthz []byte, token string, thumbprint string) (bool, error) { + authzContents := token + "." + thumbprint + expectedAuthz := sha256.Sum256([]byte(authzContents)) + + if len(keyAuthz) != len(expectedAuthz) || subtle.ConstantTimeCompare(expectedAuthz[:], keyAuthz) != 1 { + return false, fmt.Errorf("sha256 key authorization was invalid") + } + + return true, nil +} + +func buildResolver(config *acmeConfigEntry) (*net.Resolver, error) { + if len(config.DNSResolver) == 0 { + return net.DefaultResolver, nil + } + + return &net.Resolver{ + PreferGo: true, + StrictErrors: false, + Dial: func(ctx context.Context, network, address string) (net.Conn, error) { + d := net.Dialer{ + Timeout: 10 * time.Second, + } + return d.DialContext(ctx, network, config.DNSResolver) + }, + }, nil +} + +func buildDialerConfig(config *acmeConfigEntry) (*net.Dialer, error) { + resolver, err := buildResolver(config) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %w", err) + } + + return &net.Dialer{ + Timeout: 10 * time.Second, + KeepAlive: -1 * time.Second, + Resolver: resolver, + }, nil +} + +// Validates a given ACME http-01 challenge against the specified domain, +// per RFC 8555. +// +// We attempt to be defensive here against timeouts, extra redirects, &c. 
+func ValidateHTTP01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) { + path := "http://" + domain + "/.well-known/acme-challenge/" + token + dialer, err := buildDialerConfig(config) + if err != nil { + return false, fmt.Errorf("failed to build dialer: %w", err) + } + + transport := &http.Transport{ + // Only a single request is sent to this server as we do not do any + // batching of validation attempts. There is no need to do an HTTP + // KeepAlive as a result. + DisableKeepAlives: true, + MaxIdleConns: 1, + MaxIdleConnsPerHost: 1, + MaxConnsPerHost: 1, + IdleConnTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + + // We'd rather timeout and re-attempt validation later than hang + // too many validators waiting for slow hosts. + DialContext: dialer.DialContext, + ResponseHeaderTimeout: 10 * time.Second, + } + + maxRedirects := 10 + urlLength := 2000 + + client := &http.Client{ + Transport: transport, + CheckRedirect: func(req *http.Request, via []*http.Request) error { + if len(via)+1 >= maxRedirects { + return fmt.Errorf("http-01: too many redirects: %v", len(via)+1) + } + + reqUrlLen := len(req.URL.String()) + if reqUrlLen > urlLength { + return fmt.Errorf("http-01: redirect url length too long: %v", reqUrlLen) + } + + return nil + }, + } + + resp, err := client.Get(path) + if err != nil { + return false, fmt.Errorf("http-01: failed to fetch path %v: %w", path, err) + } + + // We provision a buffer which allows for a variable size challenge, some + // whitespace, and a detection gap for too long of a message. + minExpected := len(token) + 1 + len(thumbprint) + maxExpected := 512 + + defer resp.Body.Close() + + // Attempt to read the body, but don't do so infinitely. 
+ body, err := io.ReadAll(io.LimitReader(resp.Body, int64(maxExpected+1))) + if err != nil { + return false, fmt.Errorf("http-01: unexpected error while reading body: %w", err) + } + + if len(body) > maxExpected { + return false, fmt.Errorf("http-01: response too large: received %v > %v bytes", len(body), maxExpected) + } + + if len(body) < minExpected { + return false, fmt.Errorf("http-01: response too small: received %v < %v bytes", len(body), minExpected) + } + + // Per RFC 8555 Section 8.3. HTTP Challenge: + // + // > The server SHOULD ignore whitespace characters at the end of the body. + keyAuthz := string(body) + keyAuthz = strings.TrimSpace(keyAuthz) + + // If we got here, we got no non-EOF error while reading. Try to validate + // the token because we're bounded by a reasonable amount of length. + return ValidateKeyAuthorization(keyAuthz, token, thumbprint) +} + +func ValidateDNS01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) { + // Here, domain is the value from the post-wildcard-processed identifier. + // Per RFC 8555, no difference in validation occurs if a wildcard entry + // is requested or if a non-wildcard entry is requested. + // + // XXX: In this case the DNS server is operator controlled and is assumed + // to be less malicious so the default resolver is used. In the future, + // we'll want to use net.Resolver for two reasons: + // + // 1. To control the actual resolver via ACME configuration, + // 2. To use a context to set stricter timeout limits. 
+ resolver, err := buildResolver(config) + if err != nil { + return false, fmt.Errorf("failed to build resolver: %w", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + name := DNSChallengePrefix + domain + results, err := resolver.LookupTXT(ctx, name) + if err != nil { + return false, fmt.Errorf("dns-01: failed to lookup TXT records for domain (%v) via resolver %v: %w", name, config.DNSResolver, err) + } + + for _, keyAuthz := range results { + ok, _ := ValidateSHA256KeyAuthorization(keyAuthz, token, thumbprint) + if ok { + return true, nil + } + } + + return false, fmt.Errorf("dns-01: challenge failed against %v records", len(results)) +} + +func ValidateTLSALPN01Challenge(domain string, token string, thumbprint string, config *acmeConfigEntry) (bool, error) { + // This RFC is defined in RFC 8737 Automated Certificate Management + // Environment (ACME) TLS Application‑Layer Protocol Negotiation + // (ALPN) Challenge Extension. + // + // This is conceptually similar to ValidateHTTP01Challenge, but + // uses a TLS connection on port 443 with the specified ALPN + // protocol. + + cfg := &tls.Config{ + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge, the name of the negotiated + // protocol is "acme-tls/1". + NextProtos: []string{ALPNProtocol}, + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > ... and an SNI extension containing only the domain name + // > being validated during the TLS handshake. + // + // According to the Go docs, setting this option (even though + // InsecureSkipVerify=true is also specified), allows us to + // set the SNI extension to this value. + ServerName: domain, + + VerifyConnection: func(connState tls.ConnectionState) error { + // We initiated a fresh connection with no session tickets; + // even if we did have a session ticket, we do not wish to + // use it. 
Verify that the server has not inadvertently + // reused connections between validation attempts or something. + if connState.DidResume { + return fmt.Errorf("server under test incorrectly reported that handshake was resumed when no session cache was provided; refusing to continue") + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The ACME server verifies that during the TLS handshake the + // > application-layer protocol "acme-tls/1" was successfully + // > negotiated (and that the ALPN extension contained only the + // > value "acme-tls/1"). + if connState.NegotiatedProtocol != ALPNProtocol { + return fmt.Errorf("server under test negotiated unexpected ALPN protocol %v", connState.NegotiatedProtocol) + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > and that the certificate returned + // + // Because this certificate MUST be self-signed (per earlier + // statement in RFC 8737 Section 3), there is no point in sending + // more than one certificate, and so we will err early here if + // we got more than one. + if len(connState.PeerCertificates) > 1 { + return fmt.Errorf("server under test returned multiple (%v) certificates when we expected only one", len(connState.PeerCertificates)) + } + cert := connState.PeerCertificates[0] + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The client prepares for validation by constructing a + // > self-signed certificate that MUST contain an acmeIdentifier + // > extension and a subjectAlternativeName extension [RFC5280]. + // + // Verify that this is a self-signed certificate that isn't signed + // by another certificate (i.e., with the same key material but + // different issuer). 
+ // NOTE: Do not use cert.CheckSignatureFrom(cert) as we need to bypass the + // checks for the parent certificate having the IsCA basic constraint set. + err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature) + if err != nil { + return fmt.Errorf("server under test returned a non-self-signed certificate: %w", err) + } + + if !bytes.Equal(cert.RawSubject, cert.RawIssuer) { + return fmt.Errorf("server under test returned a non-self-signed certificate: invalid subject (%v) <-> issuer (%v) match", cert.Subject.String(), cert.Issuer.String()) + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The subjectAlternativeName extension MUST contain a single + // > dNSName entry where the value is the domain name being + // > validated. + // + // TODO: this does not validate that there are not other SANs + // with unknown (to Go) OIDs. + if len(cert.DNSNames) != 1 || len(cert.EmailAddresses) > 0 || len(cert.IPAddresses) > 0 || len(cert.URIs) > 0 { + return fmt.Errorf("server under test returned a certificate with incorrect SANs") + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The comparison of dNSNames MUST be case insensitive + // > [RFC4343]. Note that as ACME doesn't support Unicode + // > identifiers, all dNSNames MUST be encoded using the rules + // > of [RFC3492]. + if !strings.EqualFold(cert.DNSNames[0], domain) { + return fmt.Errorf("server under test returned a certificate with unexpected identifier: %v", cert.DNSNames[0]) + } + + // Per above, verify that the acmeIdentifier extension is present + // exactly once and has the correct value. + var foundACMEId bool + for _, ext := range cert.Extensions { + if !ext.Id.Equal(OIDACMEIdentifier) { + continue + } + + // There must be only a single ACME extension. 
+ if foundACMEId { + return fmt.Errorf("server under test returned a certificate with multiple acmeIdentifier extensions") + } + foundACMEId = true + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > a critical acmeIdentifier extension + if !ext.Critical { + return fmt.Errorf("server under test returned a certificate with an acmeIdentifier extension marked non-Critical") + } + + var keyAuthz []byte + remainder, err := asn1.Unmarshal(ext.Value, &keyAuthz) + if err != nil { + return fmt.Errorf("server under test returned a certificate with invalid acmeIdentifier extension value: %w", err) + } + if len(remainder) > 0 { + return fmt.Errorf("server under test returned a certificate with invalid acmeIdentifier extension value with additional trailing data") + } + + ok, err := ValidateRawSHA256KeyAuthorization(keyAuthz, token, thumbprint) + if !ok || err != nil { + return fmt.Errorf("server under test returned a certificate with an invalid key authorization (%w)", err) + } + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > The ACME server verifies that ... the certificate returned + // > contains: ... a critical acmeIdentifier extension containing + // > the expected SHA-256 digest computed in step 1. + if !foundACMEId { + return fmt.Errorf("server under test returned a certificate without the required acmeIdentifier extension") + } + + // Remove the handled critical extension and validate that we + // have no additional critical extensions left unhandled. + var index int = -1 + for oidIndex, oid := range cert.UnhandledCriticalExtensions { + if oid.Equal(OIDACMEIdentifier) { + index = oidIndex + break + } + } + if index != -1 { + // Unlike the foundACMEId case, this is not a failure; if Go + // updates to "understand" this critical extension, we do not + // wish to fail. 
+ cert.UnhandledCriticalExtensions = append(cert.UnhandledCriticalExtensions[0:index], cert.UnhandledCriticalExtensions[index+1:]...) + } + if len(cert.UnhandledCriticalExtensions) > 0 { + return fmt.Errorf("server under test returned a certificate with additional unknown critical extensions (%v)", cert.UnhandledCriticalExtensions) + } + + // All good! + return nil + }, + + // We never want to resume a connection; do not provide session + // cache storage. + ClientSessionCache: nil, + + // Do not trust any system trusted certificates; we're going to be + // manually validating the chain, so specifying a non-empty pool + // here could only cause additional, unnecessary work. + RootCAs: x509.NewCertPool(), + + // Do not bother validating the client's chain; we know it should be + // self-signed. This also disables hostname verification, but we do + // this verification as part of VerifyConnection(...) ourselves. + // + // Per Go docs, this option is only safe in conjunction with + // VerifyConnection which we define above. + InsecureSkipVerify: true, + + // RFC 8737 Section 4. acme-tls/1 Protocol Definition: + // + // > ACME servers that implement "acme-tls/1" MUST only negotiate + // > TLS 1.2 [RFC5246] or higher when connecting to clients for + // > validation. + MinVersion: tls.VersionTLS12, + + // While RFC 8737 does not place restrictions around allowed cipher + // suites, we wish to restrict ourselves to secure defaults. Specify + // the Intermediate guideline from Mozilla's TLS config generator to + // disable obviously weak ciphers. 
+ // + // See also: https://ssl-config.mozilla.org/#server=go&version=1.14.4&config=intermediate&guideline=5.7 + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + }, + } + + // Build a dialer using our custom DNS resolver, to ensure domains get + // resolved according to configuration. + dialer, err := buildDialerConfig(config) + if err != nil { + return false, fmt.Errorf("failed to build dialer: %w", err) + } + + // Per RFC 8737 Section 3. TLS with Application-Layer Protocol + // Negotiation (TLS ALPN) Challenge: + // + // > 2. The ACME server resolves the domain name being validated and + // > chooses one of the IP addresses returned for validation (the + // > server MAY validate against multiple addresses if more than + // > one is returned). + // > 3. The ACME server initiates a TLS connection to the chosen IP + // > address. This connection MUST use TCP port 443. + address := fmt.Sprintf("%v:"+ALPNPort, domain) + conn, err := dialer.Dial("tcp", address) + if err != nil { + return false, fmt.Errorf("tls-alpn-01: failed to dial host: %w", err) + } + + // Initiate the connection to the remote peer. + client := tls.Client(conn, cfg) + + // We intentionally swallow this error as it isn't useful to the + // underlying protocol we perform here. Notably, per RFC 8737 + // Section 4. acme-tls/1 Protocol Definition: + // + // > Once the handshake is completed, the client MUST NOT exchange + // > any further data with the server and MUST immediately close the + // > connection. ... Because of this, an ACME server MAY choose to + // > withhold authorization if either the certificate signature is + // > invalid or the handshake doesn't fully complete. 
+ defer client.Close() + + // We wish to put time bounds on the total time the handshake can + // stall for, so build a connection context here. + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // See note above about why we can allow Handshake to complete + // successfully. + if err := client.HandshakeContext(ctx); err != nil { + return false, fmt.Errorf("tls-alpn-01: failed to perform handshake: %w", err) + } + return true, nil +} diff --git a/changelog/24192.txt b/changelog/24192.txt new file mode 100644 index 000000000000..f5b152831771 --- /dev/null +++ b/changelog/24192.txt @@ -0,0 +1,76 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +@mixin stacked-grid { + grid-template-columns: 1fr; + grid-row: 1/1; +} +@mixin stacked-content { + margin-bottom: $spacing-24; +} + +.action-block-width { + width: 100%; +} + +.action-block { + grid-template-columns: 2fr 1fr; + display: grid; + padding: $spacing-16 $spacing-24; + line-height: inherit; + grid-gap: $spacing-16; + + @include until($mobile) { + @include stacked-grid(); + } +} + +.action-block-info { + @include until($mobile) { + @include stacked-content(); + } +} + +.action-block.stacked { + @include stacked-grid(); +} +.stacked > .action-block-info { + @include stacked-content(); +} + +.action-block-title { + font-size: $size-5; + font-weight: $font-weight-bold; +} +.action-block-action { + text-align: right; + @include until($mobile) { + text-align: left; + } +} + +/* Action Block Grid */ +.replication-actions-grid-layout { + display: flex; + flex-wrap: wrap; + margin: $spacing-16 0; + @include until($mobile) { + display: block; + } +} + +.replication-actions-grid-item { + flex-basis: 50%; + padding: $spacing-12; + display: flex; + width: 100%; +} + +.replication-actions-grid-item .action-block { + width: 100%; + @include until($mobile) { + height: inherit; + } +} diff --git a/changelog/24193.txt b/changelog/24193.txt new file 
mode 100644 index 000000000000..c6a6cbaabd2f --- /dev/null +++ b/changelog/24193.txt @@ -0,0 +1,107 @@ +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + +
+
+

+ Generate a secondary token +

+

+ Generate a token to enable + {{this.model.replicationModeForDisplay}} + replication or change primaries on secondary cluster. +

+
+ +
+ +
+ +
+

+ This will be used to identify a secondary cluster once a connection has been established with the primary. +

+
+
+ +
+ {{#if (eq this.replicationMode "performance")}} + + {{/if}} +
+ + + + + + +{{#if this.isModalActive}} + + + Copy your token + + +

+ This token can be used to enable + {{this.model.replicationModeForDisplay}} + replication or change primaries on the secondary cluster. +

+
+

Activation token

+
+
+ +
+
+
+ + +
+
+
+ + + + + {{#unless this.isTokenCopied}} + + {{/unless}} + + +
+{{/if}} \ No newline at end of file diff --git a/changelog/24201.txt b/changelog/24201.txt new file mode 100644 index 000000000000..a3d46a32edc9 --- /dev/null +++ b/changelog/24201.txt @@ -0,0 +1,1265 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "crypto/tls" + "errors" + "flag" + "fmt" + "io" + "net" + "net/http" + "os" + "sort" + "strings" + "sync" + "time" + + systemd "github.com/coreos/go-systemd/daemon" + "github.com/hashicorp/cli" + ctconfig "github.com/hashicorp/consul-template/config" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/gatedwriter" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/reloadutil" + "github.com/hashicorp/vault/api" + agentConfig "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/command/agent/exec" + "github.com/hashicorp/vault/command/agent/template" + "github.com/hashicorp/vault/command/agentproxyshared" + "github.com/hashicorp/vault/command/agentproxyshared/auth" + "github.com/hashicorp/vault/command/agentproxyshared/cache" + "github.com/hashicorp/vault/command/agentproxyshared/sink" + "github.com/hashicorp/vault/command/agentproxyshared/sink/file" + "github.com/hashicorp/vault/command/agentproxyshared/sink/inmem" + "github.com/hashicorp/vault/command/agentproxyshared/winsvc" + "github.com/hashicorp/vault/helper/logging" + "github.com/hashicorp/vault/helper/metricsutil" + "github.com/hashicorp/vault/helper/useragent" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/listenerutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/version" + "github.com/kr/pretty" + "github.com/oklog/run" + "github.com/posener/complete" + "golang.org/x/text/cases" + "golang.org/x/text/language" + 
"google.golang.org/grpc/test/bufconn" +) + +var ( + _ cli.Command = (*AgentCommand)(nil) + _ cli.CommandAutocomplete = (*AgentCommand)(nil) +) + +const ( + // flagNameAgentExitAfterAuth is used as an Agent specific flag to indicate + // that agent should exit after a single successful auth + flagNameAgentExitAfterAuth = "exit-after-auth" + nameAgent = "agent" +) + +type AgentCommand struct { + *BaseCommand + logFlags logFlags + + config *agentConfig.Config + + ShutdownCh chan struct{} + SighupCh chan struct{} + + tlsReloadFuncsLock sync.RWMutex + tlsReloadFuncs []reloadutil.ReloadFunc + + logWriter io.Writer + logGate *gatedwriter.Writer + logger hclog.Logger + + // Telemetry object + metricsHelper *metricsutil.MetricsHelper + + cleanupGuard sync.Once + + startedCh chan struct{} // for tests + reloadedCh chan struct{} // for tests + + flagConfigs []string + flagExitAfterAuth bool + flagTestVerifyOnly bool +} + +func (c *AgentCommand) Synopsis() string { + return "Start a Vault agent" +} + +func (c *AgentCommand) Help() string { + helpText := ` +Usage: vault agent [options] + + This command starts a Vault Agent that can perform automatic authentication + in certain environments. + + Start an agent with a configuration file: + + $ vault agent -config=/etc/vault/config.hcl + + For a full list of examples, please see the documentation. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *AgentCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + // Augment with the log flags + f.addLogFlags(&c.logFlags) + + f.StringSliceVar(&StringSliceVar{ + Name: "config", + Target: &c.flagConfigs, + Completion: complete.PredictOr( + complete.PredictFiles("*.hcl"), + complete.PredictFiles("*.json"), + ), + Usage: "Path to a configuration file. 
This configuration file should " + + "contain only agent directives.", + }) + + f.BoolVar(&BoolVar{ + Name: flagNameAgentExitAfterAuth, + Target: &c.flagExitAfterAuth, + Default: false, + Usage: "If set to true, the agent will exit with code 0 after a single " + + "successful auth, where success means that a token was retrieved and " + + "all sinks successfully wrote it", + }) + + // Internal-only flags to follow. + // + // Why hello there little source code reader! Welcome to the Vault source + // code. The remaining options are intentionally undocumented and come with + // no warranty or backwards-compatibility promise. Do not use these flags + // in production. Do not build automation using these flags. Unless you are + // developing against Vault, you should not need any of these flags. + f.BoolVar(&BoolVar{ + Name: "test-verify-only", + Target: &c.flagTestVerifyOnly, + Default: false, + Hidden: true, + }) + + // End internal-only flags. + + return set +} + +func (c *AgentCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *AgentCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AgentCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Create a logger. We wrap it in a gated writer so that it doesn't + // start logging too early. 
+ c.logGate = gatedwriter.NewWriter(os.Stderr) + c.logWriter = c.logGate + + if c.logFlags.flagCombineLogs { + c.logWriter = os.Stdout + } + + // Validation + if len(c.flagConfigs) < 1 { + c.UI.Error("Must specify exactly at least one config path using -config") + return 1 + } + + config, err := c.loadConfig(c.flagConfigs) + if err != nil { + c.outputErrors(err) + return 1 + } + + if config.AutoAuth == nil { + c.UI.Info("No auto_auth block found in config, the automatic authentication feature will not be started") + } + + c.applyConfigOverrides(f, config) // This only needs to happen on start-up to aggregate config from flags and env vars + c.config = config + + l, err := c.newLogger() + if err != nil { + c.outputErrors(err) + return 1 + } + + // Update the logger and then base the log writer on that logger. + // Log writer is supplied to consul-template runners for templates and execs. + // We want to ensure that consul-template will honor the settings, for example + // if the -log-format is JSON we want JSON, not a mix of JSON and non-JSON messages. + c.logger = l + c.logWriter = l.StandardWriter(&hclog.StandardLoggerOptions{ + InferLevels: true, + InferLevelsWithTimestamp: true, + }) + + infoKeys := make([]string, 0, 10) + info := make(map[string]string) + info["log level"] = config.LogLevel + infoKeys = append(infoKeys, "log level") + + infoKeys = append(infoKeys, "version") + verInfo := version.GetVersion() + info["version"] = verInfo.FullVersionNumber(false) + if verInfo.Revision != "" { + info["version sha"] = strings.Trim(verInfo.Revision, "'") + infoKeys = append(infoKeys, "version sha") + } + infoKeys = append(infoKeys, "cgo") + info["cgo"] = "disabled" + if version.CgoEnabled { + info["cgo"] = "enabled" + } + + // Tests might not want to start a vault server and just want to verify + // the configuration. 
+ if c.flagTestVerifyOnly { + if os.Getenv("VAULT_TEST_VERIFY_ONLY_DUMP_CONFIG") != "" { + c.UI.Output(fmt.Sprintf( + "\nConfiguration:\n%s\n", + pretty.Sprint(*c.config))) + } + return 0 + } + + // Ignore any setting of Agent's address. This client is used by the Agent + // to reach out to Vault. This should never loop back to agent. + c.flagAgentProxyAddress = "" + client, err := c.Client() + if err != nil { + c.UI.Error(fmt.Sprintf( + "Error fetching client: %v", + err)) + return 1 + } + + serverHealth, err := client.Sys().Health() + if err == nil { + // We don't exit on error here, as this is not worth stopping Agent over + serverVersion := serverHealth.Version + agentVersion := version.GetVersion().VersionNumber() + if serverVersion != agentVersion { + c.UI.Info("==> Note: Vault Agent version does not match Vault server version. " + + fmt.Sprintf("Vault Agent version: %s, Vault server version: %s", agentVersion, serverVersion)) + } + } + + if config.IsDefaultListerDefined() { + // Notably, we cannot know for sure if they are using the API proxy functionality unless + // we log on each API proxy call, which would be too noisy. + // A customer could have a listener defined but only be using e.g. the cache-clear API, + // even though the API proxy is something they have available. + c.UI.Warn("==> Note: Vault Agent will be deprecating API proxy functionality in a future " + + "release, and this functionality has moved to a new subcommand, vault proxy. If you rely on this " + + "functionality, plan to move to Vault Proxy instead.") + } + + // ctx and cancelFunc are passed to the AuthHandler, SinkServer, ExecServer and + // TemplateServer that periodically listen for ctx.Done() to fire and shut + // down accordingly. 
+ ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + + // telemetry configuration + inmemMetrics, _, prometheusEnabled, err := configutil.SetupTelemetry(&configutil.SetupTelemetryOpts{ + Config: config.Telemetry, + Ui: c.UI, + ServiceName: "vault", + DisplayName: "Vault", + UserAgent: useragent.AgentString(), + ClusterName: config.ClusterName, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err)) + return 1 + } + c.metricsHelper = metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled) + + var method auth.AuthMethod + var sinks []*sink.SinkConfig + var templateNamespace string + if config.AutoAuth != nil { + if client.Headers().Get(consts.NamespaceHeaderName) == "" && config.AutoAuth.Method.Namespace != "" { + client.SetNamespace(config.AutoAuth.Method.Namespace) + } + templateNamespace = client.Headers().Get(consts.NamespaceHeaderName) + + sinkClient, err := client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for file sink: %v", err)) + return 1 + } + + if config.DisableIdleConnsAutoAuth { + sinkClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAutoAuth { + sinkClient.SetDisableKeepAlives(true) + } + + for _, sc := range config.AutoAuth.Sinks { + switch sc.Type { + case "file": + config := &sink.SinkConfig{ + Logger: c.logger.Named("sink.file"), + Config: sc.Config, + Client: sinkClient, + WrapTTL: sc.WrapTTL, + DHType: sc.DHType, + DeriveKey: sc.DeriveKey, + DHPath: sc.DHPath, + AAD: sc.AAD, + } + s, err := file.NewFileSink(config) + if err != nil { + c.UI.Error(fmt.Errorf("error creating file sink: %w", err).Error()) + return 1 + } + config.Sink = s + sinks = append(sinks, config) + default: + c.UI.Error(fmt.Sprintf("Unknown sink type %q", sc.Type)) + return 1 + } + } + + authConfig := &auth.AuthConfig{ + Logger: c.logger.Named(fmt.Sprintf("auth.%s", config.AutoAuth.Method.Type)), + MountPath: config.AutoAuth.Method.MountPath, + 
Config: config.AutoAuth.Method.Config, + } + method, err = agentproxyshared.GetAutoAuthMethodFromConfig(config.AutoAuth.Method.Type, authConfig, config.Vault.Address) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating %s auth method: %v", config.AutoAuth.Method.Type, err)) + return 1 + } + } + + // We do this after auto-auth has been configured, because we don't want to + // confuse the issue of retries for auth failures which have their own + // config and are handled a bit differently. + if os.Getenv(api.EnvVaultMaxRetries) == "" { + client.SetMaxRetries(ctconfig.DefaultRetryAttempts) + if config.Vault != nil { + if config.Vault.Retry != nil { + client.SetMaxRetries(config.Vault.Retry.NumRetries) + } + } + } + + enforceConsistency := cache.EnforceConsistencyNever + whenInconsistent := cache.WhenInconsistentFail + if config.APIProxy != nil { + switch config.APIProxy.EnforceConsistency { + case "always": + enforceConsistency = cache.EnforceConsistencyAlways + case "never", "": + default: + c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for enforce_consistency: %q", config.APIProxy.EnforceConsistency)) + return 1 + } + + switch config.APIProxy.WhenInconsistent { + case "retry": + whenInconsistent = cache.WhenInconsistentRetry + case "forward": + whenInconsistent = cache.WhenInconsistentForward + case "fail", "": + default: + c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for when_inconsistent: %q", config.APIProxy.WhenInconsistent)) + return 1 + } + } + // Keep Cache configuration for legacy reasons, but error if defined alongside API Proxy + if config.Cache != nil { + switch config.Cache.EnforceConsistency { + case "always": + if enforceConsistency != cache.EnforceConsistencyNever { + c.UI.Error("enforce_consistency configured in both api_proxy and cache blocks. 
Please remove this configuration from the cache block.") + return 1 + } else { + enforceConsistency = cache.EnforceConsistencyAlways + } + case "never", "": + default: + c.UI.Error(fmt.Sprintf("Unknown cache setting for enforce_consistency: %q", config.Cache.EnforceConsistency)) + return 1 + } + + switch config.Cache.WhenInconsistent { + case "retry": + if whenInconsistent != cache.WhenInconsistentFail { + c.UI.Error("when_inconsistent configured in both api_proxy and cache blocks. Please remove this configuration from the cache block.") + return 1 + } else { + whenInconsistent = cache.WhenInconsistentRetry + } + case "forward": + if whenInconsistent != cache.WhenInconsistentFail { + c.UI.Error("when_inconsistent configured in both api_proxy and cache blocks. Please remove this configuration from the cache block.") + return 1 + } else { + whenInconsistent = cache.WhenInconsistentForward + } + case "fail", "": + default: + c.UI.Error(fmt.Sprintf("Unknown cache setting for when_inconsistent: %q", config.Cache.WhenInconsistent)) + return 1 + } + } + + // Warn if cache _and_ cert auto-auth is enabled but certificates were not + // provided in the auto_auth.method["cert"].config stanza. + if config.Cache != nil && (config.AutoAuth != nil && config.AutoAuth.Method != nil && config.AutoAuth.Method.Type == "cert") { + _, okCertFile := config.AutoAuth.Method.Config["client_cert"] + _, okCertKey := config.AutoAuth.Method.Config["client_key"] + + // If neither of these exists in the cert stanza, agent will use the + // certs from the vault stanza. + if !okCertFile && !okCertKey { + c.UI.Warn(wrapAtLength("WARNING! Cache is enabled and using the same certificates " + + "from the 'cert' auto-auth method specified in the 'vault' stanza. Consider " + + "specifying certificate information in the 'cert' auto-auth's config stanza.")) + } + + } + + // Output the header that the agent has started + if !c.logFlags.flagCombineLogs { + c.UI.Output("==> Vault Agent started! 
Log data will stream in below:\n") + } + + var leaseCache *cache.LeaseCache + var previousToken string + + proxyClient, err := client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for proxying: %v", err)) + return 1 + } + + if config.DisableIdleConnsAPIProxy { + proxyClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAPIProxy { + proxyClient.SetDisableKeepAlives(true) + } + + apiProxyLogger := c.logger.Named("apiproxy") + + // The API proxy to be used, if listeners are configured + apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ + Client: proxyClient, + Logger: apiProxyLogger, + EnforceConsistency: enforceConsistency, + WhenInconsistentAction: whenInconsistent, + UserAgentStringFunction: useragent.AgentProxyStringWithProxiedUserAgent, + UserAgentString: useragent.AgentProxyString(), + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating API proxy: %v", err)) + return 1 + } + + // Parse agent cache configurations + if config.Cache != nil { + cacheLogger := c.logger.Named("cache") + + // Create the lease cache proxier and set its underlying proxier to + // the API proxier. 
+ leaseCache, err = cache.NewLeaseCache(&cache.LeaseCacheConfig{ + Client: proxyClient, + BaseContext: ctx, + Proxier: apiProxy, + Logger: cacheLogger.Named("leasecache"), + CacheDynamicSecrets: true, + UserAgentToUse: useragent.ProxyAPIProxyString(), + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating lease cache: %v", err)) + return 1 + } + + // Configure persistent storage and add to LeaseCache + if config.Cache.Persist != nil { + deferFunc, oldToken, err := agentproxyshared.AddPersistentStorageToLeaseCache(ctx, leaseCache, config.Cache.Persist, cacheLogger) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating persistent cache: %v", err)) + return 1 + } + previousToken = oldToken + if deferFunc != nil { + defer deferFunc() + } + } + } + + var listeners []net.Listener + + // If there are templates, add an in-process listener + if len(config.Templates) > 0 || len(config.EnvTemplates) > 0 { + config.Listeners = append(config.Listeners, &configutil.Listener{Type: listenerutil.BufConnType}) + } + + // Ensure we've added all the reload funcs for TLS before anyone triggers a reload. + c.tlsReloadFuncsLock.Lock() + + for i, lnConfig := range config.Listeners { + var ln net.Listener + var tlsCfg *tls.Config + + if lnConfig.Type == listenerutil.BufConnType { + inProcListener := bufconn.Listen(1024 * 1024) + if config.Cache != nil { + config.Cache.InProcDialer = listenerutil.NewBufConnWrapper(inProcListener) + } + ln = inProcListener + } else { + lnBundle, err := cache.StartListener(lnConfig) + if err != nil { + c.UI.Error(fmt.Sprintf("Error starting listener: %v", err)) + return 1 + } + + tlsCfg = lnBundle.TLSConfig + ln = lnBundle.Listener + + // Track the reload func, so we can reload later if needed. 
+ c.tlsReloadFuncs = append(c.tlsReloadFuncs, lnBundle.TLSReloadFunc) + } + + listeners = append(listeners, ln) + + proxyVaultToken := true + var inmemSink sink.Sink + if config.APIProxy != nil { + if config.APIProxy.UseAutoAuthToken { + apiProxyLogger.Debug("auto-auth token is allowed to be used; configuring inmem sink") + inmemSink, err = inmem.New(&sink.SinkConfig{ + Logger: apiProxyLogger, + }, leaseCache) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) + return 1 + } + sinks = append(sinks, &sink.SinkConfig{ + Logger: apiProxyLogger, + Sink: inmemSink, + }) + } + proxyVaultToken = !config.APIProxy.ForceAutoAuthToken + } + + var muxHandler http.Handler + if leaseCache != nil { + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken) + } else { + muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken) + } + + // Parse 'require_request_header' listener config option, and wrap + // the request handler if necessary + if lnConfig.RequireRequestHeader && ("metrics_only" != lnConfig.Role) { + muxHandler = verifyRequestHeader(muxHandler) + } + + // Create a muxer and add paths relevant for the lease cache layer + mux := http.NewServeMux() + quitEnabled := lnConfig.AgentAPI != nil && lnConfig.AgentAPI.EnableQuit + + mux.Handle(consts.AgentPathMetrics, c.handleMetrics()) + if "metrics_only" != lnConfig.Role { + mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) + mux.Handle(consts.AgentPathQuit, c.handleQuit(quitEnabled)) + mux.Handle("/", muxHandler) + } + + scheme := "https://" + if tlsCfg == nil { + scheme = "http://" + } + if ln.Addr().Network() == "unix" { + scheme = "unix://" + } + + infoKey := fmt.Sprintf("api address %d", i+1) + info[infoKey] = scheme + ln.Addr().String() + infoKeys = append(infoKeys, infoKey) + + server := &http.Server{ + Addr: ln.Addr().String(), + TLSConfig: tlsCfg, + Handler: mux, + ReadHeaderTimeout: 10 * 
time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: apiProxyLogger.StandardLogger(nil), + } + + go server.Serve(ln) + } + + c.tlsReloadFuncsLock.Unlock() + + // Ensure that listeners are closed at all the exits + listenerCloseFunc := func() { + for _, ln := range listeners { + ln.Close() + } + } + defer c.cleanupGuard.Do(listenerCloseFunc) + + // Inform any tests that the server is ready + if c.startedCh != nil { + close(c.startedCh) + } + + var g run.Group + + g.Add(func() error { + for { + select { + case <-c.SighupCh: + c.UI.Output("==> Vault Agent config reload triggered") + err := c.reloadConfig(c.flagConfigs) + if err != nil { + c.outputErrors(err) + } + // Send the 'reloaded' message on the relevant channel + select { + case c.reloadedCh <- struct{}{}: + default: + } + case <-ctx.Done(): + return nil + } + } + }, func(error) { + cancelFunc() + }) + + // This run group watches for signal termination + g.Add(func() error { + for { + select { + case <-c.ShutdownCh: + c.UI.Output("==> Vault Agent shutdown triggered") + // Notify systemd that the server is shutting down + // Let the lease cache know this is a shutdown; no need to evict everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + return nil + case <-ctx.Done(): + return nil + case <-winsvc.ShutdownChannel(): + return nil + } + } + }, func(error) {}) + + // Start auto-auth and sink servers + if method != nil { + enableTemplateTokenCh := len(config.Templates) > 0 + enableEnvTemplateTokenCh := len(config.EnvTemplates) > 0 + + // Auth Handler is going to set its own retry values, so we want to + // work on a copy of the client to not affect other subsystems. 
+ ahClient, err := c.client.CloneWithHeaders() + if err != nil { + c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err)) + return 1 + } + + if config.DisableIdleConnsAutoAuth { + ahClient.SetMaxIdleConnections(-1) + } + + if config.DisableKeepAlivesAutoAuth { + ahClient.SetDisableKeepAlives(true) + } + + ah := auth.NewAuthHandler(&auth.AuthHandlerConfig{ + Logger: c.logger.Named("auth.handler"), + Client: ahClient, + WrapTTL: config.AutoAuth.Method.WrapTTL, + MinBackoff: config.AutoAuth.Method.MinBackoff, + MaxBackoff: config.AutoAuth.Method.MaxBackoff, + EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, + EnableTemplateTokenCh: enableTemplateTokenCh, + EnableExecTokenCh: enableEnvTemplateTokenCh, + Token: previousToken, + ExitOnError: config.AutoAuth.Method.ExitOnError, + UserAgent: useragent.AgentAutoAuthString(), + MetricsSignifier: "agent", + }) + + ss := sink.NewSinkServer(&sink.SinkServerConfig{ + Logger: c.logger.Named("sink.server"), + Client: ahClient, + ExitAfterAuth: config.ExitAfterAuth, + }) + + ts := template.NewServer(&template.ServerConfig{ + Logger: c.logger.Named("template.server"), + LogLevel: c.logger.GetLevel(), + LogWriter: c.logWriter, + AgentConfig: c.config, + Namespace: templateNamespace, + ExitAfterAuth: config.ExitAfterAuth, + }) + + es, err := exec.NewServer(&exec.ServerConfig{ + AgentConfig: c.config, + Namespace: templateNamespace, + Logger: c.logger.Named("exec.server"), + LogLevel: c.logger.GetLevel(), + LogWriter: c.logWriter, + }) + if err != nil { + c.logger.Error("could not create exec server", "error", err) + return 1 + } + + g.Add(func() error { + return ah.Run(ctx, method) + }, func(error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + }) + + g.Add(func() error { + err := ss.Run(ctx, ah.OutputCh, sinks) + c.logger.Info("sinks finished, exiting") + + // Start 
goroutine to drain from ah.OutputCh from this point onward + // to prevent ah.Run from being blocked. + go func() { + for { + select { + case <-ctx.Done(): + return + case <-ah.OutputCh: + } + } + }() + + // Wait until templates are rendered + if len(config.Templates) > 0 { + <-ts.DoneCh + } + + return err + }, func(error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + }) + + g.Add(func() error { + return ts.Run(ctx, ah.TemplateTokenCh, config.Templates) + }, func(error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + ts.Stop() + }) + + g.Add(func() error { + return es.Run(ctx, ah.ExecTokenCh) + }, func(err error) { + // Let the lease cache know this is a shutdown; no need to evict + // everything + if leaseCache != nil { + leaseCache.SetShuttingDown(true) + } + cancelFunc() + es.Close() + }) + + } + + // Server configuration output + padding := 24 + sort.Strings(infoKeys) + caser := cases.Title(language.English) + c.UI.Output("==> Vault Agent configuration:\n") + for _, k := range infoKeys { + c.UI.Output(fmt.Sprintf( + "%s%s: %s", + strings.Repeat(" ", padding-len(k)), + caser.String(k), + info[k])) + } + c.UI.Output("") + + // Release the log gate. 
+ c.logGate.Flush() + + // Write out the PID to the file now that server has successfully started + if err := c.storePidFile(config.PidFile); err != nil { + c.UI.Error(fmt.Sprintf("Error storing PID: %s", err)) + return 1 + } + + // Notify systemd that the server is ready (if applicable) + c.notifySystemd(systemd.SdNotifyReady) + + defer func() { + if err := c.removePidFile(config.PidFile); err != nil { + c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err)) + } + }() + + var exitCode int + if err := g.Run(); err != nil { + var processExitError *exec.ProcessExitError + if errors.As(err, &processExitError) { + exitCode = processExitError.ExitCode + } else { + exitCode = 1 + } + + if exitCode != 0 { + c.logger.Error("runtime error encountered", "error", err, "exitCode", exitCode) + c.UI.Error("Error encountered during run, refer to logs for more details.") + } + } + + c.notifySystemd(systemd.SdNotifyStopping) + + return exitCode +} + +// applyConfigOverrides ensures that the config object accurately reflects the desired +// settings as configured by the user. It applies the relevant config setting based +// on the precedence (env var overrides file config, cli overrides env var). +// It mutates the config object supplied. 
+func (c *AgentCommand) applyConfigOverrides(f *FlagSets, config *agentConfig.Config) { + if config.Vault == nil { + config.Vault = &agentConfig.Vault{} + } + + f.applyLogConfigOverrides(config.SharedConfig) + + f.Visit(func(fl *flag.Flag) { + if fl.Name == flagNameAgentExitAfterAuth { + config.ExitAfterAuth = c.flagExitAfterAuth + } + }) + + c.setStringFlag(f, config.Vault.Address, &StringVar{ + Name: flagNameAddress, + Target: &c.flagAddress, + Default: "https://127.0.0.1:8200", + EnvVar: api.EnvVaultAddress, + }) + config.Vault.Address = c.flagAddress + c.setStringFlag(f, config.Vault.CACert, &StringVar{ + Name: flagNameCACert, + Target: &c.flagCACert, + Default: "", + EnvVar: api.EnvVaultCACert, + }) + config.Vault.CACert = c.flagCACert + c.setStringFlag(f, config.Vault.CAPath, &StringVar{ + Name: flagNameCAPath, + Target: &c.flagCAPath, + Default: "", + EnvVar: api.EnvVaultCAPath, + }) + config.Vault.CAPath = c.flagCAPath + c.setStringFlag(f, config.Vault.ClientCert, &StringVar{ + Name: flagNameClientCert, + Target: &c.flagClientCert, + Default: "", + EnvVar: api.EnvVaultClientCert, + }) + config.Vault.ClientCert = c.flagClientCert + c.setStringFlag(f, config.Vault.ClientKey, &StringVar{ + Name: flagNameClientKey, + Target: &c.flagClientKey, + Default: "", + EnvVar: api.EnvVaultClientKey, + }) + config.Vault.ClientKey = c.flagClientKey + c.setBoolFlag(f, config.Vault.TLSSkipVerify, &BoolVar{ + Name: flagNameTLSSkipVerify, + Target: &c.flagTLSSkipVerify, + Default: false, + EnvVar: api.EnvVaultSkipVerify, + }) + config.Vault.TLSSkipVerify = c.flagTLSSkipVerify + c.setStringFlag(f, config.Vault.TLSServerName, &StringVar{ + Name: flagTLSServerName, + Target: &c.flagTLSServerName, + Default: "", + EnvVar: api.EnvVaultTLSServerName, + }) + config.Vault.TLSServerName = c.flagTLSServerName +} + +// verifyRequestHeader wraps an http.Handler inside a Handler that checks for +// the request header that is used for SSRF protection. 
+func verifyRequestHeader(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if val, ok := r.Header[consts.RequestHeaderName]; !ok || len(val) != 1 || val[0] != "true" { + logical.RespondError(w, + http.StatusPreconditionFailed, + fmt.Errorf("missing %q header", consts.RequestHeaderName)) + return + } + + handler.ServeHTTP(w, r) + }) +} + +func (c *AgentCommand) notifySystemd(status string) { + sent, err := systemd.SdNotify(false, status) + if err != nil { + c.logger.Error("error notifying systemd", "error", err) + } else { + if sent { + c.logger.Debug("sent systemd notification", "notification", status) + } else { + c.logger.Debug("would have sent systemd notification (systemd not present)", "notification", status) + } + } +} + +func (c *AgentCommand) setStringFlag(f *FlagSets, configVal string, fVar *StringVar) { + var isFlagSet bool + f.Visit(func(f *flag.Flag) { + if f.Name == fVar.Name { + isFlagSet = true + } + }) + + flagEnvValue, flagEnvSet := os.LookupEnv(fVar.EnvVar) + switch { + case isFlagSet: + // Don't do anything as the flag is already set from the command line + case flagEnvSet: + // Use value from env var + *fVar.Target = flagEnvValue + case configVal != "": + // Use value from config + *fVar.Target = configVal + default: + // Use the default value + *fVar.Target = fVar.Default + } +} + +func (c *AgentCommand) setBoolFlag(f *FlagSets, configVal bool, fVar *BoolVar) { + var isFlagSet bool + f.Visit(func(f *flag.Flag) { + if f.Name == fVar.Name { + isFlagSet = true + } + }) + + flagEnvValue, flagEnvSet := os.LookupEnv(fVar.EnvVar) + switch { + case isFlagSet: + // Don't do anything as the flag is already set from the command line + case flagEnvSet: + // Use value from env var + *fVar.Target = flagEnvValue != "" + case configVal: + // Use value from config + *fVar.Target = configVal + default: + // Use the default value + *fVar.Target = fVar.Default + } +} + +// storePidFile is used to write 
out our PID to a file if necessary +func (c *AgentCommand) storePidFile(pidPath string) error { + // Quit fast if no pidfile + if pidPath == "" { + return nil + } + + // Open the PID file + pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600) + if err != nil { + return fmt.Errorf("could not open pid file: %w", err) + } + defer pidFile.Close() + + // Write out the PID + pid := os.Getpid() + _, err = pidFile.WriteString(fmt.Sprintf("%d", pid)) + if err != nil { + return fmt.Errorf("could not write to pid file: %w", err) + } + return nil +} + +// removePidFile is used to cleanup the PID file if necessary +func (c *AgentCommand) removePidFile(pidPath string) error { + if pidPath == "" { + return nil + } + return os.Remove(pidPath) +} + +func (c *AgentCommand) handleMetrics() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + logical.RespondError(w, http.StatusMethodNotAllowed, nil) + return + } + + if err := r.ParseForm(); err != nil { + logical.RespondError(w, http.StatusBadRequest, err) + return + } + + format := r.Form.Get("format") + if format == "" { + format = metricsutil.FormatFromRequest(&logical.Request{ + Headers: r.Header, + }) + } + + resp := c.metricsHelper.ResponseForFormat(format) + + status := resp.Data[logical.HTTPStatusCode].(int) + w.Header().Set("Content-Type", resp.Data[logical.HTTPContentType].(string)) + switch v := resp.Data[logical.HTTPRawBody].(type) { + case string: + w.WriteHeader(status) + w.Write([]byte(v)) + case []byte: + w.WriteHeader(status) + w.Write(v) + default: + logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("wrong response returned")) + } + }) +} + +func (c *AgentCommand) handleQuit(enabled bool) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !enabled { + w.WriteHeader(http.StatusNotFound) + return + } + + switch r.Method { + case http.MethodPost: + default: + 
w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + c.logger.Debug("received quit request") + close(c.ShutdownCh) + }) +} + +// newLogger creates a logger based on parsed config field on the Agent Command struct. +func (c *AgentCommand) newLogger() (hclog.InterceptLogger, error) { + if c.config == nil { + return nil, fmt.Errorf("cannot create logger, no config") + } + + var errs *multierror.Error + + // Parse all the log related config + logLevel, err := logging.ParseLogLevel(c.config.LogLevel) + if err != nil { + errs = multierror.Append(errs, err) + } + + logFormat, err := logging.ParseLogFormat(c.config.LogFormat) + if err != nil { + errs = multierror.Append(errs, err) + } + + logRotateDuration, err := parseutil.ParseDurationSecond(c.config.LogRotateDuration) + if err != nil { + errs = multierror.Append(errs, err) + } + + if errs != nil { + return nil, errs + } + + logCfg, err := logging.NewLogConfig(nameAgent) + if err != nil { + return nil, err + } + logCfg.Name = nameAgent + logCfg.LogLevel = logLevel + logCfg.LogFormat = logFormat + logCfg.LogFilePath = c.config.LogFile + logCfg.LogRotateDuration = logRotateDuration + logCfg.LogRotateBytes = c.config.LogRotateBytes + logCfg.LogRotateMaxFiles = c.config.LogRotateMaxFiles + + l, err := logging.Setup(logCfg, c.logWriter) + if err != nil { + return nil, err + } + + return l, nil +} + +// loadConfig attempts to generate an Agent config from the file(s) specified. 
+func (c *AgentCommand) loadConfig(paths []string) (*agentConfig.Config, error) { + var errs *multierror.Error + cfg := agentConfig.NewConfig() + + for _, configPath := range paths { + configFromPath, err := agentConfig.LoadConfig(configPath) + if err != nil { + errs = multierror.Append(errs, fmt.Errorf("error loading configuration from %s: %w", configPath, err)) + } else { + cfg = cfg.Merge(configFromPath) + } + } + + if errs != nil { + return nil, errs + } + + if err := cfg.ValidateConfig(); err != nil { + return nil, fmt.Errorf("error validating configuration: %w", err) + } + + return cfg, nil +} + +// reloadConfig will attempt to reload the config from file(s) and adjust certain +// config values without requiring a restart of the Vault Agent. +// If config is retrieved without error it is stored in the config field of the AgentCommand. +// This operation is not atomic and could result in updated config but partially applied config settings. +// The error returned from this func may be a multierror. +// This function will most likely be called due to Vault Agent receiving a SIGHUP signal. +// Currently only reloading the following are supported: +// * log level +// * TLS certs for listeners +func (c *AgentCommand) reloadConfig(paths []string) error { + // Notify systemd that the server is reloading + c.notifySystemd(systemd.SdNotifyReloading) + defer c.notifySystemd(systemd.SdNotifyReady) + + var errors error + + // Reload the config + cfg, err := c.loadConfig(paths) + if err != nil { + // Returning single error as we won't continue with bad config and won't 'commit' it. 
+ return err + } + c.config = cfg + + // Update the log level + err = c.reloadLogLevel() + if err != nil { + errors = multierror.Append(errors, err) + } + + // Update certs + err = c.reloadCerts() + if err != nil { + errors = multierror.Append(errors, err) + } + + return errors +} + +// reloadLogLevel will attempt to update the log level for the logger attached +// to the AgentComment struct using the value currently set in config. +func (c *AgentCommand) reloadLogLevel() error { + logLevel, err := logging.ParseLogLevel(c.config.LogLevel) + if err != nil { + return err + } + + c.logger.SetLevel(logLevel) + + return nil +} + +// reloadCerts will attempt to reload certificates using a reload func which +// was provided when the listeners were configured, only funcs that were appended +// to the AgentCommand slice will be invoked. +// This function returns a multierror type so that every func can report an error +// if it encounters one. +func (c *AgentCommand) reloadCerts() error { + var errors error + + c.tlsReloadFuncsLock.RLock() + defer c.tlsReloadFuncsLock.RUnlock() + + for _, reloadFunc := range c.tlsReloadFuncs { + // Non-TLS listeners will have a nil reload func. + if reloadFunc != nil { + err := reloadFunc() + if err != nil { + errors = multierror.Append(errors, err) + } + } + } + + return errors +} + +// outputErrors will take an error or multierror and handle outputting each to the UI +func (c *AgentCommand) outputErrors(err error) { + if err != nil { + if me, ok := err.(*multierror.Error); ok { + for _, err := range me.Errors { + c.UI.Error(err.Error()) + } + } else { + c.UI.Error(err.Error()) + } + } +} diff --git a/changelog/24224.txt b/changelog/24224.txt new file mode 100644 index 000000000000..31bbcd255c4e --- /dev/null +++ b/changelog/24224.txt @@ -0,0 +1,442 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "fmt" + "io" + "os" + paths "path" + "sort" + "strings" + "unicode" + + "github.com/hashicorp/cli" + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/hashicorp/vault/api" + "github.com/mitchellh/go-homedir" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*AgentGenerateConfigCommand)(nil) + _ cli.CommandAutocomplete = (*AgentGenerateConfigCommand)(nil) +) + +type AgentGenerateConfigCommand struct { + *BaseCommand + + flagType string + flagPaths []string + flagExec string +} + +func (c *AgentGenerateConfigCommand) Synopsis() string { + return "Generate a Vault Agent configuration file." +} + +func (c *AgentGenerateConfigCommand) Help() string { + helpText := ` +Usage: vault agent generate-config [options] [path/to/config.hcl] + + Generates a simple Vault Agent configuration file from the given parameters. + + Currently, the only supported configuration type is 'env-template', which + helps you generate a configuration file with environment variable templates + for running Vault Agent in process supervisor mode. + + For every specified secret -path, the command will attempt to generate one or + multiple 'env_template' entries based on the JSON key(s) stored in the + specified secret. If the secret -path ends with '/*', the command will + attempt to recurse through the secrets tree rooted at the given path, + generating 'env_template' entries for each encountered secret. Currently, + only kv-v1 and kv-v2 paths are supported. + + The command specified in the '-exec' option will be used to generate an + 'exec' entry, which will tell Vault Agent which child process to run. + + In addition to env_template entries, the command generates an 'auto_auth' + section with 'token_file' authentication method. While this method is very + convenient for local testing, it should NOT be used in production. 
Please + see https://developer.hashicorp.com/vault/docs/agent-and-proxy/autoauth/methods + for a list of production-ready auto_auth methods that you can use instead. + + By default, the file will be generated in the local directory as 'agent.hcl' + unless a path is specified as an argument. + + Generate a simple environment variable template configuration: + + $ vault agent generate-config -type="env-template" \ + -exec="./my-app arg1 arg2" \ + -path="secret/foo" + + Generate an environment variable template configuration for multiple secrets: + + $ vault agent generate-config -type="env-template" \ + -exec="./my-app arg1 arg2" \ + -path="secret/foo" \ + -path="secret/bar" \ + -path="secret/my-app/*" + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *AgentGenerateConfigCommand) Flags() *FlagSets { + // Include client-modifying flags (-address, -namespace, etc.) + set := c.flagSet(FlagSetHTTP) + + // Common Options + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagType, + Usage: "Type of configuration file to generate; currently, only 'env-template' is supported.", + Completion: complete.PredictSet( + "env-template", + ), + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "path", + Target: &c.flagPaths, + Usage: "Path to a kv-v1 or kv-v2 secret (e.g. 
secret/data/foo, kv-v2/prefix/*); multiple secrets and tail '*' wildcards are allowed.", + Completion: c.PredictVaultFolders(), + }) + + f.StringVar(&StringVar{ + Name: "exec", + Target: &c.flagExec, + Default: "env", + Usage: "The command to execute in agent process supervisor mode.", + }) + + return set +} + +func (c *AgentGenerateConfigCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *AgentGenerateConfigCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AgentGenerateConfigCommand) Run(args []string) int { + flags := c.Flags() + + if err := flags.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = flags.Args() + + if len(args) > 1 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected at most 1, got %d)", len(args))) + return 1 + } + + if c.flagType == "" { + c.UI.Error(`Please specify a -type flag; currently only -type="env-template" is supported.`) + return 1 + } + + if c.flagType != "env-template" { + c.UI.Error(fmt.Sprintf(`%q is not a supported configuration type; currently only -type="env-template" is supported.`, c.flagType)) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + config, err := generateConfiguration(context.Background(), client, c.flagExec, c.flagPaths) + if err != nil { + c.UI.Error(fmt.Sprintf("Error: %v", err)) + return 2 + } + + var configPath string + if len(args) == 1 { + configPath = args[0] + } else { + configPath = "agent.hcl" + } + + f, err := os.Create(configPath) + if err != nil { + c.UI.Error(fmt.Sprintf("Could not create configuration file %q: %v", configPath, err)) + return 3 + } + defer func() { + if err := f.Close(); err != nil { + c.UI.Error(fmt.Sprintf("Could not close configuration file %q: %v", configPath, err)) + } + }() + + if _, err := config.WriteTo(f); err != nil { + c.UI.Error(fmt.Sprintf("Could not write to configuration file %q: %v", 
configPath, err)) + return 3 + } + + c.UI.Info(fmt.Sprintf("Successfully generated %q configuration file!", configPath)) + + c.UI.Warn("Warning: the generated file uses 'token_file' authentication method, which is not suitable for production environments.") + + return 0 +} + +func generateConfiguration(ctx context.Context, client *api.Client, flagExec string, flagPaths []string) (io.WriterTo, error) { + var execCommand []string + if flagExec != "" { + execCommand = strings.Split(flagExec, " ") + } else { + execCommand = []string{"env"} + } + + tokenPath, err := homedir.Expand("~/.vault-token") + if err != nil { + return nil, fmt.Errorf("could not expand home directory: %w", err) + } + + templates, err := constructTemplates(ctx, client, flagPaths) + if err != nil { + return nil, fmt.Errorf("could not generate templates: %w", err) + } + + config := generatedConfig{ + AutoAuth: generatedConfigAutoAuth{ + Method: generatedConfigAutoAuthMethod{ + Type: "token_file", + Config: generatedConfigAutoAuthMethodConfig{ + TokenFilePath: tokenPath, + }, + }, + }, + TemplateConfig: generatedConfigTemplateConfig{ + StaticSecretRenderInterval: "5m", + ExitOnRetryFailure: true, + }, + Vault: generatedConfigVault{ + Address: client.Address(), + }, + Exec: generatedConfigExec{ + Command: execCommand, + RestartOnSecretChanges: "always", + RestartStopSignal: "SIGTERM", + }, + EnvTemplates: templates, + } + + contents := hclwrite.NewEmptyFile() + + gohcl.EncodeIntoBody(&config, contents.Body()) + + return contents, nil +} + +func constructTemplates(ctx context.Context, client *api.Client, paths []string) ([]generatedConfigEnvTemplate, error) { + var templates []generatedConfigEnvTemplate + + for _, path := range paths { + path = sanitizePath(path) + + mountPath, v2, err := isKVv2(path, client) + if err != nil { + return nil, fmt.Errorf("could not validate secret path %q: %w", path, err) + } + + switch { + case strings.HasSuffix(path, "/*"): + // this path contains a tail wildcard, 
attempt to walk the tree + t, err := constructTemplatesFromTree(ctx, client, path[:len(path)-2], mountPath, v2) + if err != nil { + return nil, fmt.Errorf("could not traverse sercet at %q: %w", path, err) + } + templates = append(templates, t...) + + case strings.Contains(path, "*"): + // don't allow any other wildcards + return nil, fmt.Errorf("the path %q cannot contain '*' wildcard characters except as the last element of the path", path) + + default: + // regular secret path + t, err := constructTemplatesFromSecret(ctx, client, path, mountPath, v2) + if err != nil { + return nil, fmt.Errorf("could not read secret at %q: %v", path, err) + } + templates = append(templates, t...) + } + } + + return templates, nil +} + +func constructTemplatesFromTree(ctx context.Context, client *api.Client, path, mountPath string, v2 bool) ([]generatedConfigEnvTemplate, error) { + var templates []generatedConfigEnvTemplate + + if v2 { + metadataPath := strings.Replace( + path, + paths.Join(mountPath, "data"), + paths.Join(mountPath, "metadata"), + 1, + ) + if path != metadataPath { + path = metadataPath + } else { + path = addPrefixToKVPath(path, mountPath, "metadata", true) + } + } + + err := walkSecretsTree(ctx, client, path, func(child string, directory bool) error { + if directory { + return nil + } + + dataPath := strings.Replace( + child, + paths.Join(mountPath, "metadata"), + paths.Join(mountPath, "data"), + 1, + ) + + t, err := constructTemplatesFromSecret(ctx, client, dataPath, mountPath, v2) + if err != nil { + return err + } + templates = append(templates, t...) 
+ + return nil + }) + if err != nil { + return nil, err + } + + return templates, nil +} + +func constructTemplatesFromSecret(ctx context.Context, client *api.Client, path, mountPath string, v2 bool) ([]generatedConfigEnvTemplate, error) { + var templates []generatedConfigEnvTemplate + + if v2 { + path = addPrefixToKVPath(path, mountPath, "data", true) + } + + resp, err := client.Logical().ReadWithContext(ctx, path) + if err != nil { + return nil, fmt.Errorf("error querying: %w", err) + } + if resp == nil { + return nil, fmt.Errorf("secret not found") + } + + var data map[string]interface{} + if v2 { + internal, ok := resp.Data["data"] + if !ok { + return nil, fmt.Errorf("secret.Data not found") + } + data = internal.(map[string]interface{}) + } else { + data = resp.Data + } + + fields := make([]string, 0, len(data)) + + for field := range data { + fields = append(fields, field) + } + + // sort for a deterministic output + sort.Strings(fields) + + var dataContents string + if v2 { + dataContents = ".Data.data" + } else { + dataContents = ".Data" + } + + for _, field := range fields { + templates = append(templates, generatedConfigEnvTemplate{ + Name: constructDefaultEnvironmentKey(path, field), + Contents: fmt.Sprintf(`{{ with secret "%s" }}{{ %s.%s }}{{ end }}`, path, dataContents, field), + ErrorOnMissingKey: true, + }) + } + + return templates, nil +} + +func constructDefaultEnvironmentKey(path string, field string) string { + pathParts := strings.Split(path, "/") + pathPartsLast := pathParts[len(pathParts)-1] + + notLetterOrNumber := func(r rune) bool { + return !unicode.IsLetter(r) && !unicode.IsNumber(r) + } + + p1 := strings.FieldsFunc(pathPartsLast, notLetterOrNumber) + p2 := strings.FieldsFunc(field, notLetterOrNumber) + + keyParts := append(p1, p2...) + + return strings.ToUpper(strings.Join(keyParts, "_")) +} + +// Below, we are redefining a subset of the configuration-related structures +// defined under command/agent/config. 
Using these structures we can tailor the +// output of the generated config, while using the original structures would +// have produced an HCL document with many empty fields. The structures below +// should not be used for anything other than generation. + +type generatedConfig struct { + AutoAuth generatedConfigAutoAuth `hcl:"auto_auth,block"` + TemplateConfig generatedConfigTemplateConfig `hcl:"template_config,block"` + Vault generatedConfigVault `hcl:"vault,block"` + EnvTemplates []generatedConfigEnvTemplate `hcl:"env_template,block"` + Exec generatedConfigExec `hcl:"exec,block"` +} + +type generatedConfigTemplateConfig struct { + StaticSecretRenderInterval string `hcl:"static_secret_render_interval"` + ExitOnRetryFailure bool `hcl:"exit_on_retry_failure"` +} + +type generatedConfigExec struct { + Command []string `hcl:"command"` + RestartOnSecretChanges string `hcl:"restart_on_secret_changes"` + RestartStopSignal string `hcl:"restart_stop_signal"` +} + +type generatedConfigEnvTemplate struct { + Name string `hcl:"name,label"` + Contents string `hcl:"contents,attr"` + ErrorOnMissingKey bool `hcl:"error_on_missing_key"` +} + +type generatedConfigVault struct { + Address string `hcl:"address"` +} + +type generatedConfigAutoAuth struct { + Method generatedConfigAutoAuthMethod `hcl:"method,block"` +} + +type generatedConfigAutoAuthMethod struct { + Type string `hcl:"type"` + Config generatedConfigAutoAuthMethodConfig `hcl:"config,block"` +} + +type generatedConfigAutoAuthMethodConfig struct { + TokenFilePath string `hcl:"token_file_path"` +} diff --git a/changelog/24236.txt b/changelog/24236.txt new file mode 100644 index 000000000000..8d12f6584d3d --- /dev/null +++ b/changelog/24236.txt @@ -0,0 +1,3188 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "bufio" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/cli" + "github.com/hashicorp/go-hclog" + vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt" + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + credAppRole "github.com/hashicorp/vault/builtin/credential/approle" + "github.com/hashicorp/vault/command/agent" + agentConfig "github.com/hashicorp/vault/command/agent/config" + "github.com/hashicorp/vault/helper/testhelpers/minimal" + "github.com/hashicorp/vault/helper/useragent" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/logging" + "github.com/hashicorp/vault/sdk/helper/pointerutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + BasicHclConfig = ` +log_file = "TMPDIR/juan.log" +log_level="warn" +log_rotate_max_files=2 +log_rotate_bytes=1048576 +vault { + address = "http://127.0.0.1:8200" + retry { + num_retries = 5 + } +} + +listener "tcp" { + address = "127.0.0.1:8100" + tls_disable = false + tls_cert_file = "TMPDIR/reload_cert.pem" + tls_key_file = "TMPDIR/reload_key.pem" +}` + BasicHclConfig2 = ` +log_file = "TMPDIR/juan.log" +log_level="debug" +log_rotate_max_files=-1 +log_rotate_bytes=1048576 +vault { + address = "http://127.0.0.1:8200" + retry { + num_retries = 5 + } +} + +listener "tcp" { + address = "127.0.0.1:8100" + tls_disable = false + tls_cert_file = "TMPDIR/reload_cert.pem" + tls_key_file = "TMPDIR/reload_key.pem" +}` +) + +func testAgentCommand(tb testing.TB, logger hclog.Logger) (*cli.MockUi, *AgentCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &AgentCommand{ + 
BaseCommand: &BaseCommand{ + UI: ui, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + logger: logger, + startedCh: make(chan struct{}, 5), + reloadedCh: make(chan struct{}, 5), + } +} + +func TestAgent_ExitAfterAuth(t *testing.T) { + t.Run("via_config", func(t *testing.T) { + testAgentExitAfterAuth(t, false) + }) + + t.Run("via_flag", func(t *testing.T) { + testAgentExitAfterAuth(t, true) + }) +} + +func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { + logger := logging.NewVaultLogger(hclog.Trace) + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "jwt": vaultjwt.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + client := cluster.Cores[0].Client + + // Setup Vault + err := client.Sys().EnableAuthWithOptions("jwt", &api.EnableAuthOptions{ + Type: "jwt", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/jwt/config", map[string]interface{}{ + "bound_issuer": "https://team-vault.auth0.com/", + "jwt_validation_pubkeys": agent.TestECDSAPubKey, + "jwt_supported_algs": "ES256", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().Write("auth/jwt/role/test", map[string]interface{}{ + "role_type": "jwt", + "bound_subject": "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients", + "bound_audiences": "https://vault.plugin.auth.jwt.test", + "user_claim": "https://vault/user", + "groups_claim": "https://vault/groups", + "policies": "test", + "period": "3s", + }) + if err != nil { + t.Fatal(err) + } + + inf, err := os.CreateTemp("", "auth.jwt.test.") + if err != nil { + t.Fatal(err) + } + in := inf.Name() + inf.Close() + os.Remove(in) + t.Logf("input: %s", in) + + sink1f, err := os.CreateTemp("", "sink1.jwt.test.") + if err != nil { + t.Fatal(err) + } + sink1 := sink1f.Name() + sink1f.Close() + 
os.Remove(sink1) + t.Logf("sink1: %s", sink1) + + sink2f, err := os.CreateTemp("", "sink2.jwt.test.") + if err != nil { + t.Fatal(err) + } + sink2 := sink2f.Name() + sink2f.Close() + os.Remove(sink2) + t.Logf("sink2: %s", sink2) + + conff, err := os.CreateTemp("", "conf.jwt.test.") + if err != nil { + t.Fatal(err) + } + conf := conff.Name() + conff.Close() + os.Remove(conf) + t.Logf("config: %s", conf) + + jwtToken, _ := agent.GetTestJWT(t) + if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test jwt", "path", in) + } + + exitAfterAuthTemplText := "exit_after_auth = true" + if viaFlag { + exitAfterAuthTemplText = "" + } + + config := ` +%s + +auto_auth { + method { + type = "jwt" + config = { + role = "test" + path = "%s" + } + } + + sink { + type = "file" + config = { + path = "%s" + } + } + + sink "file" { + config = { + path = "%s" + } + } +} +` + + config = fmt.Sprintf(config, exitAfterAuthTemplText, in, sink1, sink2) + if err := os.WriteFile(conf, []byte(config), 0o600); err != nil { + t.Fatal(err) + } else { + logger.Trace("wrote test config", "path", conf) + } + + doneCh := make(chan struct{}) + go func() { + ui, cmd := testAgentCommand(t, logger) + cmd.client = client + + args := []string{"-config", conf} + if viaFlag { + args = append(args, "-exit-after-auth") + } + + code := cmd.Run(args) + if code != 0 { + t.Errorf("expected %d to be %d", code, 0) + t.Logf("output from agent:\n%s", ui.OutputWriter.String()) + t.Logf("error from agent:\n%s", ui.ErrorWriter.String()) + } + close(doneCh) + }() + + select { + case <-doneCh: + break + case <-time.After(1 * time.Minute): + t.Fatal("timeout reached while waiting for agent to exit") + } + + sink1Bytes, err := os.ReadFile(sink1) + if err != nil { + t.Fatal(err) + } + if len(sink1Bytes) == 0 { + t.Fatal("got no output from sink 1") + } + + sink2Bytes, err := os.ReadFile(sink2) + if err != nil { + t.Fatal(err) + } + if len(sink2Bytes) == 0 { + 
		t.Fatal("got no output from sink 2")
	}

	if string(sink1Bytes) != string(sink2Bytes) {
		t.Fatal("sink 1/2 values don't match")
	}
}

func TestAgent_RequireRequestHeader(t *testing.T) {
	// newApiClient creates an *api.Client. When includeVaultRequestHeader is
	// false, the default Vault request header is stripped from the client so
	// that we can exercise listeners that require (or don't require) it.
	newApiClient := func(addr string, includeVaultRequestHeader bool) *api.Client {
		conf := api.DefaultConfig()
		conf.Address = addr
		cli, err := api.NewClient(conf)
		if err != nil {
			t.Fatalf("err: %s", err)
		}

		// Sanity-check that the default client carries the request header
		// set to "true" before optionally removing it.
		h := cli.Headers()
		val, ok := h[consts.RequestHeaderName]
		if !ok || !reflect.DeepEqual(val, []string{"true"}) {
			t.Fatalf("invalid %s header", consts.RequestHeaderName)
		}
		if !includeVaultRequestHeader {
			delete(h, consts.RequestHeaderName)
			cli.SetHeaders(h)
		}

		return cli
	}

	//----------------------------------------------------
	// Start the server and agent
	//----------------------------------------------------

	// Start a vault server
	logger := logging.NewVaultLogger(hclog.Trace)
	cluster := vault.NewTestCluster(t,
		&vault.CoreConfig{
			CredentialBackends: map[string]logical.Factory{
				"approle": credAppRole.Factory,
			},
		},
		&vault.TestClusterOptions{
			HandlerFunc: vaulthttp.Handler,
		})
	cluster.Start()
	defer cluster.Cleanup()
	vault.TestWaitActive(t, cluster.Cores[0].Core)
	serverClient := cluster.Cores[0].Client

	// Enable the approle auth method
	roleIDPath, secretIDPath := setupAppRole(t, serverClient)

	// Create a config file with three listeners: one that omits
	// require_request_header, one that disables it, and one that enables it.
	config := `
auto_auth {
    method "approle" {
        mount_path = "auth/approle"
        config = {
            role_id_file_path = "%s"
            secret_id_file_path = "%s"
        }
    }
}

cache {
    use_auto_auth_token = true
}

listener "tcp" {
    address = "%s"
    tls_disable = true
}
listener "tcp" {
    address = "%s"
    tls_disable = true
    require_request_header = false
}
listener "tcp" {
    address = "%s"
    tls_disable = true
    require_request_header = true
}
`
	listenAddr1 := generateListenerAddress(t)
	listenAddr2 := generateListenerAddress(t)
	listenAddr3 := generateListenerAddress(t)
	config = fmt.Sprintf(
		config,
		roleIDPath,
		secretIDPath,
		listenAddr1,
		listenAddr2,
		listenAddr3,
	)
	configPath := makeTempFile(t, "config.hcl", config)
	defer os.Remove(configPath)

	// Start the agent
	ui, cmd := testAgentCommand(t, logger)
	cmd.client = serverClient
	cmd.startedCh = make(chan struct{})

	var output string
	var code int
	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		code = cmd.Run([]string{"-config", configPath})
		if code != 0 {
			output = ui.ErrorWriter.String() + ui.OutputWriter.String()
		}
		wg.Done()
	}()

	select {
	case <-cmd.startedCh:
	case <-time.After(5 * time.Second):
		t.Fatalf("timeout")
	}

	// defer agent shutdown
	defer func() {
		cmd.ShutdownCh <- struct{}{}
		wg.Wait()
		if code != 0 {
			t.Fatalf("got a non-zero exit status: %d, stdout/stderr: %s", code, output)
		}
	}()

	//----------------------------------------------------
	// Perform the tests
	//----------------------------------------------------

	// Test against a listener configuration that omits
	// 'require_request_header', with the header missing from the request.
	agentClient := newApiClient("http://"+listenAddr1, false)
	req := agentClient.NewRequest("GET", "/v1/sys/health")
	request(t, agentClient, req, 200)

	// Test against a listener configuration that sets 'require_request_header'
	// to 'false', with the header missing from the request.
	agentClient = newApiClient("http://"+listenAddr2, false)
	req = agentClient.NewRequest("GET", "/v1/sys/health")
	request(t, agentClient, req, 200)

	// Test against a listener configuration that sets 'require_request_header'
	// to 'true', with the header missing from the request.
	agentClient = newApiClient("http://"+listenAddr3, false)
	req = agentClient.NewRequest("GET", "/v1/sys/health")
	resp, err := agentClient.RawRequest(req)
	if err == nil {
		t.Fatalf("expected error")
	}
	if resp.StatusCode != http.StatusPreconditionFailed {
		t.Fatalf("expected status code %d, not %d", http.StatusPreconditionFailed, resp.StatusCode)
	}

	// Test against a listener configuration that sets 'require_request_header'
	// to 'true', with an invalid header present in the request.
	agentClient = newApiClient("http://"+listenAddr3, false)
	h := agentClient.Headers()
	h[consts.RequestHeaderName] = []string{"bogus"}
	agentClient.SetHeaders(h)
	req = agentClient.NewRequest("GET", "/v1/sys/health")
	resp, err = agentClient.RawRequest(req)
	if err == nil {
		t.Fatalf("expected error")
	}
	if resp.StatusCode != http.StatusPreconditionFailed {
		t.Fatalf("expected status code %d, not %d", http.StatusPreconditionFailed, resp.StatusCode)
	}

	// Test against a listener configuration that sets 'require_request_header'
	// to 'true', with the proper header present in the request.
	agentClient = newApiClient("http://"+listenAddr3, true)
	req = agentClient.NewRequest("GET", "/v1/sys/health")
	request(t, agentClient, req, 200)
}

// TestAgent_RequireAutoAuthWithForce ensures that the client exits with a
// non-zero code if configured to force the use of an auto-auth token without
// configuring the auto_auth block.
func TestAgent_RequireAutoAuthWithForce(t *testing.T) {
	logger := logging.NewVaultLogger(hclog.Trace)

	// Create a config file that forces use of the auto-auth token while
	// (deliberately) never defining an auto_auth block.
	config := fmt.Sprintf(`
cache {
    use_auto_auth_token = "force"
}

listener "tcp" {
    address = "%s"
    tls_disable = true
}
`, generateListenerAddress(t))

	configPath := makeTempFile(t, "config.hcl", config)
	defer os.Remove(configPath)

	// Start the agent
	ui, cmd := testAgentCommand(t, logger)
	cmd.startedCh = make(chan struct{})

	// The agent is expected to fail to start; a zero exit code is a test
	// failure, in which case we dump its output for debugging.
	code := cmd.Run([]string{"-config", configPath})
	if code == 0 {
		t.Errorf("expected error code, but got 0: %d", code)
		t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String())
		t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String())
	}
}

// TestAgent_Template_UserAgent validates that the User-Agent sent to Vault
// as part of Templating requests is correct. Uses the custom handler
// userAgentHandler struct defined in this test package, so that Vault validates
// the User-Agent on requests sent by Agent.
+func TestAgent_Template_UserAgent(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + var h userAgentHandler + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.AgentTemplatingString() + h.pathToCheck = "/v1/secret/data" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Setenv(api.EnvVaultAddress, serverClient.Address()) + + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) + + // make a temp directory to hold renders. 
Each test will create a temp dir + // inside this one + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDirRoot) + // create temp dir for this test run + tmpDir, err := os.MkdirTemp(tmpDirRoot, "TestAgent_Template_UserAgent") + if err != nil { + t.Fatal(err) + } + + // make some template files + var templatePaths []string + fileName := filepath.Join(tmpDir, "render_0.tmpl") + if err := os.WriteFile(fileName, []byte(templateContents(0)), 0o600); err != nil { + t.Fatal(err) + } + templatePaths = append(templatePaths, fileName) + + // build up the template config to be added to the Agent config.hcl file + var templateConfigStrings []string + for i, t := range templatePaths { + index := fmt.Sprintf("render_%d.json", i) + s := fmt.Sprintf(templateConfigString, t, tmpDir, index) + templateConfigStrings = append(templateConfigStrings, s) + } + + // Create a config file + config := ` +vault { + address = "%s" + tls_skip_verify = true +} + +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + remove_secret_id_file_after_reading = false + } + } +} + +%s +` + + // flatten the template configs + templateConfig := strings.Join(templateConfigStrings, " ") + + config = fmt.Sprintf(config, serverClient.Address(), roleIDPath, secretIDPath, templateConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + ui, cmd := testAgentCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running agent: %d", code) + t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case 
<-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // We need to shut down the Agent command + defer func() { + cmd.ShutdownCh <- struct{}{} + wg.Wait() + }() + + verify := func(suffix string) { + t.Helper() + // We need to poll for a bit to give Agent time to render the + // templates. Without this, the test will attempt to read + // the temp dir before Agent has had time to render and will + // likely fail the test + tick := time.Tick(1 * time.Second) + timeout := time.After(10 * time.Second) + var err error + for { + select { + case <-timeout: + t.Fatalf("timed out waiting for templates to render, last error: %v", err) + case <-tick: + } + // Check for files rendered in the directory and break + // early for shutdown if we do have all the files + // rendered + + //---------------------------------------------------- + // Perform the tests + //---------------------------------------------------- + + if numFiles := testListFiles(t, tmpDir, ".json"); numFiles != len(templatePaths) { + err = fmt.Errorf("expected (%d) templates, got (%d)", len(templatePaths), numFiles) + continue + } + + for i := range templatePaths { + fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.json", i)) + var c []byte + c, err = os.ReadFile(fileName) + if err != nil { + continue + } + if string(c) != templateRendered(i)+suffix { + err = fmt.Errorf("expected=%q, got=%q", templateRendered(i)+suffix, string(c)) + continue + } + } + return + } + } + + verify("") + + fileName = filepath.Join(tmpDir, "render_0.tmpl") + if err := os.WriteFile(fileName, []byte(templateContents(0)+"{}"), 0o600); err != nil { + t.Fatal(err) + } + + verify("{}") +} + +// TestAgent_Template tests rendering templates +func TestAgent_Template_Basic(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + 
&vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Setenv(api.EnvVaultAddress, serverClient.Address()) + + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) + + // make a temp directory to hold renders. Each test will create a temp dir + // inside this one + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDirRoot) + + // start test cases here + testCases := map[string]struct { + templateCount int + exitAfterAuth bool + }{ + "one": { + templateCount: 1, + }, + "one_with_exit": { + templateCount: 1, + exitAfterAuth: true, + }, + "many": { + templateCount: 15, + }, + "many_with_exit": { + templateCount: 13, + exitAfterAuth: true, + }, + } + + for tcname, tc := range testCases { + t.Run(tcname, func(t *testing.T) { + // create temp dir for this test run + tmpDir, err := os.MkdirTemp(tmpDirRoot, tcname) + if err != nil { + t.Fatal(err) + } + + // make some template files + var templatePaths []string + for i := 0; i < tc.templateCount; i++ { + fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) + if err := os.WriteFile(fileName, []byte(templateContents(i)), 0o600); err != nil { + t.Fatal(err) + } + templatePaths = append(templatePaths, fileName) + } + + // build up the template config to be added to the Agent config.hcl file + var templateConfigStrings []string + for i, t := range templatePaths { + index := fmt.Sprintf("render_%d.json", i) + s := 
fmt.Sprintf(templateConfigString, t, tmpDir, index) + templateConfigStrings = append(templateConfigStrings, s) + } + + // Create a config file + config := ` +vault { + address = "%s" + tls_skip_verify = true +} + +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + remove_secret_id_file_after_reading = false + } + } +} + +%s + +%s +` + + // conditionally set the exit_after_auth flag + exitAfterAuth := "" + if tc.exitAfterAuth { + exitAfterAuth = "exit_after_auth = true" + } + + // flatten the template configs + templateConfig := strings.Join(templateConfigStrings, " ") + + config = fmt.Sprintf(config, serverClient.Address(), roleIDPath, secretIDPath, templateConfig, exitAfterAuth) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + ui, cmd := testAgentCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running agent: %d", code) + t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // if using exit_after_auth, then the command will have returned at the + // end and no longer be running. If we are not using exit_after_auth, then + // we need to shut down the command + if !tc.exitAfterAuth { + defer func() { + cmd.ShutdownCh <- struct{}{} + wg.Wait() + }() + } + + verify := func(suffix string) { + t.Helper() + // We need to poll for a bit to give Agent time to render the + // templates. 
Without this, the test will attempt to read + // the temp dir before Agent has had time to render and will + // likely fail the test + tick := time.Tick(1 * time.Second) + timeout := time.After(10 * time.Second) + var err error + for { + select { + case <-timeout: + t.Fatalf("timed out waiting for templates to render, last error: %v", err) + case <-tick: + } + // Check for files rendered in the directory and break + // early for shutdown if we do have all the files + // rendered + + //---------------------------------------------------- + // Perform the tests + //---------------------------------------------------- + + if numFiles := testListFiles(t, tmpDir, ".json"); numFiles != len(templatePaths) { + err = fmt.Errorf("expected (%d) templates, got (%d)", len(templatePaths), numFiles) + continue + } + + for i := range templatePaths { + fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.json", i)) + var c []byte + c, err = os.ReadFile(fileName) + if err != nil { + continue + } + if string(c) != templateRendered(i)+suffix { + err = fmt.Errorf("expected=%q, got=%q", templateRendered(i)+suffix, string(c)) + continue + } + } + return + } + } + + verify("") + + for i := 0; i < tc.templateCount; i++ { + fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) + if err := os.WriteFile(fileName, []byte(templateContents(i)+"{}"), 0o600); err != nil { + t.Fatal(err) + } + } + + verify("{}") + }) + } +} + +func setupAppRole(t *testing.T, serverClient *api.Client) (string, string) { + t.Helper() + // Enable the approle auth method + req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") + req.BodyBytes = []byte(`{ + "type": "approle" + }`) + request(t, serverClient, req, 204) + + // Create a named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") + req.BodyBytes = []byte(`{ + "token_ttl": "5m", + "token_policies":"default,myapp-read", + "policies":"default,myapp-read" + }`) + request(t, serverClient, req, 204) + + // Fetch 
the RoleID of the named role + req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") + body := request(t, serverClient, req, 200) + data := body["data"].(map[string]interface{}) + roleID := data["role_id"].(string) + + // Get a SecretID issued against the named role + req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") + body = request(t, serverClient, req, 200) + data = body["data"].(map[string]interface{}) + secretID := data["secret_id"].(string) + + // Write the RoleID and SecretID to temp files + roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") + secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") + t.Cleanup(func() { + os.Remove(roleIDPath) + os.Remove(secretIDPath) + }) + + return roleIDPath, secretIDPath +} + +func setupAppRoleAndKVMounts(t *testing.T, serverClient *api.Client) (string, string) { + roleIDPath, secretIDPath := setupAppRole(t, serverClient) + + // give test-role permissions to read the kv secret + req := serverClient.NewRequest("PUT", "/v1/sys/policy/myapp-read") + req.BodyBytes = []byte(`{ + "policy": "path \"secret/*\" { capabilities = [\"read\", \"list\"] }" + }`) + request(t, serverClient, req, 204) + + // setup the kv secrets + req = serverClient.NewRequest("POST", "/v1/sys/mounts/secret/tune") + req.BodyBytes = []byte(`{ + "options": {"version": "2"} + }`) + request(t, serverClient, req, 200) + + // Secret: myapp + req = serverClient.NewRequest("POST", "/v1/secret/data/myapp") + req.BodyBytes = []byte(`{ + "data": { + "username": "bar", + "password": "zap" + } + }`) + request(t, serverClient, req, 200) + + // Secret: myapp2 + req = serverClient.NewRequest("POST", "/v1/secret/data/myapp2") + req.BodyBytes = []byte(`{ + "data": { + "username": "barstuff", + "password": "zap" + } + }`) + request(t, serverClient, req, 200) + + // Secret: otherapp + req = serverClient.NewRequest("POST", "/v1/secret/data/otherapp") + req.BodyBytes = []byte(`{ + "data": { + "username": 
"barstuff", + "password": "zap", + "cert": "something" + } + }`) + request(t, serverClient, req, 200) + + return roleIDPath, secretIDPath +} + +// TestAgent_Template_VaultClientFromEnv tests that Vault Agent can read in its +// required `vault` client details from environment variables instead of config. +func TestAgent_Template_VaultClientFromEnv(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) + + // make a temp directory to hold renders. Each test will create a temp dir + // inside this one + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDirRoot) + + vaultAddr := "https://" + cluster.Cores[0].Listeners[0].Address.String() + testCases := map[string]struct { + env map[string]string + }{ + "VAULT_ADDR and VAULT_CACERT": { + env: map[string]string{ + api.EnvVaultAddress: vaultAddr, + api.EnvVaultCACert: cluster.CACertPEMFile, + }, + }, + "VAULT_ADDR and VAULT_CACERT_BYTES": { + env: map[string]string{ + api.EnvVaultAddress: vaultAddr, + api.EnvVaultCACertBytes: string(cluster.CACertPEM), + }, + }, + } + + for tcname, tc := range testCases { + t.Run(tcname, func(t *testing.T) { + for k, v := range tc.env { + t.Setenv(k, v) + } + tmpDir := t.TempDir() + + // Make a template. 
+ templateFile := filepath.Join(tmpDir, "render.tmpl") + if err := os.WriteFile(templateFile, []byte(templateContents(0)), 0o600); err != nil { + t.Fatal(err) + } + + // build up the template config to be added to the Agent config.hcl file + targetFile := filepath.Join(tmpDir, "render.json") + templateConfig := fmt.Sprintf(` +template { + source = "%s" + destination = "%s" +} + `, templateFile, targetFile) + + // Create a config file + config := ` +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + remove_secret_id_file_after_reading = false + } + } +} + +%s +` + + config = fmt.Sprintf(config, roleIDPath, secretIDPath, templateConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + ui, cmd := testAgentCommand(t, logger) + cmd.client = serverClient + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + code := cmd.Run([]string{"-config", configPath}) + if code != 0 { + t.Errorf("non-zero return code when running agent: %d", code) + t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) + t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + defer func() { + cmd.ShutdownCh <- struct{}{} + wg.Wait() + }() + + // We need to poll for a bit to give Agent time to render the + // templates. 
			// Without this, the test will attempt to read
			// the temp dir before Agent has had time to render and will
			// likely fail the test.
			tick := time.Tick(1 * time.Second)
			timeout := time.After(10 * time.Second)
			for {
				select {
				case <-timeout:
					t.Fatalf("timed out waiting for templates to render, last error: %v", err)
				case <-tick:
				}

				contents, err := os.ReadFile(targetFile)
				if err != nil {
					// If the file simply doesn't exist, continue waiting for
					// the template rendering to complete.
					if os.IsNotExist(err) {
						continue
					}
					t.Fatal(err)
				}

				if string(contents) != templateRendered(0) {
					t.Fatalf("expected=%q, got=%q", templateRendered(0), string(contents))
				}

				// Success! Break out of the retry loop.
				break
			}
		})
	}
}

// testListFiles returns the number of files in dir whose name carries the
// given extension (as reported by filepath.Ext, e.g. ".json").
func testListFiles(t *testing.T, dir, extension string) int {
	t.Helper()

	files, err := os.ReadDir(dir)
	if err != nil {
		t.Fatal(err)
	}
	var count int
	for _, f := range files {
		if filepath.Ext(f.Name()) == extension {
			count++
		}
	}

	return count
}

// TestAgent_Template_ExitCounter tests that Vault Agent correctly renders all
// templates before exiting when the configuration uses exit_after_auth. This is
// similar to TestAgent_Template_Basic, but differs by using a consistent number
// of secrets from multiple sources, whereas the basic test could possibly
// generate a random number of secrets, but all using the same source.
This test +// reproduces https://github.com/hashicorp/vault/issues/7883 +func TestAgent_Template_ExitCounter(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Setenv(api.EnvVaultAddress, serverClient.Address()) + + roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) + + // make a temp directory to hold renders. 
Each test will create a temp dir + // inside this one + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDirRoot) + + // create temp dir for this test run + tmpDir, err := os.MkdirTemp(tmpDirRoot, "agent-test") + if err != nil { + t.Fatal(err) + } + + // Create a config file + config := ` +vault { + address = "%s" + tls_skip_verify = true +} + +auto_auth { + method "approle" { + mount_path = "auth/approle" + config = { + role_id_file_path = "%s" + secret_id_file_path = "%s" + remove_secret_id_file_after_reading = false + } + } +} + +template { + contents = "{{ with secret \"secret/myapp\" }}{{ range $k, $v := .Data.data }}{{ $v }}{{ end }}{{ end }}" + destination = "%s/render-pass.txt" +} + +template { + contents = "{{ with secret \"secret/myapp2\" }}{{ .Data.data.username}}{{ end }}" + destination = "%s/render-user.txt" +} + +template { + contents = < 0 { + h.failCount-- + h.t.Logf("%s failing GET request on %s, failures left: %d", time.Now(), req.URL.Path, h.failCount) + resp.WriteHeader(500) + return + } + h.t.Logf("passing GET request on %s", req.URL.Path) + } + vaulthttp.Handler.Handler(h.props).ServeHTTP(resp, req) +} + +// userAgentHandler makes it easy to test the User-Agent header received +// by Vault +type userAgentHandler struct { + props *vault.HandlerProperties + failCount int + userAgentToCheckFor string + pathToCheck string + requestMethodToCheck string + t *testing.T +} + +func (h *userAgentHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if req.Method == h.requestMethodToCheck && strings.Contains(req.RequestURI, h.pathToCheck) { + userAgent := req.UserAgent() + if !(userAgent == h.userAgentToCheckFor) { + h.t.Fatalf("User-Agent string not as expected. 
Expected to find %s, got %s", h.userAgentToCheckFor, userAgent) + } + } + vaulthttp.Handler.Handler(h.props).ServeHTTP(w, req) +} + +// TestAgent_Template_Retry verifies that the template server retries requests +// based on retry configuration. +func TestAgent_Template_Retry(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + var h handler + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + methodConf, cleanup := prepAgentApproleKV(t, serverClient) + defer cleanup() + + err := serverClient.Sys().TuneMount("secret", api.MountConfigInput{ + Options: map[string]string{ + "version": "2", + }, + }) + if err != nil { + t.Fatal(err) + } + + _, err = serverClient.Logical().Write("secret/data/otherapp", map[string]interface{}{ + "data": map[string]interface{}{ + "username": "barstuff", + "password": "zap", + "cert": "something", + }, + }) + if err != nil { + t.Fatal(err) + } + + // make a temp directory to hold renders. 
Each test will create a temp dir + // inside this one + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDirRoot) + + intRef := func(i int) *int { + return &i + } + // start test cases here + testCases := map[string]struct { + retries *int + expectError bool + }{ + "none": { + retries: intRef(-1), + expectError: true, + }, + "one": { + retries: intRef(1), + expectError: true, + }, + "two": { + retries: intRef(2), + expectError: false, + }, + "missing": { + retries: nil, + expectError: false, + }, + "default": { + retries: intRef(0), + expectError: false, + }, + } + + for tcname, tc := range testCases { + t.Run(tcname, func(t *testing.T) { + // We fail the first 6 times. The consul-template code creates + // a Vault client with MaxRetries=2, so for every consul-template + // retry configured, it will in practice make up to 3 requests. + // Thus if consul-template is configured with "one" retry, it will + // fail given our failCount, but if configured with "two" retries, + // they will consume our 6th failure, and on the "third (from its + // perspective) attempt, it will succeed. 
+ h.failCount = 6 + + // create temp dir for this test run + tmpDir, err := os.MkdirTemp(tmpDirRoot, tcname) + if err != nil { + t.Fatal(err) + } + + // make some template files + templatePath := filepath.Join(tmpDir, "render_0.tmpl") + if err := os.WriteFile(templatePath, []byte(templateContents(0)), 0o600); err != nil { + t.Fatal(err) + } + templateConfig := fmt.Sprintf(templateConfigString, templatePath, tmpDir, "render_0.json") + + var retryConf string + if tc.retries != nil { + retryConf = fmt.Sprintf("retry { num_retries = %d }", *tc.retries) + } + + config := fmt.Sprintf(` +%s +vault { + address = "%s" + %s + tls_skip_verify = true +} +%s +template_config { + exit_on_retry_failure = true +} +`, methodConf, serverClient.Address(), retryConf, templateConfig) + + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + _, cmd := testAgentCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + var code int + go func() { + code = cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + verify := func() error { + t.Helper() + // We need to poll for a bit to give Agent time to render the + // templates. 
Without this this, the test will attempt to read + // the temp dir before Agent has had time to render and will + // likely fail the test + tick := time.Tick(1 * time.Second) + timeout := time.After(15 * time.Second) + var err error + for { + select { + case <-timeout: + return fmt.Errorf("timed out waiting for templates to render, last error: %v", err) + case <-tick: + } + // Check for files rendered in the directory and break + // early for shutdown if we do have all the files + // rendered + + //---------------------------------------------------- + // Perform the tests + //---------------------------------------------------- + + if numFiles := testListFiles(t, tmpDir, ".json"); numFiles != 1 { + err = fmt.Errorf("expected 1 template, got (%d)", numFiles) + continue + } + + fileName := filepath.Join(tmpDir, "render_0.json") + var c []byte + c, err = os.ReadFile(fileName) + if err != nil { + continue + } + if string(c) != templateRendered(0) { + err = fmt.Errorf("expected=%q, got=%q", templateRendered(0), string(c)) + continue + } + return nil + } + } + + err = verify() + close(cmd.ShutdownCh) + wg.Wait() + + switch { + case (code != 0 || err != nil) && tc.expectError: + case code == 0 && err == nil && !tc.expectError: + default: + t.Fatalf("%s expectError=%v error=%v code=%d", tcname, tc.expectError, err, code) + } + }) + } +} + +// prepAgentApproleKV configures a Vault instance for approle authentication, +// such that the resulting token will have global permissions across /kv +// and /secret mounts. Returns the auto_auth config stanza to setup an Agent +// to connect using approle. 
func prepAgentApproleKV(t *testing.T, client *api.Client) (string, func()) {
	t.Helper()

	policyAutoAuthAppRole := `
path "/kv/*" {
    capabilities = ["create", "read", "update", "delete", "list"]
}
path "/secret/*" {
    capabilities = ["create", "read", "update", "delete", "list"]
}
`
	// Add a kv-admin policy
	if err := client.Sys().PutPolicy("test-autoauth", policyAutoAuthAppRole); err != nil {
		t.Fatal(err)
	}

	// Enable approle
	err := client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{
		Type: "approle",
	})
	if err != nil {
		t.Fatal(err)
	}

	// Create a role bound to the policy above.
	_, err = client.Logical().Write("auth/approle/role/test1", map[string]interface{}{
		"bind_secret_id": "true",
		"token_ttl":      "1h",
		"token_max_ttl":  "2h",
		"policies":       []string{"test-autoauth"},
	})
	if err != nil {
		t.Fatal(err)
	}

	// Write the SecretID and RoleID for the role out to temp files for the
	// agent's approle auto-auth method to consume.
	resp, err := client.Logical().Write("auth/approle/role/test1/secret-id", nil)
	if err != nil {
		t.Fatal(err)
	}
	secretID := resp.Data["secret_id"].(string)
	secretIDFile := makeTempFile(t, "secret_id.txt", secretID+"\n")

	resp, err = client.Logical().Read("auth/approle/role/test1/role-id")
	if err != nil {
		t.Fatal(err)
	}
	roleID := resp.Data["role_id"].(string)
	roleIDFile := makeTempFile(t, "role_id.txt", roleID+"\n")

	config := fmt.Sprintf(`
auto_auth {
    method "approle" {
        mount_path = "auth/approle"
        config = {
            role_id_file_path = "%s"
            secret_id_file_path = "%s"
            remove_secret_id_file_after_reading = false
        }
    }
}
`, roleIDFile, secretIDFile)

	// cleanup removes the credential files; callers should defer it.
	cleanup := func() {
		_ = os.Remove(roleIDFile)
		_ = os.Remove(secretIDFile)
	}
	return config, cleanup
}

// TestAgent_AutoAuth_UserAgent tests that the User-Agent sent
// to Vault by Vault Agent is correct when performing Auto-Auth.
// Uses the custom handler userAgentHandler (defined above) so
// that Vault validates the User-Agent on requests sent by Agent.
func TestAgent_AutoAuth_UserAgent(t *testing.T) {
	logger := logging.NewVaultLogger(hclog.Trace)

	// The userAgentHandler asserts that the auto-auth login request (a PUT to
	// auth/approle/login) carries the agent auto-auth User-Agent string.
	var h userAgentHandler
	cluster := vault.NewTestCluster(t, &vault.CoreConfig{
		CredentialBackends: map[string]logical.Factory{
			"approle": credAppRole.Factory,
		},
	}, &vault.TestClusterOptions{
		NumCores: 1,
		HandlerFunc: vaulthttp.HandlerFunc(
			func(properties *vault.HandlerProperties) http.Handler {
				h.props = properties
				h.userAgentToCheckFor = useragent.AgentAutoAuthString()
				h.requestMethodToCheck = "PUT"
				h.pathToCheck = "auth/approle/login"
				h.t = t
				return &h
			}),
	})
	cluster.Start()
	defer cluster.Cleanup()

	serverClient := cluster.Cores[0].Client

	// Enable the approle auth method
	roleIDPath, secretIDPath := setupAppRole(t, serverClient)

	// Reserve a unique sink path; the file itself is created by the agent.
	sinkf, err := os.CreateTemp("", "sink.test.")
	if err != nil {
		t.Fatal(err)
	}
	sink := sinkf.Name()
	sinkf.Close()
	os.Remove(sink)

	autoAuthConfig := fmt.Sprintf(`
auto_auth {
    method "approle" {
        mount_path = "auth/approle"
        config = {
            role_id_file_path = "%s"
            secret_id_file_path = "%s"
        }
    }

    sink "file" {
        config = {
            path = "%s"
        }
    }
}`, roleIDPath, secretIDPath, sink)

	listenAddr := generateListenerAddress(t)
	listenConfig := fmt.Sprintf(`
listener "tcp" {
    address = "%s"
    tls_disable = true
}
`, listenAddr)

	config := fmt.Sprintf(`
vault {
    address = "%s"
    tls_skip_verify = true
}
api_proxy {
    use_auto_auth_token = true
}
%s
%s
`, serverClient.Address(), listenConfig, autoAuthConfig)
	configPath := makeTempFile(t, "config.hcl", config)
	defer os.Remove(configPath)

	// Unset the environment variable so that agent picks up the right test
	// cluster address
	defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress))
	os.Unsetenv(api.EnvVaultAddress)

	// Start the agent
	_, cmd := testAgentCommand(t, logger)
	cmd.startedCh = make(chan struct{})

	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		cmd.Run([]string{"-config", configPath})
		wg.Done()
	}()

	select {
	case <-cmd.startedCh:
	case <-time.After(5 * time.Second):
		t.Errorf("timeout")
	}

	// Validate that the auto-auth token has been correctly attained
	// and works for LookupSelf
	conf := api.DefaultConfig()
	conf.Address = "http://" + listenAddr
	agentClient, err := api.NewClient(conf)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Clear the token so the proxied request must rely on the auto-auth token.
	agentClient.SetToken("")
	err = agentClient.SetAddress("http://" + listenAddr)
	if err != nil {
		t.Fatal(err)
	}

	// Wait for the token to be sent to syncs and be available to be used
	time.Sleep(5 * time.Second)

	req := agentClient.NewRequest("GET", "/v1/auth/token/lookup-self")
	request(t, agentClient, req, 200)

	close(cmd.ShutdownCh)
	wg.Wait()
}

// TestAgent_APIProxyWithoutCache_UserAgent tests that the User-Agent sent
// to Vault by Vault Agent is correct using the API proxy without
// the cache configured. Uses the custom handler
// userAgentHandler struct defined in this test package, so that Vault validates
// the User-Agent on requests sent by Agent.
+func TestAgent_APIProxyWithoutCache_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + userAgentForProxiedClient := "proxied-client" + var h userAgentHandler + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.AgentProxyStringWithProxiedUserAgent(userAgentForProxiedClient) + h.pathToCheck = "/v1/auth/token/lookup-self" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +`, serverClient.Address(), listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + _, cmd := testAgentCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + agentClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + agentClient.AddHeader("User-Agent", userAgentForProxiedClient) + agentClient.SetToken(serverClient.Token()) + agentClient.SetMaxRetries(0) + err = agentClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + _, err = agentClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + + 
close(cmd.ShutdownCh) + wg.Wait() +} + +// TestAgent_APIProxyWithCache_UserAgent tests that the User-Agent sent +// to Vault by Vault Agent is correct using the API proxy with +// the cache configured. Uses the custom handler +// userAgentHandler struct defined in this test package, so that Vault validates the +// User-Agent on requests sent by Agent. +func TestAgent_APIProxyWithCache_UserAgent(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + userAgentForProxiedClient := "proxied-client" + var h userAgentHandler + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc( + func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.userAgentToCheckFor = useragent.AgentProxyStringWithProxiedUserAgent(userAgentForProxiedClient) + h.pathToCheck = "/v1/auth/token/lookup-self" + h.requestMethodToCheck = "GET" + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + cacheConfig := ` +cache { +}` + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), listenConfig, cacheConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + _, cmd := testAgentCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + agentClient, err := 
api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + agentClient.AddHeader("User-Agent", userAgentForProxiedClient) + agentClient.SetToken(serverClient.Token()) + agentClient.SetMaxRetries(0) + err = agentClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + _, err = agentClient.Auth().Token().LookupSelf() + if err != nil { + t.Fatal(err) + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +func TestAgent_Cache_DynamicSecret(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + cacheConfig := ` +cache { +} +` + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), cacheConfig, listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + _, cmd := testAgentCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + agentClient, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + agentClient.SetToken(serverClient.Token()) + agentClient.SetMaxRetries(0) + err = agentClient.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + + renewable := true + tokenCreateRequest := 
&api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", + Renewable: &renewable, + } + + // This was the simplest test I could find to trigger the caching behaviour, + // i.e. the most concise I could make the test that I can tell + // creating an orphan token returns Auth, is renewable, and isn't a token + // that's managed elsewhere (since it's an orphan) + secret, err := agentClient.Auth().Token().CreateOrphan(tokenCreateRequest) + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Auth == nil { + t.Fatalf("secret not as expected: %v", secret) + } + + token := secret.Auth.ClientToken + + secret, err = agentClient.Auth().Token().CreateOrphan(tokenCreateRequest) + if err != nil { + t.Fatal(err) + } + if secret == nil || secret.Auth == nil { + t.Fatalf("secret not as expected: %v", secret) + } + + token2 := secret.Auth.ClientToken + + if token != token2 { + t.Fatalf("token create response not cached when it should have been, as tokens differ") + } + + close(cmd.ShutdownCh) + wg.Wait() +} + +func TestAgent_ApiProxy_Retry(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + var h handler + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.HandlerFunc(func(properties *vault.HandlerProperties) http.Handler { + h.props = properties + h.t = t + return &h + }), + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, 
os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + _, err := serverClient.Logical().Write("secret/foo", map[string]interface{}{ + "bar": "baz", + }) + if err != nil { + t.Fatal(err) + } + + intRef := func(i int) *int { + return &i + } + // start test cases here + testCases := map[string]struct { + retries *int + expectError bool + }{ + "none": { + retries: intRef(-1), + expectError: true, + }, + "one": { + retries: intRef(1), + expectError: true, + }, + "two": { + retries: intRef(2), + expectError: false, + }, + "missing": { + retries: nil, + expectError: false, + }, + "default": { + retries: intRef(0), + expectError: false, + }, + } + + for tcname, tc := range testCases { + t.Run(tcname, func(t *testing.T) { + h.failCount = 2 + + cacheConfig := ` +cache { +} +` + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + var retryConf string + if tc.retries != nil { + retryConf = fmt.Sprintf("retry { num_retries = %d }", *tc.retries) + } + + config := fmt.Sprintf(` +vault { + address = "%s" + %s + tls_skip_verify = true +} +%s +%s +`, serverClient.Address(), retryConf, cacheConfig, listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + _, cmd := testAgentCommand(t, logger) + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + cmd.Run([]string{"-config", configPath}) + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + client, err := api.NewClient(api.DefaultConfig()) + if err != nil { + t.Fatal(err) + } + client.SetToken(serverClient.Token()) + client.SetMaxRetries(0) + err = client.SetAddress("http://" + listenAddr) + if err != nil { + t.Fatal(err) + } + secret, err := client.Logical().Read("secret/foo") + switch { + case (err != nil || secret == nil) && tc.expectError: + case 
(err == nil || secret != nil) && !tc.expectError: + default: + t.Fatalf("%s expectError=%v error=%v secret=%v", tcname, tc.expectError, err, secret) + } + if secret != nil && secret.Data["foo"] != nil { + val := secret.Data["foo"].(map[string]interface{}) + if !reflect.DeepEqual(val, map[string]interface{}{"bar": "baz"}) { + t.Fatalf("expected key 'foo' to yield bar=baz, got: %v", val) + } + } + time.Sleep(time.Second) + + close(cmd.ShutdownCh) + wg.Wait() + }) + } +} + +func TestAgent_TemplateConfig_ExitOnRetryFailure(t *testing.T) { + //---------------------------------------------------- + // Start the server and agent + //---------------------------------------------------- + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, + &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "approle": credAppRole.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "kv": logicalKv.Factory, + }, + }, + &vault.TestClusterOptions{ + NumCores: 1, + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + vault.TestWaitActive(t, cluster.Cores[0].Core) + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + autoAuthConfig, cleanup := prepAgentApproleKV(t, serverClient) + defer cleanup() + + err := serverClient.Sys().TuneMount("secret", api.MountConfigInput{ + Options: map[string]string{ + "version": "2", + }, + }) + if err != nil { + t.Fatal(err) + } + + _, err = serverClient.Logical().Write("secret/data/otherapp", map[string]interface{}{ + "data": map[string]interface{}{ + "username": "barstuff", + "password": "zap", + "cert": "something", + }, + }) + if err != nil { + t.Fatal(err) + } + + // make a temp directory to hold renders. 
Each test will create a temp dir + // inside this one + tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDirRoot) + + // Note that missing key is different from a non-existent secret. A missing + // key (2xx response with missing keys in the response map) can still yield + // a successful render unless error_on_missing_key is specified, whereas a + // missing secret (4xx response) always results in an error. + missingKeyTemplateContent := `{{- with secret "secret/otherapp"}}{"secret": "other", +{{- if .Data.data.foo}}"foo":"{{ .Data.data.foo}}"{{- end }}} +{{- end }}` + missingKeyTemplateRender := `{"secret": "other",}` + + badTemplateContent := `{{- with secret "secret/non-existent"}}{"secret": "other", +{{- if .Data.data.foo}}"foo":"{{ .Data.data.foo}}"{{- end }}} +{{- end }}` + + testCases := map[string]struct { + exitOnRetryFailure *bool + templateContents string + expectTemplateRender string + templateErrorOnMissingKey bool + expectError bool + expectExitFromError bool + }{ + "true, no template error": { + exitOnRetryFailure: pointerutil.BoolPtr(true), + templateContents: templateContents(0), + expectTemplateRender: templateRendered(0), + templateErrorOnMissingKey: false, + expectError: false, + expectExitFromError: false, + }, + "true, with non-existent secret": { + exitOnRetryFailure: pointerutil.BoolPtr(true), + templateContents: badTemplateContent, + expectTemplateRender: "", + templateErrorOnMissingKey: false, + expectError: true, + expectExitFromError: true, + }, + "true, with missing key": { + exitOnRetryFailure: pointerutil.BoolPtr(true), + templateContents: missingKeyTemplateContent, + expectTemplateRender: missingKeyTemplateRender, + templateErrorOnMissingKey: false, + expectError: false, + expectExitFromError: false, + }, + "true, with missing key, with error_on_missing_key": { + exitOnRetryFailure: pointerutil.BoolPtr(true), + templateContents: missingKeyTemplateContent, + 
expectTemplateRender: "", + templateErrorOnMissingKey: true, + expectError: true, + expectExitFromError: true, + }, + "false, no template error": { + exitOnRetryFailure: pointerutil.BoolPtr(false), + templateContents: templateContents(0), + expectTemplateRender: templateRendered(0), + templateErrorOnMissingKey: false, + expectError: false, + expectExitFromError: false, + }, + "false, with non-existent secret": { + exitOnRetryFailure: pointerutil.BoolPtr(false), + templateContents: badTemplateContent, + expectTemplateRender: "", + templateErrorOnMissingKey: false, + expectError: true, + expectExitFromError: false, + }, + "false, with missing key": { + exitOnRetryFailure: pointerutil.BoolPtr(false), + templateContents: missingKeyTemplateContent, + expectTemplateRender: missingKeyTemplateRender, + templateErrorOnMissingKey: false, + expectError: false, + expectExitFromError: false, + }, + "false, with missing key, with error_on_missing_key": { + exitOnRetryFailure: pointerutil.BoolPtr(false), + templateContents: missingKeyTemplateContent, + expectTemplateRender: missingKeyTemplateRender, + templateErrorOnMissingKey: true, + expectError: true, + expectExitFromError: false, + }, + "missing": { + exitOnRetryFailure: nil, + templateContents: templateContents(0), + expectTemplateRender: templateRendered(0), + templateErrorOnMissingKey: false, + expectError: false, + expectExitFromError: false, + }, + } + + for tcName, tc := range testCases { + t.Run(tcName, func(t *testing.T) { + // create temp dir for this test run + tmpDir, err := os.MkdirTemp(tmpDirRoot, tcName) + if err != nil { + t.Fatal(err) + } + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + var exitOnRetryFailure string + if tc.exitOnRetryFailure != nil { + exitOnRetryFailure = fmt.Sprintf("exit_on_retry_failure = %t", *tc.exitOnRetryFailure) + } + templateConfig := fmt.Sprintf(` +template_config = { + %s +} 
+`, exitOnRetryFailure) + + template := fmt.Sprintf(` +template { + contents = < 0, "no files were found") + + for _, p := range m { + f, err := os.Open(p) + require.NoError(t, err) + + fs := bufio.NewScanner(f) + fs.Split(bufio.ScanLines) + + for fs.Scan() { + s := fs.Text() + entry := make(map[string]string) + err := json.Unmarshal([]byte(s), &entry) + require.NoError(t, err) + v, ok := entry["@message"] + if !ok { + continue + } + if v == runnerLogMessage { + found = true + break + } + } + } + + require.Truef(t, found, "unable to find consul-template partial message in logs", runnerLogMessage) +} + +// Get a randomly assigned port and then free it again before returning it. +// There is still a race when trying to use it, but should work better +// than a static port. +func generateListenerAddress(t *testing.T) string { + t.Helper() + + ln1, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + listenAddr := ln1.Addr().String() + ln1.Close() + return listenAddr +} diff --git a/changelog/24238.txt b/changelog/24238.txt new file mode 100644 index 000000000000..a992257ede3f --- /dev/null +++ b/changelog/24238.txt @@ -0,0 +1,102 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, settled, find, waitUntil } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; + +const SHARED_STYLES = { + success: { + icon: 'check-circle-fill', + class: 'hds-alert--color-success', + }, + warning: { + icon: 'alert-triangle-fill', + class: 'hds-alert--color-warning', + }, +}; +module('Integration | Component | alert-inline', function (hooks) { + setupRenderingTest(hooks); + + test('it renders alert message for each @color arg', async function (assert) { + const COLORS = { + ...SHARED_STYLES, + neutral: { + icon: 'info-fill', + class: 'hds-alert--color-neutral', + }, + highlight: { + icon: 'info-fill', + class: 'hds-alert--color-highlight', + }, + critical: { + icon: 'alert-diamond-fill', + class: 'hds-alert--color-critical', + }, + }; + + const { neutral } = COLORS; // default color + await render(hbs``); + assert.dom('[data-test-inline-error-message]').hasText('some very important alert'); + assert.dom(`[data-test-icon="${neutral.icon}"]`).exists('renders default icon'); + assert.dom('[data-test-inline-alert]').hasClass(neutral.class, 'renders default class'); + + // assert deprecated @type arg values map to expected color + for (const type in COLORS) { + this.color = type; + const color = COLORS[type]; + await render(hbs``); + assert.dom(`[data-test-icon="${color.icon}"]`).exists(`@color="${type}" renders icon: ${color.icon}`); + assert + .dom('[data-test-inline-alert]') + .hasClass(color.class, `@color="${type}" renders class: ${color.class}`); + } + }); + + test('it renders alert color for each deprecated @type arg', async function (assert) { + const OLD_TYPES = { + ...SHARED_STYLES, + info: { + icon: 'info-fill', + class: 'hds-alert--color-highlight', + }, + danger: { + icon: 'alert-diamond-fill', + class: 'hds-alert--color-critical', + }, + }; + // assert deprecated @type arg values 
map to expected color + for (const type in OLD_TYPES) { + this.type = type; + const color = OLD_TYPES[type]; + await render(hbs``); + assert + .dom(`[data-test-icon="${color.icon}"]`) + .exists(`deprecated @type="${type}" renders icon: ${color.icon}`); + assert + .dom('[data-test-inline-alert]') + .hasClass(color.class, `deprecated @type="${type}" renders class: ${color.class}`); + } + }); + + test('it mimics loading when message changes', async function (assert) { + this.message = 'some very important alert'; + await render(hbs` + + `); + assert + .dom('[data-test-inline-error-message]') + .hasText('some very important alert', 'it renders original message'); + + this.set('message', 'some changed alert!!!'); + await waitUntil(() => find('[data-test-icon="loading"]')); + assert.ok(find('[data-test-icon="loading"]'), 'it shows loading icon when message changes'); + await settled(); + assert + .dom('[data-test-inline-error-message]') + .hasText('some changed alert!!!', 'it shows updated message'); + }); +}); diff --git a/changelog/24246.txt b/changelog/24246.txt new file mode 100644 index 000000000000..0496ce57d621 --- /dev/null +++ b/changelog/24246.txt @@ -0,0 +1,18 @@ +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + + + {{#unless this.isRefreshing}} + {{@message}} + {{/unless}} + \ No newline at end of file diff --git a/changelog/24250.txt b/changelog/24250.txt new file mode 100644 index 000000000000..8a25a6fc286b --- /dev/null +++ b/changelog/24250.txt @@ -0,0 +1,68 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import Component from '@glimmer/component'; +import { action } from '@ember/object'; +import { later } from '@ember/runloop'; +import { tracked } from '@glimmer/tracking'; +import { assert } from '@ember/debug'; + +/** + * @module AlertInline + * * Use Hds::Alert @type="compact" for displaying alert messages. 
+ * This component renders a compact Hds::Alert that displays a loading icon if the + * @message arg changes and then re-renders the updated @message text. + * (Example: submitting a form and displaying the number of errors because on re-submit the number may change) + * + * @example + * ``` + * + * ``` + * + * @deprecated {string} type - color getter maps type to the Hds::Alert @color + * @param {string} color - Styles alert color and icon, can be one of: critical, warning, success, highlight, neutral + * @param {string} message - The message to display within the alert. + */ + +export default class AlertInlineComponent extends Component { + @tracked isRefreshing = false; + + constructor() { + super(...arguments); + assert('@type arg is deprecated, pass @color="critical" instead', this.args.type !== 'critical'); + if (this.args.color) { + const possibleColors = ['critical', 'warning', 'success', 'highlight', 'neutral']; + assert( + `${this.args.color} is not a valid color. @color must be one of: ${possibleColors.join(', ')}`, + possibleColors.includes(this.args.color) + ); + } + } + + get color() { + if (this.args.color) return this.args.color; + // @type arg is deprecated, this is for backward compatibility of old implementation + switch (this.args.type) { + case 'danger': + return 'critical'; + case 'success': + return 'success'; + case 'warning': + return 'warning'; + case 'info': + return 'highlight'; + default: + return 'neutral'; + } + } + + @action + refresh() { + this.isRefreshing = true; + later(() => { + this.isRefreshing = false; + }, 200); + } +} diff --git a/changelog/24252.txt b/changelog/24252.txt new file mode 100644 index 000000000000..cc9ee5555101 --- /dev/null +++ b/changelog/24252.txt @@ -0,0 +1,47 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; + +const selectors = { + versionDisplay: '[data-test-footer-version]', + upgradeLink: '[data-test-footer-upgrade-link]', + docsLink: '[data-test-footer-documentation-link]', +}; + +module('Integration | Component | app-footer', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.versionSvc = this.owner.lookup('service:version'); + }); + + test('it renders a sane default', async function (assert) { + await render(hbs``); + assert.dom(selectors.versionDisplay).hasText('Vault', 'Vault without version by default'); + assert.dom(selectors.upgradeLink).hasText('Upgrade to Vault Enterprise', 'upgrade link shows'); + assert.dom(selectors.docsLink).hasText('Documentation', 'displays docs link'); + }); + + test('it renders for community version', async function (assert) { + this.versionSvc.version = '1.15.1'; + this.versionSvc.type = 'community'; + await render(hbs``); + assert.dom(selectors.versionDisplay).hasText('Vault 1.15.1', 'Vault shows version when available'); + assert.dom(selectors.upgradeLink).hasText('Upgrade to Vault Enterprise', 'upgrade link shows'); + assert.dom(selectors.docsLink).hasText('Documentation', 'displays docs link'); + }); + test('it renders for ent version', async function (assert) { + this.versionSvc.version = '1.15.1+hsm'; + this.versionSvc.type = 'enterprise'; + await render(hbs``); + assert.dom(selectors.versionDisplay).hasText('Vault 1.15.1+hsm', 'shows version when available'); + assert.dom(selectors.upgradeLink).doesNotExist('upgrade link not shown'); + assert.dom(selectors.docsLink).hasText('Documentation', 'displays docs link'); + }); +}); diff --git a/changelog/24256.txt b/changelog/24256.txt new file mode 100644 index 000000000000..616feb27d332 --- /dev/null +++ 
b/changelog/24256.txt @@ -0,0 +1,31 @@ +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + + + + {{#if this.isDevelopment}} +
+
+ + Local development + +
+
+ {{/if}} +
+ + Vault + {{this.version.version}} + + {{#if this.version.isCommunity}} + + Upgrade to Vault Enterprise + + {{/if}} + + Documentation + + +
\ No newline at end of file diff --git a/changelog/24270.txt b/changelog/24270.txt new file mode 100644 index 000000000000..2a64928d1d67 --- /dev/null +++ b/changelog/24270.txt @@ -0,0 +1,16 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { service } from '@ember/service'; +import Component from '@glimmer/component'; +import ENV from 'vault/config/environment'; + +export default class AppFooterComponent extends Component { + @service version; + + get isDevelopment() { + return ENV.environment === 'development'; + } +} diff --git a/changelog/24281.txt b/changelog/24281.txt new file mode 100644 index 000000000000..ca5ec0a0529d --- /dev/null +++ b/changelog/24281.txt @@ -0,0 +1,122 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import Application from '@ember/application'; +import Resolver from 'ember-resolver'; +import loadInitializers from 'ember-load-initializers'; +import config from 'vault/config/environment'; + +export default class App extends Application { + modulePrefix = config.modulePrefix; + podModulePrefix = config.podModulePrefix; + Resolver = Resolver; + engines = { + openApiExplorer: { + dependencies: { + services: ['auth', 'flash-messages', 'namespace', 'router', 'version'], + }, + }, + replication: { + dependencies: { + services: [ + 'auth', + 'flash-messages', + 'namespace', + 'replication-mode', + 'router', + 'store', + 'version', + '-portal', + ], + externalRoutes: { + replication: 'vault.cluster.replication.index', + vault: 'vault.cluster', + }, + }, + }, + kmip: { + dependencies: { + services: [ + 'auth', + 'download', + 'flash-messages', + 'namespace', + 'path-help', + 'router', + 'store', + 'version', + 'secret-mount-path', + ], + externalRoutes: { + secrets: 'vault.cluster.secrets.backends', + }, + }, + }, + kubernetes: { + dependencies: { + services: ['router', 'store', 'secret-mount-path', 'flash-messages'], + externalRoutes: { + secrets: 
'vault.cluster.secrets.backends', + }, + }, + }, + ldap: { + dependencies: { + services: ['router', 'store', 'secret-mount-path', 'flash-messages', 'auth'], + externalRoutes: { + secrets: 'vault.cluster.secrets.backends', + }, + }, + }, + kv: { + dependencies: { + services: [ + 'download', + 'namespace', + 'router', + 'store', + 'secret-mount-path', + 'flash-messages', + 'control-group', + ], + externalRoutes: { + secrets: 'vault.cluster.secrets.backends', + syncDestination: 'vault.cluster.sync.secrets.destinations.destination', + }, + }, + }, + pki: { + dependencies: { + services: [ + 'auth', + 'download', + 'flash-messages', + 'namespace', + 'path-help', + 'router', + 'secret-mount-path', + 'store', + 'version', + ], + externalRoutes: { + secrets: 'vault.cluster.secrets.backends', + externalMountIssuer: 'vault.cluster.secrets.backend.pki.issuers.issuer.details', + secretsListRootConfiguration: 'vault.cluster.secrets.backend.configuration', + }, + }, + }, + sync: { + dependencies: { + services: ['flash-messages', 'router', 'store', 'version'], + externalRoutes: { + kvSecretDetails: 'vault.cluster.secrets.backend.kv.secret.details', + clientCountDashboard: 'vault.cluster.clients.dashboard', + }, + }, + }, + }; +} + +loadInitializers(App, config.modulePrefix); diff --git a/changelog/24283.txt b/changelog/24283.txt new file mode 100644 index 000000000000..1e8c8ad0b3ff --- /dev/null +++ b/changelog/24283.txt @@ -0,0 +1,34 @@ +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + + + + + Replication + + {{#if (not-eq this.model.mode "unsupported")}} + {{#if (has-feature "DR Replication")}} + + {{/if}} + {{#if (has-feature "Performance Replication")}} + + {{/if}} + {{/if}} + + +{{outlet}} \ No newline at end of file diff --git a/changelog/24290.txt b/changelog/24290.txt new file mode 100644 index 000000000000..75351718fa08 --- /dev/null +++ b/changelog/24290.txt @@ -0,0 +1,53 @@ +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { inject as service } from '@ember/service'; +import { setProperties } from '@ember/object'; +import { hash } from 'rsvp'; +import Route from '@ember/routing/route'; +import ClusterRoute from 'vault/mixins/cluster-route'; + +export default Route.extend(ClusterRoute, { + version: service(), + store: service(), + auth: service(), + router: service(), + + beforeModel() { + if (this.auth.activeCluster.replicationRedacted) { + // disallow replication access if endpoints are redacted + return this.router.transitionTo('vault.cluster'); + } + return this.version.fetchFeatures().then(() => { + return this._super(...arguments); + }); + }, + + model() { + return this.auth.activeCluster; + }, + + afterModel(model) { + return hash({ + canEnablePrimary: this.store + .findRecord('capabilities', 'sys/replication/primary/enable') + .then((c) => c.get('canUpdate')), + canEnableSecondary: this.store + .findRecord('capabilities', 'sys/replication/secondary/enable') + .then((c) => c.get('canUpdate')), + }).then(({ canEnablePrimary, canEnableSecondary }) => { + setProperties(model, { + canEnablePrimary, + canEnableSecondary, + }); + return model; + }); + }, + actions: { + refresh() { + this.refresh(); + }, + }, +}); diff --git a/changelog/24292.txt b/changelog/24292.txt new file mode 100644 index 000000000000..13ff8044f6db --- /dev/null +++ b/changelog/24292.txt @@ -0,0 +1,69 @@ +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + +
+
+ + + + {{#if this.modelValidations.name.errors}} + + {{/if}} + + +
+
+ + + {{#if this.modelValidations.targets.errors}} + + {{/if}} +
+
\ No newline at end of file diff --git a/changelog/24297.txt b/changelog/24297.txt new file mode 100644 index 000000000000..5418b26b5b27 --- /dev/null +++ b/changelog/24297.txt @@ -0,0 +1,136 @@ +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + +{{! only show side-by-side horizontal bar charts if data is from a single, historical month }} +
+ + {{#if this.barChartTotalClients}} + {{#if @isHistoricalMonth}} +
+

New clients

+

{{this.chartText.newCopy}}

+ +
+ +
+

Total clients

+

{{this.chartText.totalCopy}}

+ +
+ {{else}} +
+ +
+
+

{{this.chartText.totalCopy}}

+
+ +
+

Top {{this.attributionBreakdown}}

+

{{this.topClientCounts.label}}

+
+ +
+

Clients in {{this.attributionBreakdown}}

+

{{format-number this.topClientCounts.clients}}

+
+ {{/if}} +
+ {{capitalize (get @chartLegend "0.label")}} + {{capitalize (get @chartLegend "1.label")}} +
+ {{else}} +
+ +
+ {{/if}} +
+ {{#if @responseTimestamp}} + Updated + {{date-format @responseTimestamp "MMM d yyyy, h:mm:ss aaa" withTimeZone=true}} + {{/if}} +
+
+ +{{! MODAL FOR CSV DOWNLOAD }} +{{#if this.showCSVDownloadModal}} + + + Export attribution data + + +

+ This export will include the namespace path, authentication method path, and the associated total, entity, and + non-entity clients for the below + {{if this.formattedEndDate "date range" "month"}}. +

+

SELECTED DATE {{if this.formattedEndDate " RANGE"}}

+

+ {{this.formattedStartDate}} + {{if this.formattedEndDate "-"}} + {{this.formattedEndDate}}

+
+ + + + + + {{#if @upgradeExplanation}} + + + Your data contains an upgrade. + {{@upgradeExplanation}} + Visit our + + Client count FAQ + + for more information. + + + {{/if}} + +
+{{/if}} \ No newline at end of file diff --git a/changelog/24299.txt b/changelog/24299.txt new file mode 100644 index 000000000000..1c59140a9a98 --- /dev/null +++ b/changelog/24299.txt @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + + "github.com/hashicorp/cli" +) + +var _ cli.Command = (*AuditCommand)(nil) + +type AuditCommand struct { + *BaseCommand +} + +func (c *AuditCommand) Synopsis() string { + return "Interact with audit devices" +} + +func (c *AuditCommand) Help() string { + helpText := ` +Usage: vault audit [options] [args] + + This command groups subcommands for interacting with Vault's audit devices. + Users can list, enable, and disable audit devices. + + *NOTE*: Once an audit device has been enabled, failure to audit could prevent + Vault from servicing future requests. It is highly recommended that you enable + multiple audit devices. + + List all enabled audit devices: + + $ vault audit list + + Enable a new audit device "file"; + + $ vault audit enable file file_path=/var/log/audit.log + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *AuditCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/changelog/24305.txt b/changelog/24305.txt new file mode 100644 index 000000000000..342a4387008e --- /dev/null +++ b/changelog/24305.txt @@ -0,0 +1,333 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package vault + +import ( + "context" + "fmt" + "runtime/debug" + "sync" + "time" + + metrics "github.com/armon/go-metrics" + "github.com/hashicorp/eventlogger" + log "github.com/hashicorp/go-hclog" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/internal/observability/event" + "github.com/hashicorp/vault/sdk/logical" +) + +type backendEntry struct { + backend audit.Backend + local bool +} + +// AuditBroker is used to provide a single ingest interface to auditable +// events given that multiple backends may be configured. +type AuditBroker struct { + sync.RWMutex + backends map[string]backendEntry + logger log.Logger + + broker *eventlogger.Broker +} + +// NewAuditBroker creates a new audit broker +func NewAuditBroker(log log.Logger, useEventLogger bool) (*AuditBroker, error) { + var eventBroker *eventlogger.Broker + var err error + + if useEventLogger { + eventBroker, err = eventlogger.NewBroker(eventlogger.WithNodeRegistrationPolicy(eventlogger.DenyOverwrite), eventlogger.WithPipelineRegistrationPolicy(eventlogger.DenyOverwrite)) + if err != nil { + return nil, fmt.Errorf("error creating event broker for audit events: %w", err) + } + } + + b := &AuditBroker{ + backends: make(map[string]backendEntry), + logger: log, + broker: eventBroker, + } + return b, nil +} + +// Register is used to add new audit backend to the broker +func (a *AuditBroker) Register(name string, b audit.Backend, local bool) error { + a.Lock() + defer a.Unlock() + + a.backends[name] = backendEntry{ + backend: b, + local: local, + } + + if a.broker != nil { + err := a.broker.SetSuccessThresholdSinks(eventlogger.EventType(event.AuditType.String()), 1) + if err != nil { + return err + } + + err = b.RegisterNodesAndPipeline(a.broker, name) + if err != nil { + return err + } + } + + return nil +} + +// Deregister is used to remove an audit 
backend from the broker +func (a *AuditBroker) Deregister(ctx context.Context, name string) error { + a.Lock() + defer a.Unlock() + + // Remove the Backend from the map first, so that if an error occurs while + // removing the pipeline and nodes, we can quickly exit this method with + // the error. + delete(a.backends, name) + + if a.broker != nil { + if len(a.backends) == 0 { + err := a.broker.SetSuccessThresholdSinks(eventlogger.EventType(event.AuditType.String()), 0) + if err != nil { + return err + } + } + + // The first return value, a bool, indicates whether + // RemovePipelineAndNodes encountered the error while evaluating + // pre-conditions (false) or once it started removing the pipeline and + // the nodes (true). This code doesn't care either way. + _, err := a.broker.RemovePipelineAndNodes(ctx, eventlogger.EventType(event.AuditType.String()), eventlogger.PipelineID(name)) + if err != nil { + return err + } + } + + return nil +} + +// IsRegistered is used to check if a given audit backend is registered +func (a *AuditBroker) IsRegistered(name string) bool { + a.RLock() + defer a.RUnlock() + + _, ok := a.backends[name] + return ok +} + +// IsLocal is used to check if a given audit backend is registered +func (a *AuditBroker) IsLocal(name string) (bool, error) { + a.RLock() + defer a.RUnlock() + be, ok := a.backends[name] + if ok { + return be.local, nil + } + return false, fmt.Errorf("unknown audit backend %q", name) +} + +// GetHash returns a hash using the salt of the given backend +func (a *AuditBroker) GetHash(ctx context.Context, name string, input string) (string, error) { + a.RLock() + defer a.RUnlock() + be, ok := a.backends[name] + if !ok { + return "", fmt.Errorf("unknown audit backend %q", name) + } + + return audit.HashString(ctx, be.backend, input) +} + +// LogRequest is used to ensure all the audit backends have an opportunity to +// log the given request and that *at least one* succeeds. 
+func (a *AuditBroker) LogRequest(ctx context.Context, in *logical.LogInput, headersConfig *AuditedHeadersConfig) (ret error) { + defer metrics.MeasureSince([]string{"audit", "log_request"}, time.Now()) + + a.RLock() + defer a.RUnlock() + + if in.Request.InboundSSCToken != "" { + if in.Auth != nil { + reqAuthToken := in.Auth.ClientToken + in.Auth.ClientToken = in.Request.InboundSSCToken + defer func() { + in.Auth.ClientToken = reqAuthToken + }() + } + } + + var retErr *multierror.Error + + defer func() { + if r := recover(); r != nil { + a.logger.Error("panic during logging", "request_path", in.Request.Path, "error", r, "stacktrace", string(debug.Stack())) + retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log")) + } + + ret = retErr.ErrorOrNil() + failure := float32(0.0) + if ret != nil { + failure = 1.0 + } + metrics.IncrCounter([]string{"audit", "log_request_failure"}, failure) + }() + + headers := in.Request.Headers + defer func() { + in.Request.Headers = headers + }() + + // Old behavior (no events) + if a.broker == nil { + // Ensure at least one backend logs + anyLogged := false + for name, be := range a.backends { + in.Request.Headers = nil + transHeaders, thErr := headersConfig.ApplyConfig(ctx, headers, be.backend) + if thErr != nil { + a.logger.Error("backend failed to include headers", "backend", name, "error", thErr) + continue + } + in.Request.Headers = transHeaders + + start := time.Now() + lrErr := be.backend.LogRequest(ctx, in) + metrics.MeasureSince([]string{"audit", name, "log_request"}, start) + if lrErr != nil { + a.logger.Error("backend failed to log request", "backend", name, "error", lrErr) + } else { + anyLogged = true + } + } + if !anyLogged && len(a.backends) > 0 { + retErr = multierror.Append(retErr, fmt.Errorf("no audit backend succeeded in logging the request")) + } + } else { + if len(a.backends) > 0 { + e, err := audit.NewEvent(audit.RequestType) + if err != nil { + retErr = multierror.Append(retErr, err) + } + + 
e.Data = in + + status, err := a.broker.Send(ctx, eventlogger.EventType(event.AuditType.String()), e) + if err != nil { + retErr = multierror.Append(retErr, multierror.Append(err, status.Warnings...)) + } + } + } + + return retErr.ErrorOrNil() +} + +// LogResponse is used to ensure all the audit backends have an opportunity to +// log the given response and that *at least one* succeeds. +func (a *AuditBroker) LogResponse(ctx context.Context, in *logical.LogInput, headersConfig *AuditedHeadersConfig) (ret error) { + defer metrics.MeasureSince([]string{"audit", "log_response"}, time.Now()) + a.RLock() + defer a.RUnlock() + if in.Request.InboundSSCToken != "" { + if in.Auth != nil { + reqAuthToken := in.Auth.ClientToken + in.Auth.ClientToken = in.Request.InboundSSCToken + defer func() { + in.Auth.ClientToken = reqAuthToken + }() + } + } + + var retErr *multierror.Error + + defer func() { + if r := recover(); r != nil { + a.logger.Error("panic during logging", "request_path", in.Request.Path, "error", r, "stacktrace", string(debug.Stack())) + retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log")) + } + + ret = retErr.ErrorOrNil() + + failure := float32(0.0) + if ret != nil { + failure = 1.0 + } + metrics.IncrCounter([]string{"audit", "log_response_failure"}, failure) + }() + + headers := in.Request.Headers + defer func() { + in.Request.Headers = headers + }() + + // Ensure at least one backend logs + if a.broker == nil { + anyLogged := false + for name, be := range a.backends { + in.Request.Headers = nil + transHeaders, thErr := headersConfig.ApplyConfig(ctx, headers, be.backend) + if thErr != nil { + a.logger.Error("backend failed to include headers", "backend", name, "error", thErr) + continue + } + in.Request.Headers = transHeaders + + start := time.Now() + lrErr := be.backend.LogResponse(ctx, in) + metrics.MeasureSince([]string{"audit", name, "log_response"}, start) + if lrErr != nil { + a.logger.Error("backend failed to log response", 
"backend", name, "error", lrErr) + } else { + anyLogged = true + } + } + if !anyLogged && len(a.backends) > 0 { + retErr = multierror.Append(retErr, fmt.Errorf("no audit backend succeeded in logging the response")) + } + } else { + if len(a.backends) > 0 { + e, err := audit.NewEvent(audit.ResponseType) + if err != nil { + return multierror.Append(retErr, err) + } + + e.Data = in + + // In cases where we are trying to audit the response, we detach + // ourselves from the original context (keeping only the namespace). + // This is so that we get a fair run at writing audit entries if Vault + // Took up a lot of time handling the request before audit (response) + // is triggered. Pipeline nodes may check for a cancelled context and + // refuse to process the nodes further. + ns, err := namespace.FromContext(ctx) + if err != nil { + retErr = multierror.Append(retErr, fmt.Errorf("namespace missing from context: %w", err)) + return retErr.ErrorOrNil() + } + + auditContext, auditCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer auditCancel() + auditContext = namespace.ContextWithNamespace(auditContext, ns) + status, err := a.broker.Send(auditContext, eventlogger.EventType(event.AuditType.String()), e) + if err != nil { + retErr = multierror.Append(retErr, multierror.Append(err, status.Warnings...)) + } + } + } + + return retErr.ErrorOrNil() +} + +func (a *AuditBroker) Invalidate(ctx context.Context, key string) { + // For now, we ignore the key as this would only apply to salts. We just + // sort of brute force it on each one. + a.Lock() + defer a.Unlock() + for _, be := range a.backends { + be.backend.Invalidate(ctx) + } +} diff --git a/changelog/24325.txt b/changelog/24325.txt new file mode 100644 index 000000000000..79914ea5d993 --- /dev/null +++ b/changelog/24325.txt @@ -0,0 +1,92 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*AuditDisableCommand)(nil) + _ cli.CommandAutocomplete = (*AuditDisableCommand)(nil) +) + +type AuditDisableCommand struct { + *BaseCommand +} + +func (c *AuditDisableCommand) Synopsis() string { + return "Disables an audit device" +} + +func (c *AuditDisableCommand) Help() string { + helpText := ` +Usage: vault audit disable [options] PATH + + Disables an audit device. Once an audit device is disabled, no future audit + logs are dispatched to it. The data associated with the audit device is not + affected. + + The argument corresponds to the PATH of audit device, not the TYPE! + + Disable the audit device enabled at "file/": + + $ vault audit disable file/ + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *AuditDisableCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *AuditDisableCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultAudits() +} + +func (c *AuditDisableCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AuditDisableCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + path := ensureTrailingSlash(sanitizePath(args[0])) + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if err := client.Sys().DisableAudit(path); err != nil { + c.UI.Error(fmt.Sprintf("Error disabling audit device: %s", err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! 
Disabled audit device (if it was enabled) at: %s", path)) + + return 0 +} diff --git a/changelog/24336.txt b/changelog/24336.txt new file mode 100644 index 000000000000..786140ee326e --- /dev/null +++ b/changelog/24336.txt @@ -0,0 +1,163 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" +) + +func testAuditDisableCommand(tb testing.TB) (*cli.MockUi, *AuditDisableCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &AuditDisableCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestAuditDisableCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + nil, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar", "baz"}, + "Too many arguments", + 1, + }, + { + "not_real", + []string{"not_real"}, + "Success! Disabled audit device (if it was enabled) at: not_real/", + 0, + }, + { + "default", + []string{"file"}, + "Success! 
Disabled audit device (if it was enabled) at: file/", + 0, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": "discard", + }, + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testAuditDisableCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().EnableAuditWithOptions("integration_audit_disable", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": "discard", + }, + }); err != nil { + t.Fatal(err) + } + + ui, cmd := testAuditDisableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "integration_audit_disable/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Disabled audit device (if it was enabled) at: integration_audit_disable/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + mounts, err := client.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + if _, ok := mounts["integration_audit_disable"]; ok { + t.Errorf("expected mount to not exist: %#v", mounts) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testAuditDisableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "file", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error disabling audit device: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testAuditDisableCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/changelog/24343.txt b/changelog/24343.txt new file mode 100644 index 000000000000..a163f471cc4f --- /dev/null +++ b/changelog/24343.txt @@ -0,0 +1,159 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*AuditEnableCommand)(nil) + _ cli.CommandAutocomplete = (*AuditEnableCommand)(nil) +) + +type AuditEnableCommand struct { + *BaseCommand + + flagDescription string + flagPath string + flagLocal bool + + testStdin io.Reader // For tests +} + +func (c *AuditEnableCommand) Synopsis() string { + return "Enables an audit device" +} + +func (c *AuditEnableCommand) Help() string { + helpText := ` +Usage: vault audit enable [options] TYPE [CONFIG K=V...] + + Enables an audit device at a given path. + + This command enables an audit device of TYPE. Additional options for + configuring the audit device can be specified after the type in the same + format as the "vault write" command in key/value pairs. + + For example, to configure the file audit device to write audit logs at the + path "/var/log/audit.log": + + $ vault audit enable file file_path=/var/log/audit.log + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *AuditEnableCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "description", + Target: &c.flagDescription, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "Human-friendly description for the purpose of this audit " + + "device.", + }) + + f.StringVar(&StringVar{ + Name: "path", + Target: &c.flagPath, + Default: "", // The default is complex, so we have to manually document + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "Place where the audit device will be accessible. This must be " + + "unique across all audit devices. 
This defaults to the \"type\" of the " + + "audit device.", + }) + + f.BoolVar(&BoolVar{ + Name: "local", + Target: &c.flagLocal, + Default: false, + EnvVar: "", + Usage: "Mark the audit device as a local-only device. Local devices " + + "are not replicated or removed by replication.", + }) + + return set +} + +func (c *AuditEnableCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictSet( + "file", + "syslog", + "socket", + ) +} + +func (c *AuditEnableCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AuditEnableCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) < 1 { + c.UI.Error("Error enabling audit device: audit type missing. Valid types include 'file', 'socket' and 'syslog'.") + return 1 + } + + // Grab the type + auditType := strings.TrimSpace(args[0]) + + auditPath := c.flagPath + if auditPath == "" { + auditPath = auditType + } + auditPath = ensureTrailingSlash(auditPath) + + // Pull our fake stdin if needed + stdin := (io.Reader)(os.Stdin) + if c.testStdin != nil { + stdin = c.testStdin + } + + options, err := parseArgsDataString(stdin, args[1:]) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if err := client.Sys().EnableAuditWithOptions(auditPath, &api.EnableAuditOptions{ + Type: auditType, + Description: c.flagDescription, + Options: options, + Local: c.flagLocal, + }); err != nil { + c.UI.Error(fmt.Sprintf("Error enabling audit device: %s", err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! 
Enabled the %s audit device at: %s", auditType, auditPath)) + return 0 +} diff --git a/command/agent.go b/command/agent.go index 220679beb35b..6c0c769e23ed 100644 --- a/command/agent.go +++ b/command/agent.go @@ -4,1250 +4,211 @@ package command import ( - "context" - "crypto/tls" - "errors" - "flag" - "fmt" - "io" - "net" - "net/http" + "io/ioutil" "os" - "sort" "strings" - "sync" - "time" + "testing" - systemd "github.com/coreos/go-systemd/daemon" - ctconfig "github.com/hashicorp/consul-template/config" - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-secure-stdlib/gatedwriter" - "github.com/hashicorp/go-secure-stdlib/parseutil" - "github.com/hashicorp/go-secure-stdlib/reloadutil" - "github.com/hashicorp/vault/api" - agentConfig "github.com/hashicorp/vault/command/agent/config" - "github.com/hashicorp/vault/command/agent/exec" - "github.com/hashicorp/vault/command/agent/template" - "github.com/hashicorp/vault/command/agentproxyshared" - "github.com/hashicorp/vault/command/agentproxyshared/auth" - cache "github.com/hashicorp/vault/command/agentproxyshared/cache" - "github.com/hashicorp/vault/command/agentproxyshared/sink" - "github.com/hashicorp/vault/command/agentproxyshared/sink/file" - "github.com/hashicorp/vault/command/agentproxyshared/sink/inmem" - "github.com/hashicorp/vault/command/agentproxyshared/winsvc" - "github.com/hashicorp/vault/helper/logging" - "github.com/hashicorp/vault/helper/metricsutil" - "github.com/hashicorp/vault/helper/useragent" - "github.com/hashicorp/vault/internalshared/configutil" - "github.com/hashicorp/vault/internalshared/listenerutil" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/version" - "github.com/kr/pretty" - "github.com/mitchellh/cli" - "github.com/oklog/run" - "github.com/posener/complete" - "golang.org/x/text/cases" - "golang.org/x/text/language" - "google.golang.org/grpc/test/bufconn" + 
"github.com/hashicorp/cli" ) -var ( - _ cli.Command = (*AgentCommand)(nil) - _ cli.CommandAutocomplete = (*AgentCommand)(nil) -) - -const ( - // flagNameAgentExitAfterAuth is used as an Agent specific flag to indicate - // that agent should exit after a single successful auth - flagNameAgentExitAfterAuth = "exit-after-auth" -) - -type AgentCommand struct { - *BaseCommand - logFlags logFlags - - config *agentConfig.Config - - ShutdownCh chan struct{} - SighupCh chan struct{} - - tlsReloadFuncsLock sync.RWMutex - tlsReloadFuncs []reloadutil.ReloadFunc - - logWriter io.Writer - logGate *gatedwriter.Writer - logger log.Logger - - // Telemetry object - metricsHelper *metricsutil.MetricsHelper - - cleanupGuard sync.Once - - startedCh chan struct{} // for tests - reloadedCh chan struct{} // for tests - - flagConfigs []string - flagExitAfterAuth bool - flagTestVerifyOnly bool -} - -func (c *AgentCommand) Synopsis() string { - return "Start a Vault agent" -} - -func (c *AgentCommand) Help() string { - helpText := ` -Usage: vault agent [options] - - This command starts a Vault Agent that can perform automatic authentication - in certain environments. - - Start an agent with a configuration file: - - $ vault agent -config=/etc/vault/config.hcl - - For a full list of examples, please see the documentation. - -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *AgentCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP) - - f := set.NewFlagSet("Command Options") - - // Augment with the log flags - f.addLogFlags(&c.logFlags) - - f.StringSliceVar(&StringSliceVar{ - Name: "config", - Target: &c.flagConfigs, - Completion: complete.PredictOr( - complete.PredictFiles("*.hcl"), - complete.PredictFiles("*.json"), - ), - Usage: "Path to a configuration file. 
This configuration file should " + - "contain only agent directives.", - }) - - f.BoolVar(&BoolVar{ - Name: flagNameAgentExitAfterAuth, - Target: &c.flagExitAfterAuth, - Default: false, - Usage: "If set to true, the agent will exit with code 0 after a single " + - "successful auth, where success means that a token was retrieved and " + - "all sinks successfully wrote it", - }) - - // Internal-only flags to follow. - // - // Why hello there little source code reader! Welcome to the Vault source - // code. The remaining options are intentionally undocumented and come with - // no warranty or backwards-compatibility promise. Do not use these flags - // in production. Do not build automation using these flags. Unless you are - // developing against Vault, you should not need any of these flags. - f.BoolVar(&BoolVar{ - Name: "test-verify-only", - Target: &c.flagTestVerifyOnly, - Default: false, - Hidden: true, - }) - - // End internal-only flags. - - return set -} - -func (c *AgentCommand) AutocompleteArgs() complete.Predictor { - return complete.PredictNothing -} - -func (c *AgentCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *AgentCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - // Create a logger. We wrap it in a gated writer so that it doesn't - // start logging too early. 
- c.logGate = gatedwriter.NewWriter(os.Stderr) - c.logWriter = c.logGate - - if c.logFlags.flagCombineLogs { - c.logWriter = os.Stdout - } - - // Validation - if len(c.flagConfigs) < 1 { - c.UI.Error("Must specify exactly at least one config path using -config") - return 1 - } - - config, err := c.loadConfig(c.flagConfigs) - if err != nil { - c.outputErrors(err) - return 1 - } - - if config.AutoAuth == nil { - c.UI.Info("No auto_auth block found in config, the automatic authentication feature will not be started") - } - - c.applyConfigOverrides(f, config) // This only needs to happen on start-up to aggregate config from flags and env vars - c.config = config - - l, err := c.newLogger() - if err != nil { - c.outputErrors(err) - return 1 - } - c.logger = l - - infoKeys := make([]string, 0, 10) - info := make(map[string]string) - info["log level"] = config.LogLevel - infoKeys = append(infoKeys, "log level") - - infoKeys = append(infoKeys, "version") - verInfo := version.GetVersion() - info["version"] = verInfo.FullVersionNumber(false) - if verInfo.Revision != "" { - info["version sha"] = strings.Trim(verInfo.Revision, "'") - infoKeys = append(infoKeys, "version sha") - } - infoKeys = append(infoKeys, "cgo") - info["cgo"] = "disabled" - if version.CgoEnabled { - info["cgo"] = "enabled" - } - - // Tests might not want to start a vault server and just want to verify - // the configuration. - if c.flagTestVerifyOnly { - if os.Getenv("VAULT_TEST_VERIFY_ONLY_DUMP_CONFIG") != "" { - c.UI.Output(fmt.Sprintf( - "\nConfiguration:\n%s\n", - pretty.Sprint(*c.config))) - } - return 0 - } - - // Ignore any setting of Agent's address. This client is used by the Agent - // to reach out to Vault. This should never loop back to agent. 
- c.flagAgentProxyAddress = "" - client, err := c.Client() - if err != nil { - c.UI.Error(fmt.Sprintf( - "Error fetching client: %v", - err)) - return 1 - } - - serverHealth, err := client.Sys().Health() - if err == nil { - // We don't exit on error here, as this is not worth stopping Agent over - serverVersion := serverHealth.Version - agentVersion := version.GetVersion().VersionNumber() - if serverVersion != agentVersion { - c.UI.Info("==> Note: Vault Agent version does not match Vault server version. " + - fmt.Sprintf("Vault Agent version: %s, Vault server version: %s", agentVersion, serverVersion)) - } - } - - if config.IsDefaultListerDefined() { - // Notably, we cannot know for sure if they are using the API proxy functionality unless - // we log on each API proxy call, which would be too noisy. - // A customer could have a listener defined but only be using e.g. the cache-clear API, - // even though the API proxy is something they have available. - c.UI.Warn("==> Note: Vault Agent will be deprecating API proxy functionality in a future " + - "release, and this functionality has moved to a new subcommand, vault proxy. If you rely on this " + - "functionality, plan to move to Vault Proxy instead.") - } - - // ctx and cancelFunc are passed to the AuthHandler, SinkServer, ExecServer and - // TemplateServer that periodically listen for ctx.Done() to fire and shut - // down accordingly. 
- ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - - // telemetry configuration - inmemMetrics, _, prometheusEnabled, err := configutil.SetupTelemetry(&configutil.SetupTelemetryOpts{ - Config: config.Telemetry, - Ui: c.UI, - ServiceName: "vault", - DisplayName: "Vault", - UserAgent: useragent.AgentString(), - ClusterName: config.ClusterName, - }) - if err != nil { - c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err)) - return 1 - } - c.metricsHelper = metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled) - - var method auth.AuthMethod - var sinks []*sink.SinkConfig - var templateNamespace string - if config.AutoAuth != nil { - if client.Headers().Get(consts.NamespaceHeaderName) == "" && config.AutoAuth.Method.Namespace != "" { - client.SetNamespace(config.AutoAuth.Method.Namespace) - } - templateNamespace = client.Headers().Get(consts.NamespaceHeaderName) - - sinkClient, err := client.CloneWithHeaders() - if err != nil { - c.UI.Error(fmt.Sprintf("Error cloning client for file sink: %v", err)) - return 1 - } - - if config.DisableIdleConnsAutoAuth { - sinkClient.SetMaxIdleConnections(-1) - } - - if config.DisableKeepAlivesAutoAuth { - sinkClient.SetDisableKeepAlives(true) - } - - for _, sc := range config.AutoAuth.Sinks { - switch sc.Type { - case "file": - config := &sink.SinkConfig{ - Logger: c.logger.Named("sink.file"), - Config: sc.Config, - Client: sinkClient, - WrapTTL: sc.WrapTTL, - DHType: sc.DHType, - DeriveKey: sc.DeriveKey, - DHPath: sc.DHPath, - AAD: sc.AAD, - } - s, err := file.NewFileSink(config) - if err != nil { - c.UI.Error(fmt.Errorf("error creating file sink: %w", err).Error()) - return 1 - } - config.Sink = s - sinks = append(sinks, config) - default: - c.UI.Error(fmt.Sprintf("Unknown sink type %q", sc.Type)) - return 1 - } - } - - authConfig := &auth.AuthConfig{ - Logger: c.logger.Named(fmt.Sprintf("auth.%s", config.AutoAuth.Method.Type)), - MountPath: config.AutoAuth.Method.MountPath, - 
Config: config.AutoAuth.Method.Config, - } - method, err = agentproxyshared.GetAutoAuthMethodFromConfig(config.AutoAuth.Method.Type, authConfig, config.Vault.Address) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating %s auth method: %v", config.AutoAuth.Method.Type, err)) - return 1 - } - } - - // We do this after auto-auth has been configured, because we don't want to - // confuse the issue of retries for auth failures which have their own - // config and are handled a bit differently. - if os.Getenv(api.EnvVaultMaxRetries) == "" { - client.SetMaxRetries(ctconfig.DefaultRetryAttempts) - if config.Vault != nil { - if config.Vault.Retry != nil { - client.SetMaxRetries(config.Vault.Retry.NumRetries) - } - } - } - - enforceConsistency := cache.EnforceConsistencyNever - whenInconsistent := cache.WhenInconsistentFail - if config.APIProxy != nil { - switch config.APIProxy.EnforceConsistency { - case "always": - enforceConsistency = cache.EnforceConsistencyAlways - case "never", "": - default: - c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for enforce_consistency: %q", config.APIProxy.EnforceConsistency)) - return 1 - } - - switch config.APIProxy.WhenInconsistent { - case "retry": - whenInconsistent = cache.WhenInconsistentRetry - case "forward": - whenInconsistent = cache.WhenInconsistentForward - case "fail", "": - default: - c.UI.Error(fmt.Sprintf("Unknown api_proxy setting for when_inconsistent: %q", config.APIProxy.WhenInconsistent)) - return 1 - } - } - // Keep Cache configuration for legacy reasons, but error if defined alongside API Proxy - if config.Cache != nil { - switch config.Cache.EnforceConsistency { - case "always": - if enforceConsistency != cache.EnforceConsistencyNever { - c.UI.Error("enforce_consistency configured in both api_proxy and cache blocks. 
Please remove this configuration from the cache block.") - return 1 - } else { - enforceConsistency = cache.EnforceConsistencyAlways +func testAuditEnableCommand(tb testing.TB) (*cli.MockUi, *AuditEnableCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &AuditEnableCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestAuditEnableCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "empty", + nil, + "Error enabling audit device: audit type missing. Valid types include 'file', 'socket' and 'syslog'.", + 1, + }, + { + "not_a_valid_type", + []string{"nope_definitely_not_a_valid_type_like_ever"}, + "", + 2, + }, + { + "enable", + []string{"file", "file_path=discard"}, + "Success! Enabled the file audit device at: file/", + 0, + }, + { + "enable_path", + []string{ + "-path", "audit_path", + "file", + "file_path=discard", + }, + "Success! Enabled the file audit device at: audit_path/", + 0, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuditEnableCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) } - case "never", "": - default: - c.UI.Error(fmt.Sprintf("Unknown cache setting for enforce_consistency: %q", config.Cache.EnforceConsistency)) - return 1 - } - - switch config.Cache.WhenInconsistent { - case "retry": - if whenInconsistent != cache.WhenInconsistentFail { - c.UI.Error("when_inconsistent configured in both api_proxy and cache blocks. 
Please remove this configuration from the cache block.") - return 1 - } else { - whenInconsistent = cache.WhenInconsistentRetry - } - case "forward": - if whenInconsistent != cache.WhenInconsistentFail { - c.UI.Error("when_inconsistent configured in both api_proxy and cache blocks. Please remove this configuration from the cache block.") - return 1 - } else { - whenInconsistent = cache.WhenInconsistentForward - } - case "fail", "": - default: - c.UI.Error(fmt.Sprintf("Unknown cache setting for when_inconsistent: %q", config.Cache.WhenInconsistent)) - return 1 - } - } - - // Warn if cache _and_ cert auto-auth is enabled but certificates were not - // provided in the auto_auth.method["cert"].config stanza. - if config.Cache != nil && (config.AutoAuth != nil && config.AutoAuth.Method != nil && config.AutoAuth.Method.Type == "cert") { - _, okCertFile := config.AutoAuth.Method.Config["client_cert"] - _, okCertKey := config.AutoAuth.Method.Config["client_key"] - - // If neither of these exists in the cert stanza, agent will use the - // certs from the vault stanza. - if !okCertFile && !okCertKey { - c.UI.Warn(wrapAtLength("WARNING! Cache is enabled and using the same certificates " + - "from the 'cert' auto-auth method specified in the 'vault' stanza. Consider " + - "specifying certificate information in the 'cert' auto-auth's config stanza.")) - } - - } - - // Output the header that the agent has started - if !c.logFlags.flagCombineLogs { - c.UI.Output("==> Vault Agent started! 
Log data will stream in below:\n") - } - - var leaseCache *cache.LeaseCache - var previousToken string - - proxyClient, err := client.CloneWithHeaders() - if err != nil { - c.UI.Error(fmt.Sprintf("Error cloning client for proxying: %v", err)) - return 1 - } - - if config.DisableIdleConnsAPIProxy { - proxyClient.SetMaxIdleConnections(-1) - } - - if config.DisableKeepAlivesAPIProxy { - proxyClient.SetDisableKeepAlives(true) - } - - apiProxyLogger := c.logger.Named("apiproxy") - - // The API proxy to be used, if listeners are configured - apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{ - Client: proxyClient, - Logger: apiProxyLogger, - EnforceConsistency: enforceConsistency, - WhenInconsistentAction: whenInconsistent, - UserAgentStringFunction: useragent.AgentProxyStringWithProxiedUserAgent, - UserAgentString: useragent.AgentProxyString(), - }) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating API proxy: %v", err)) - return 1 - } - - // Parse agent cache configurations - if config.Cache != nil { - cacheLogger := c.logger.Named("cache") - - // Create the lease cache proxier and set its underlying proxier to - // the API proxier. 
- leaseCache, err = cache.NewLeaseCache(&cache.LeaseCacheConfig{ - Client: proxyClient, - BaseContext: ctx, - Proxier: apiProxy, - Logger: cacheLogger.Named("leasecache"), - CacheDynamicSecrets: true, - UserAgentToUse: useragent.ProxyAPIProxyString(), }) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating lease cache: %v", err)) - return 1 - } - - // Configure persistent storage and add to LeaseCache - if config.Cache.Persist != nil { - deferFunc, oldToken, err := agentproxyshared.AddPersistentStorageToLeaseCache(ctx, leaseCache, config.Cache.Persist, cacheLogger) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating persistent cache: %v", err)) - return 1 - } - previousToken = oldToken - if deferFunc != nil { - defer deferFunc() - } - } - } - - var listeners []net.Listener - - // If there are templates, add an in-process listener - if len(config.Templates) > 0 || len(config.EnvTemplates) > 0 { - config.Listeners = append(config.Listeners, &configutil.Listener{Type: listenerutil.BufConnType}) - } - - // Ensure we've added all the reload funcs for TLS before anyone triggers a reload. - c.tlsReloadFuncsLock.Lock() - - for i, lnConfig := range config.Listeners { - var ln net.Listener - var tlsCfg *tls.Config - - if lnConfig.Type == listenerutil.BufConnType { - inProcListener := bufconn.Listen(1024 * 1024) - if config.Cache != nil { - config.Cache.InProcDialer = listenerutil.NewBufConnWrapper(inProcListener) - } - ln = inProcListener - } else { - lnBundle, err := cache.StartListener(lnConfig) - if err != nil { - c.UI.Error(fmt.Sprintf("Error starting listener: %v", err)) - return 1 - } - - tlsCfg = lnBundle.TLSConfig - ln = lnBundle.Listener - - // Track the reload func, so we can reload later if needed. 
- c.tlsReloadFuncs = append(c.tlsReloadFuncs, lnBundle.TLSReloadFunc) - } - - listeners = append(listeners, ln) - - proxyVaultToken := true - var inmemSink sink.Sink - if config.APIProxy != nil { - if config.APIProxy.UseAutoAuthToken { - apiProxyLogger.Debug("auto-auth token is allowed to be used; configuring inmem sink") - inmemSink, err = inmem.New(&sink.SinkConfig{ - Logger: apiProxyLogger, - }, leaseCache) - if err != nil { - c.UI.Error(fmt.Sprintf("Error creating inmem sink for cache: %v", err)) - return 1 - } - sinks = append(sinks, &sink.SinkConfig{ - Logger: apiProxyLogger, - Sink: inmemSink, - }) - } - proxyVaultToken = !config.APIProxy.ForceAutoAuthToken - } - - var muxHandler http.Handler - if leaseCache != nil { - muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, leaseCache, inmemSink, proxyVaultToken) - } else { - muxHandler = cache.ProxyHandler(ctx, apiProxyLogger, apiProxy, inmemSink, proxyVaultToken) - } - - // Parse 'require_request_header' listener config option, and wrap - // the request handler if necessary - if lnConfig.RequireRequestHeader && ("metrics_only" != lnConfig.Role) { - muxHandler = verifyRequestHeader(muxHandler) - } - - // Create a muxer and add paths relevant for the lease cache layer - mux := http.NewServeMux() - quitEnabled := lnConfig.AgentAPI != nil && lnConfig.AgentAPI.EnableQuit - - mux.Handle(consts.AgentPathMetrics, c.handleMetrics()) - if "metrics_only" != lnConfig.Role { - mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx)) - mux.Handle(consts.AgentPathQuit, c.handleQuit(quitEnabled)) - mux.Handle("/", muxHandler) - } - - scheme := "https://" - if tlsCfg == nil { - scheme = "http://" - } - if ln.Addr().Network() == "unix" { - scheme = "unix://" - } - - infoKey := fmt.Sprintf("api address %d", i+1) - info[infoKey] = scheme + ln.Addr().String() - infoKeys = append(infoKeys, infoKey) - - server := &http.Server{ - Addr: ln.Addr().String(), - TLSConfig: tlsCfg, - Handler: mux, - ReadHeaderTimeout: 10 * 
time.Second, - ReadTimeout: 30 * time.Second, - IdleTimeout: 5 * time.Minute, - ErrorLog: apiProxyLogger.StandardLogger(nil), - } - - go server.Serve(ln) - } - - c.tlsReloadFuncsLock.Unlock() - - // Ensure that listeners are closed at all the exits - listenerCloseFunc := func() { - for _, ln := range listeners { - ln.Close() - } } - defer c.cleanupGuard.Do(listenerCloseFunc) - - // Inform any tests that the server is ready - if c.startedCh != nil { - close(c.startedCh) - } - - var g run.Group - - g.Add(func() error { - for { - select { - case <-c.SighupCh: - c.UI.Output("==> Vault Agent config reload triggered") - err := c.reloadConfig(c.flagConfigs) - if err != nil { - c.outputErrors(err) - } - // Send the 'reloaded' message on the relevant channel - select { - case c.reloadedCh <- struct{}{}: - default: - } - case <-ctx.Done(): - return nil - } - } - }, func(error) { - cancelFunc() - }) - - // This run group watches for signal termination - g.Add(func() error { - for { - select { - case <-c.ShutdownCh: - c.UI.Output("==> Vault Agent shutdown triggered") - // Notify systemd that the server is shutting down - // Let the lease cache know this is a shutdown; no need to evict everything - if leaseCache != nil { - leaseCache.SetShuttingDown(true) - } - return nil - case <-ctx.Done(): - return nil - case <-winsvc.ShutdownChannel(): - return nil - } - } - }, func(error) {}) - - // Start auto-auth and sink servers - if method != nil { - enableTemplateTokenCh := len(config.Templates) > 0 - enableEnvTemplateTokenCh := len(config.EnvTemplates) > 0 - - // Auth Handler is going to set its own retry values, so we want to - // work on a copy of the client to not affect other subsystems. 
- ahClient, err := c.client.CloneWithHeaders() - if err != nil { - c.UI.Error(fmt.Sprintf("Error cloning client for auth handler: %v", err)) - return 1 - } - - if config.DisableIdleConnsAutoAuth { - ahClient.SetMaxIdleConnections(-1) - } - - if config.DisableKeepAlivesAutoAuth { - ahClient.SetDisableKeepAlives(true) - } - - ah := auth.NewAuthHandler(&auth.AuthHandlerConfig{ - Logger: c.logger.Named("auth.handler"), - Client: ahClient, - WrapTTL: config.AutoAuth.Method.WrapTTL, - MinBackoff: config.AutoAuth.Method.MinBackoff, - MaxBackoff: config.AutoAuth.Method.MaxBackoff, - EnableReauthOnNewCredentials: config.AutoAuth.EnableReauthOnNewCredentials, - EnableTemplateTokenCh: enableTemplateTokenCh, - EnableExecTokenCh: enableEnvTemplateTokenCh, - Token: previousToken, - ExitOnError: config.AutoAuth.Method.ExitOnError, - UserAgent: useragent.AgentAutoAuthString(), - MetricsSignifier: "agent", - }) - - ss := sink.NewSinkServer(&sink.SinkServerConfig{ - Logger: c.logger.Named("sink.server"), - Client: ahClient, - ExitAfterAuth: config.ExitAfterAuth, - }) - - ts := template.NewServer(&template.ServerConfig{ - Logger: c.logger.Named("template.server"), - LogLevel: c.logger.GetLevel(), - LogWriter: c.logWriter, - AgentConfig: c.config, - Namespace: templateNamespace, - ExitAfterAuth: config.ExitAfterAuth, - }) - - es, err := exec.NewServer(&exec.ServerConfig{ - AgentConfig: c.config, - Namespace: templateNamespace, - Logger: c.logger.Named("exec.server"), - LogLevel: c.logger.GetLevel(), - LogWriter: c.logWriter, - }) - if err != nil { - c.logger.Error("could not create exec server", "error", err) - return 1 - } - - g.Add(func() error { - return ah.Run(ctx, method) - }, func(error) { - // Let the lease cache know this is a shutdown; no need to evict - // everything - if leaseCache != nil { - leaseCache.SetShuttingDown(true) - } - cancelFunc() - }) - - g.Add(func() error { - err := ss.Run(ctx, ah.OutputCh, sinks) - c.logger.Info("sinks finished, exiting") - // Start 
goroutine to drain from ah.OutputCh from this point onward - // to prevent ah.Run from being blocked. - go func() { - for { - select { - case <-ctx.Done(): - return - case <-ah.OutputCh: - } - } - }() - - // Wait until templates are rendered - if len(config.Templates) > 0 { - <-ts.DoneCh - } + t.Run("integration", func(t *testing.T) { + t.Parallel() - return err - }, func(error) { - // Let the lease cache know this is a shutdown; no need to evict - // everything - if leaseCache != nil { - leaseCache.SetShuttingDown(true) - } - cancelFunc() - }) + client, closer := testVaultServer(t) + defer closer() - g.Add(func() error { - return ts.Run(ctx, ah.TemplateTokenCh, config.Templates) - }, func(error) { - // Let the lease cache know this is a shutdown; no need to evict - // everything - if leaseCache != nil { - leaseCache.SetShuttingDown(true) - } - cancelFunc() - ts.Stop() - }) + ui, cmd := testAuditEnableCommand(t) + cmd.client = client - g.Add(func() error { - return es.Run(ctx, ah.ExecTokenCh) - }, func(err error) { - // Let the lease cache know this is a shutdown; no need to evict - // everything - if leaseCache != nil { - leaseCache.SetShuttingDown(true) - } - cancelFunc() - es.Close() + code := cmd.Run([]string{ + "-path", "audit_enable_integration/", + "-description", "The best kind of test", + "file", + "file_path=discard", }) - - } - - // Server configuration output - padding := 24 - sort.Strings(infoKeys) - caser := cases.Title(language.English) - c.UI.Output("==> Vault Agent configuration:\n") - for _, k := range infoKeys { - c.UI.Output(fmt.Sprintf( - "%s%s: %s", - strings.Repeat(" ", padding-len(k)), - caser.String(k), - info[k])) - } - c.UI.Output("") - - // Release the log gate. 
- c.logGate.Flush() - - // Write out the PID to the file now that server has successfully started - if err := c.storePidFile(config.PidFile); err != nil { - c.UI.Error(fmt.Sprintf("Error storing PID: %s", err)) - return 1 - } - - // Notify systemd that the server is ready (if applicable) - c.notifySystemd(systemd.SdNotifyReady) - - defer func() { - if err := c.removePidFile(config.PidFile); err != nil { - c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err)) - } - }() - - var exitCode int - if err := g.Run(); err != nil { - var processExitError *exec.ProcessExitError - if errors.As(err, &processExitError) { - exitCode = processExitError.ExitCode - } else { - exitCode = 1 + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) } - if exitCode != 0 { - c.logger.Error("runtime error encountered", "error", err, "exitCode", exitCode) - c.UI.Error("Error encountered during run, refer to logs for more details.") + expected := "Success! Enabled the file audit device at: audit_enable_integration/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) } - } - - c.notifySystemd(systemd.SdNotifyStopping) - - return exitCode -} -// applyConfigOverrides ensures that the config object accurately reflects the desired -// settings as configured by the user. It applies the relevant config setting based -// on the precedence (env var overrides file config, cli overrides env var). -// It mutates the config object supplied. 
-func (c *AgentCommand) applyConfigOverrides(f *FlagSets, config *agentConfig.Config) { - if config.Vault == nil { - config.Vault = &agentConfig.Vault{} - } - - f.applyLogConfigOverrides(config.SharedConfig) - - f.Visit(func(fl *flag.Flag) { - if fl.Name == flagNameAgentExitAfterAuth { - config.ExitAfterAuth = c.flagExitAfterAuth + audits, err := client.Sys().ListAudit() + if err != nil { + t.Fatal(err) } - }) - c.setStringFlag(f, config.Vault.Address, &StringVar{ - Name: flagNameAddress, - Target: &c.flagAddress, - Default: "https://127.0.0.1:8200", - EnvVar: api.EnvVaultAddress, - }) - config.Vault.Address = c.flagAddress - c.setStringFlag(f, config.Vault.CACert, &StringVar{ - Name: flagNameCACert, - Target: &c.flagCACert, - Default: "", - EnvVar: api.EnvVaultCACert, - }) - config.Vault.CACert = c.flagCACert - c.setStringFlag(f, config.Vault.CAPath, &StringVar{ - Name: flagNameCAPath, - Target: &c.flagCAPath, - Default: "", - EnvVar: api.EnvVaultCAPath, - }) - config.Vault.CAPath = c.flagCAPath - c.setStringFlag(f, config.Vault.ClientCert, &StringVar{ - Name: flagNameClientCert, - Target: &c.flagClientCert, - Default: "", - EnvVar: api.EnvVaultClientCert, - }) - config.Vault.ClientCert = c.flagClientCert - c.setStringFlag(f, config.Vault.ClientKey, &StringVar{ - Name: flagNameClientKey, - Target: &c.flagClientKey, - Default: "", - EnvVar: api.EnvVaultClientKey, - }) - config.Vault.ClientKey = c.flagClientKey - c.setBoolFlag(f, config.Vault.TLSSkipVerify, &BoolVar{ - Name: flagNameTLSSkipVerify, - Target: &c.flagTLSSkipVerify, - Default: false, - EnvVar: api.EnvVaultSkipVerify, - }) - config.Vault.TLSSkipVerify = c.flagTLSSkipVerify - c.setStringFlag(f, config.Vault.TLSServerName, &StringVar{ - Name: flagTLSServerName, - Target: &c.flagTLSServerName, - Default: "", - EnvVar: api.EnvVaultTLSServerName, - }) - config.Vault.TLSServerName = c.flagTLSServerName -} - -// verifyRequestHeader wraps an http.Handler inside a Handler that checks for -// the request header 
that is used for SSRF protection. -func verifyRequestHeader(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if val, ok := r.Header[consts.RequestHeaderName]; !ok || len(val) != 1 || val[0] != "true" { - logical.RespondError(w, - http.StatusPreconditionFailed, - fmt.Errorf("missing %q header", consts.RequestHeaderName)) - return + auditInfo, ok := audits["audit_enable_integration/"] + if !ok { + t.Fatalf("expected audit to exist") } - - handler.ServeHTTP(w, r) - }) -} - -func (c *AgentCommand) notifySystemd(status string) { - sent, err := systemd.SdNotify(false, status) - if err != nil { - c.logger.Error("error notifying systemd", "error", err) - } else { - if sent { - c.logger.Debug("sent systemd notification", "notification", status) - } else { - c.logger.Debug("would have sent systemd notification (systemd not present)", "notification", status) + if exp := "file"; auditInfo.Type != exp { + t.Errorf("expected %q to be %q", auditInfo.Type, exp) } - } -} - -func (c *AgentCommand) setStringFlag(f *FlagSets, configVal string, fVar *StringVar) { - var isFlagSet bool - f.Visit(func(f *flag.Flag) { - if f.Name == fVar.Name { - isFlagSet = true + if exp := "The best kind of test"; auditInfo.Description != exp { + t.Errorf("expected %q to be %q", auditInfo.Description, exp) } - }) - - flagEnvValue, flagEnvSet := os.LookupEnv(fVar.EnvVar) - switch { - case isFlagSet: - // Don't do anything as the flag is already set from the command line - case flagEnvSet: - // Use value from env var - *fVar.Target = flagEnvValue - case configVal != "": - // Use value from config - *fVar.Target = configVal - default: - // Use the default value - *fVar.Target = fVar.Default - } -} -func (c *AgentCommand) setBoolFlag(f *FlagSets, configVal bool, fVar *BoolVar) { - var isFlagSet bool - f.Visit(func(f *flag.Flag) { - if f.Name == fVar.Name { - isFlagSet = true + filePath, ok := auditInfo.Options["file_path"] + if !ok || filePath != 
"discard" { + t.Errorf("missing some options: %#v", auditInfo) } }) - flagEnvValue, flagEnvSet := os.LookupEnv(fVar.EnvVar) - switch { - case isFlagSet: - // Don't do anything as the flag is already set from the command line - case flagEnvSet: - // Use value from env var - *fVar.Target = flagEnvValue != "" - case configVal: - // Use value from config - *fVar.Target = configVal - default: - // Use the default value - *fVar.Target = fVar.Default - } -} - -// storePidFile is used to write out our PID to a file if necessary -func (c *AgentCommand) storePidFile(pidPath string) error { - // Quit fast if no pidfile - if pidPath == "" { - return nil - } - - // Open the PID file - pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600) - if err != nil { - return fmt.Errorf("could not open pid file: %w", err) - } - defer pidFile.Close() - - // Write out the PID - pid := os.Getpid() - _, err = pidFile.WriteString(fmt.Sprintf("%d", pid)) - if err != nil { - return fmt.Errorf("could not write to pid file: %w", err) - } - return nil -} - -// removePidFile is used to cleanup the PID file if necessary -func (c *AgentCommand) removePidFile(pidPath string) error { - if pidPath == "" { - return nil - } - return os.Remove(pidPath) -} + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() -func (c *AgentCommand) handleMetrics() http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodGet { - logical.RespondError(w, http.StatusMethodNotAllowed, nil) - return - } + client, closer := testVaultServerBad(t) + defer closer() - if err := r.ParseForm(); err != nil { - logical.RespondError(w, http.StatusBadRequest, err) - return - } + ui, cmd := testAuditEnableCommand(t) + cmd.client = client - format := r.Form.Get("format") - if format == "" { - format = metricsutil.FormatFromRequest(&logical.Request{ - Headers: r.Header, - }) + code := cmd.Run([]string{ + "pki", + }) + if exp := 2; code != exp { + 
t.Errorf("expected %d to be %d", code, exp) } - resp := c.metricsHelper.ResponseForFormat(format) - - status := resp.Data[logical.HTTPStatusCode].(int) - w.Header().Set("Content-Type", resp.Data[logical.HTTPContentType].(string)) - switch v := resp.Data[logical.HTTPRawBody].(type) { - case string: - w.WriteHeader(status) - w.Write([]byte(v)) - case []byte: - w.WriteHeader(status) - w.Write(v) - default: - logical.RespondError(w, http.StatusInternalServerError, fmt.Errorf("wrong response returned")) + expected := "Error enabling audit device: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) } }) -} -func (c *AgentCommand) handleQuit(enabled bool) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !enabled { - w.WriteHeader(http.StatusNotFound) - return - } - - switch r.Method { - case http.MethodPost: - default: - w.WriteHeader(http.StatusMethodNotAllowed) - return - } + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() - c.logger.Debug("received quit request") - close(c.ShutdownCh) + _, cmd := testAuditEnableCommand(t) + assertNoTabs(t, cmd) }) -} - -// newLogger creates a logger based on parsed config field on the Agent Command struct. 
-func (c *AgentCommand) newLogger() (log.InterceptLogger, error) { - if c.config == nil { - return nil, fmt.Errorf("cannot create logger, no config") - } - - var errors error - - // Parse all the log related config - logLevel, err := logging.ParseLogLevel(c.config.LogLevel) - if err != nil { - errors = multierror.Append(errors, err) - } - - logFormat, err := logging.ParseLogFormat(c.config.LogFormat) - if err != nil { - errors = multierror.Append(errors, err) - } - - logRotateDuration, err := parseutil.ParseDurationSecond(c.config.LogRotateDuration) - if err != nil { - errors = multierror.Append(errors, err) - } - if errors != nil { - return nil, errors - } - - logCfg := &logging.LogConfig{ - Name: "agent", - LogLevel: logLevel, - LogFormat: logFormat, - LogFilePath: c.config.LogFile, - LogRotateDuration: logRotateDuration, - LogRotateBytes: c.config.LogRotateBytes, - LogRotateMaxFiles: c.config.LogRotateMaxFiles, - } - - l, err := logging.Setup(logCfg, c.logWriter) - if err != nil { - return nil, err - } + t.Run("mount_all", func(t *testing.T) { + t.Parallel() - return l, nil -} - -// loadConfig attempts to generate an Agent config from the file(s) specified. 
-func (c *AgentCommand) loadConfig(paths []string) (*agentConfig.Config, error) { - var errors error - cfg := agentConfig.NewConfig() + client, closer := testVaultServerAllBackends(t) + defer closer() - for _, configPath := range paths { - configFromPath, err := agentConfig.LoadConfig(configPath) + files, err := ioutil.ReadDir("../builtin/audit") if err != nil { - errors = multierror.Append(errors, fmt.Errorf("error loading configuration from %s: %w", configPath, err)) - } else { - cfg = cfg.Merge(configFromPath) + t.Fatal(err) } - } - - if errors != nil { - return nil, errors - } - - if err := cfg.ValidateConfig(); err != nil { - return nil, fmt.Errorf("error validating configuration: %w", err) - } - - return cfg, nil -} - -// reloadConfig will attempt to reload the config from file(s) and adjust certain -// config values without requiring a restart of the Vault Agent. -// If config is retrieved without error it is stored in the config field of the AgentCommand. -// This operation is not atomic and could result in updated config but partially applied config settings. -// The error returned from this func may be a multierror. -// This function will most likely be called due to Vault Agent receiving a SIGHUP signal. -// Currently only reloading the following are supported: -// * log level -// * TLS certs for listeners -func (c *AgentCommand) reloadConfig(paths []string) error { - // Notify systemd that the server is reloading - c.notifySystemd(systemd.SdNotifyReloading) - defer c.notifySystemd(systemd.SdNotifyReady) - - var errors error - - // Reload the config - cfg, err := c.loadConfig(paths) - if err != nil { - // Returning single error as we won't continue with bad config and won't 'commit' it. 
- return err - } - c.config = cfg - - // Update the log level - err = c.reloadLogLevel() - if err != nil { - errors = multierror.Append(errors, err) - } - // Update certs - err = c.reloadCerts() - if err != nil { - errors = multierror.Append(errors, err) - } - - return errors -} - -// reloadLogLevel will attempt to update the log level for the logger attached -// to the AgentComment struct using the value currently set in config. -func (c *AgentCommand) reloadLogLevel() error { - logLevel, err := logging.ParseLogLevel(c.config.LogLevel) - if err != nil { - return err - } - - c.logger.SetLevel(logLevel) - - return nil -} - -// reloadCerts will attempt to reload certificates using a reload func which -// was provided when the listeners were configured, only funcs that were appended -// to the AgentCommand slice will be invoked. -// This function returns a multierror type so that every func can report an error -// if it encounters one. -func (c *AgentCommand) reloadCerts() error { - var errors error - - c.tlsReloadFuncsLock.RLock() - defer c.tlsReloadFuncsLock.RUnlock() - - for _, reloadFunc := range c.tlsReloadFuncs { - // Non-TLS listeners will have a nil reload func. 
- if reloadFunc != nil { - err := reloadFunc() - if err != nil { - errors = multierror.Append(errors, err) + var backends []string + for _, f := range files { + if f.IsDir() { + backends = append(backends, f.Name()) } } - } - return errors -} + for _, b := range backends { + ui, cmd := testAuditEnableCommand(t) + cmd.client = client -// outputErrors will take an error or multierror and handle outputting each to the UI -func (c *AgentCommand) outputErrors(err error) { - if err != nil { - if me, ok := err.(*multierror.Error); ok { - for _, err := range me.Errors { - c.UI.Error(err.Error()) + args := []string{ + b, + } + switch b { + case "file": + args = append(args, "file_path=discard") + case "socket": + args = append(args, "address=127.0.0.1:8888", + "skip_test=true") + case "syslog": + if _, exists := os.LookupEnv("WSLENV"); exists { + t.Log("skipping syslog test on WSL") + continue + } + if os.Getenv("CIRCLECI") == "true" { + // TODO install syslog in docker image we run our tests in + t.Log("skipping syslog test on CircleCI") + continue + } + } + code := cmd.Run(args) + if exp := 0; code != exp { + t.Errorf("type %s, expected %d to be %d - %s", b, code, exp, ui.OutputWriter.String()+ui.ErrorWriter.String()) } - } else { - c.UI.Error(err.Error()) } - } + }) } diff --git a/command/agent/internal/ctmanager/runner_config.go b/command/agent/internal/ctmanager/runner_config.go index 21b3bc8e82a1..cf3a16f0f079 100644 --- a/command/agent/internal/ctmanager/runner_config.go +++ b/command/agent/internal/ctmanager/runner_config.go @@ -1,152 +1,171 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: BUSL-1.1 -package ctmanager +package command import ( "fmt" - "io" + "sort" "strings" - ctconfig "github.com/hashicorp/consul-template/config" - ctlogging "github.com/hashicorp/consul-template/logging" - "github.com/hashicorp/go-hclog" + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) - "github.com/hashicorp/vault/command/agent/config" - "github.com/hashicorp/vault/sdk/helper/pointerutil" +var ( + _ cli.Command = (*AuditListCommand)(nil) + _ cli.CommandAutocomplete = (*AuditListCommand)(nil) ) -type ManagerConfig struct { - AgentConfig *config.Config - Namespace string - LogLevel hclog.Level - LogWriter io.Writer +type AuditListCommand struct { + *BaseCommand + + flagDetailed bool } -// NewConfig returns a consul-template runner configuration, setting the -// Vault and Consul configurations based on the clients configs. -func NewConfig(mc ManagerConfig, templates ctconfig.TemplateConfigs) (*ctconfig.Config, error) { - conf := ctconfig.DefaultConfig() - conf.Templates = templates.Copy() +func (c *AuditListCommand) Synopsis() string { + return "Lists enabled audit devices" +} - // Setup the Vault config - // Always set these to ensure nothing is picked up from the environment - conf.Vault.RenewToken = pointerutil.BoolPtr(false) - conf.Vault.Token = pointerutil.StringPtr("") - conf.Vault.Address = &mc.AgentConfig.Vault.Address +func (c *AuditListCommand) Help() string { + helpText := ` +Usage: vault audit list [options] - if mc.Namespace != "" { - conf.Vault.Namespace = &mc.Namespace - } + Lists the enabled audit devices in the Vault server. The output lists the + enabled audit devices and the options for those devices. 
- if mc.AgentConfig.TemplateConfig != nil && mc.AgentConfig.TemplateConfig.StaticSecretRenderInt != 0 { - conf.Vault.DefaultLeaseDuration = &mc.AgentConfig.TemplateConfig.StaticSecretRenderInt - } + List all audit devices: + + $ vault audit list + + List detailed output about the audit devices: + + $ vault audit list -detailed + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *AuditListCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "detailed", + Target: &c.flagDetailed, + Default: false, + EnvVar: "", + Usage: "Print detailed information such as options and replication " + + "status about each auth device.", + }) + + return set +} + +func (c *AuditListCommand) AutocompleteArgs() complete.Predictor { + return nil +} - if mc.AgentConfig.DisableIdleConnsTemplating { - idleConns := -1 - conf.Vault.Transport.MaxIdleConns = &idleConns +func (c *AuditListCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AuditListCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 } - if mc.AgentConfig.DisableKeepAlivesTemplating { - conf.Vault.Transport.DisableKeepAlives = pointerutil.BoolPtr(true) + args = f.Args() + if len(args) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) + return 1 } - conf.Vault.SSL = &ctconfig.SSLConfig{ - Enabled: pointerutil.BoolPtr(false), - Verify: pointerutil.BoolPtr(false), - Cert: pointerutil.StringPtr(""), - Key: pointerutil.StringPtr(""), - CaCert: pointerutil.StringPtr(""), - CaPath: pointerutil.StringPtr(""), - ServerName: pointerutil.StringPtr(""), + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 } - // If Vault.Retry isn't specified, use the default of 12 retries. 
- // This retry value will be respected regardless of if we use the cache. - attempts := ctconfig.DefaultRetryAttempts - if mc.AgentConfig.Vault != nil && mc.AgentConfig.Vault.Retry != nil { - attempts = mc.AgentConfig.Vault.Retry.NumRetries + audits, err := client.Sys().ListAudit() + if err != nil { + c.UI.Error(fmt.Sprintf("Error listing audits: %s", err)) + return 2 } - // Use the cache if available or fallback to the Vault server values. - if mc.AgentConfig.Cache != nil { - if mc.AgentConfig.Cache.InProcDialer == nil { - return nil, fmt.Errorf("missing in-process dialer configuration") + switch Format(c.UI) { + case "table": + if len(audits) == 0 { + c.UI.Output("No audit devices are enabled.") + return 2 } - if conf.Vault.Transport == nil { - conf.Vault.Transport = &ctconfig.TransportConfig{} - } - conf.Vault.Transport.CustomDialer = mc.AgentConfig.Cache.InProcDialer - // The in-process dialer ignores the address passed in, but we're still - // setting it here to override the setting at the top of this function, - // and to prevent the vault/http client from defaulting to https. 
- conf.Vault.Address = pointerutil.StringPtr("http://127.0.0.1:8200") - } else if strings.HasPrefix(mc.AgentConfig.Vault.Address, "https") || mc.AgentConfig.Vault.CACert != "" { - skipVerify := mc.AgentConfig.Vault.TLSSkipVerify - verify := !skipVerify - conf.Vault.SSL = &ctconfig.SSLConfig{ - Enabled: pointerutil.BoolPtr(true), - Verify: &verify, - Cert: &mc.AgentConfig.Vault.ClientCert, - Key: &mc.AgentConfig.Vault.ClientKey, - CaCert: &mc.AgentConfig.Vault.CACert, - CaPath: &mc.AgentConfig.Vault.CAPath, - ServerName: &mc.AgentConfig.Vault.TLSServerName, + + if c.flagDetailed { + c.UI.Output(tableOutput(c.detailedAudits(audits), nil)) + return 0 } + c.UI.Output(tableOutput(c.simpleAudits(audits), nil)) + return 0 + default: + return OutputData(c.UI, audits) + } +} + +func (c *AuditListCommand) simpleAudits(audits map[string]*api.Audit) []string { + paths := make([]string, 0, len(audits)) + for path := range audits { + paths = append(paths, path) } - enabled := attempts > 0 - conf.Vault.Retry = &ctconfig.RetryConfig{ - Attempts: &attempts, - Enabled: &enabled, + sort.Strings(paths) + + columns := []string{"Path | Type | Description"} + for _, path := range paths { + audit := audits[path] + columns = append(columns, fmt.Sprintf("%s | %s | %s", + audit.Path, + audit.Type, + audit.Description, + )) } - // Sync Consul Template's retry with user set auto-auth initial backoff value. - // This is helpful if Auto Auth cannot get a new token and CT is trying to fetch - // secrets. 
- if mc.AgentConfig.AutoAuth != nil && mc.AgentConfig.AutoAuth.Method != nil { - if mc.AgentConfig.AutoAuth.Method.MinBackoff > 0 { - conf.Vault.Retry.Backoff = &mc.AgentConfig.AutoAuth.Method.MinBackoff - } + return columns +} - if mc.AgentConfig.AutoAuth.Method.MaxBackoff > 0 { - conf.Vault.Retry.MaxBackoff = &mc.AgentConfig.AutoAuth.Method.MaxBackoff - } +func (c *AuditListCommand) detailedAudits(audits map[string]*api.Audit) []string { + paths := make([]string, 0, len(audits)) + for path := range audits { + paths = append(paths, path) } + sort.Strings(paths) - conf.Finalize() + columns := []string{"Path | Type | Description | Replication | Options"} + for _, path := range paths { + audit := audits[path] - // setup log level from TemplateServer config - conf.LogLevel = logLevelToStringPtr(mc.LogLevel) + opts := make([]string, 0, len(audit.Options)) + for k, v := range audit.Options { + opts = append(opts, k+"="+v) + } - if err := ctlogging.Setup(&ctlogging.Config{ - Level: *conf.LogLevel, - Writer: mc.LogWriter, - }); err != nil { - return nil, err - } - return conf, nil -} + replication := "replicated" + if audit.Local { + replication = "local" + } -// logLevelToString converts a go-hclog level to a matching, uppercase string -// value. It's used to convert Vault Agent's hclog level to a string version -// suitable for use in Consul Template's runner configuration input. -func logLevelToStringPtr(level hclog.Level) *string { - // consul template's default level is WARN, but Vault Agent's default is INFO, - // so we use that for the Runner's default. 
- var levelStr string - - switch level { - case hclog.Trace: - levelStr = "TRACE" - case hclog.Debug: - levelStr = "DEBUG" - case hclog.Warn: - levelStr = "WARN" - case hclog.Error: - levelStr = "ERR" - default: - levelStr = "INFO" + columns = append(columns, fmt.Sprintf("%s | %s | %s | %s | %s", + path, + audit.Type, + audit.Description, + replication, + strings.Join(opts, " "), + )) } - return pointerutil.StringPtr(levelStr) + + return columns } diff --git a/command/agent_generate_config.go b/command/agent_generate_config.go index f2ff1abf9559..43ddbacf91f2 100644 --- a/command/agent_generate_config.go +++ b/command/agent_generate_config.go @@ -4,439 +4,111 @@ package command import ( - "context" - "fmt" - "io" - "os" - paths "path" - "sort" "strings" - "unicode" + "testing" - "github.com/hashicorp/hcl/v2/gohcl" - "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" - "github.com/mitchellh/go-homedir" - "github.com/posener/complete" ) -var ( - _ cli.Command = (*AgentGenerateConfigCommand)(nil) - _ cli.CommandAutocomplete = (*AgentGenerateConfigCommand)(nil) -) - -type AgentGenerateConfigCommand struct { - *BaseCommand - - flagType string - flagPaths []string - flagExec string -} - -func (c *AgentGenerateConfigCommand) Synopsis() string { - return "Generate a Vault Agent configuration file." -} - -func (c *AgentGenerateConfigCommand) Help() string { - helpText := ` -Usage: vault agent generate-config [options] [path/to/config.hcl] - - Generates a simple Vault Agent configuration file from the given parameters. - - Currently, the only supported configuration type is 'env-template', which - helps you generate a configuration file with environment variable templates - for running Vault Agent in process supervisor mode. - - For every specified secret -path, the command will attempt to generate one or - multiple 'env_template' entries based on the JSON key(s) stored in the - specified secret. 
If the secret -path ends with '/*', the command will - attempt to recurse through the secrets tree rooted at the given path, - generating 'env_template' entries for each encountered secret. Currently, - only kv-v1 and kv-v2 paths are supported. - - The command specified in the '-exec' option will be used to generate an - 'exec' entry, which will tell Vault Agent which child process to run. - - In addition to env_template entries, the command generates an 'auto_auth' - section with 'token_file' authentication method. While this method is very - convenient for local testing, it should NOT be used in production. Please - see https://developer.hashicorp.com/vault/docs/agent-and-proxy/autoauth/methods - for a list of production-ready auto_auth methods that you can use instead. - - By default, the file will be generated in the local directory as 'agent.hcl' - unless a path is specified as an argument. - - Generate a simple environment variable template configuration: - - $ vault agent generate-config -type="env-template" \ - -exec="./my-app arg1 arg2" \ - -path="secret/foo" - - Generate an environment variable template configuration for multiple secrets: - - $ vault agent generate-config -type="env-template" \ - -exec="./my-app arg1 arg2" \ - -path="secret/foo" \ - -path="secret/bar" \ - -path="secret/my-app/*" - -` + c.Flags().Help() - - return strings.TrimSpace(helpText) -} - -func (c *AgentGenerateConfigCommand) Flags() *FlagSets { - // Include client-modifying flags (-address, -namespace, etc.) - set := c.flagSet(FlagSetHTTP) - - // Common Options - f := set.NewFlagSet("Command Options") - - f.StringVar(&StringVar{ - Name: "type", - Target: &c.flagType, - Usage: "Type of configuration file to generate; currently, only 'env-template' is supported.", - Completion: complete.PredictSet( - "env-template", - ), - }) - - f.StringSliceVar(&StringSliceVar{ - Name: "path", - Target: &c.flagPaths, - Usage: "Path to a kv-v1 or kv-v2 secret (e.g. 
secret/data/foo, kv-v2/prefix/*); multiple secrets and tail '*' wildcards are allowed.", - Completion: c.PredictVaultFolders(), - }) - - f.StringVar(&StringVar{ - Name: "exec", - Target: &c.flagExec, - Default: "env", - Usage: "The command to execute in agent process supervisor mode.", - }) - - return set -} - -func (c *AgentGenerateConfigCommand) AutocompleteArgs() complete.Predictor { - return complete.PredictNothing -} +func testAuditListCommand(tb testing.TB) (*cli.MockUi, *AuditListCommand) { + tb.Helper() -func (c *AgentGenerateConfigCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *AgentGenerateConfigCommand) Run(args []string) int { - flags := c.Flags() - - if err := flags.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = flags.Args() - - if len(args) > 1 { - c.UI.Error(fmt.Sprintf("Too many arguments (expected at most 1, got %d)", len(args))) - return 1 - } - - if c.flagType == "" { - c.UI.Error(`Please specify a -type flag; currently only -type="env-template" is supported.`) - return 1 - } - - if c.flagType != "env-template" { - c.UI.Error(fmt.Sprintf(`%q is not a supported configuration type; currently only -type="env-template" is supported.`, c.flagType)) - return 1 - } - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - config, err := generateConfiguration(context.Background(), client, c.flagExec, c.flagPaths) - if err != nil { - c.UI.Error(fmt.Sprintf("Error: %v", err)) - return 2 - } - - var configPath string - if len(args) == 1 { - configPath = args[0] - } else { - configPath = "agent.hcl" - } - - f, err := os.Create(configPath) - if err != nil { - c.UI.Error(fmt.Sprintf("Could not create configuration file %q: %v", configPath, err)) - return 3 - } - defer func() { - if err := f.Close(); err != nil { - c.UI.Error(fmt.Sprintf("Could not close configuration file %q: %v", configPath, err)) - } - }() - - if _, err := config.WriteTo(f); err != 
nil { - c.UI.Error(fmt.Sprintf("Could not write to configuration file %q: %v", configPath, err)) - return 3 + ui := cli.NewMockUi() + return ui, &AuditListCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, } - - c.UI.Info(fmt.Sprintf("Successfully generated %q configuration file!", configPath)) - - c.UI.Warn("Warning: the generated file uses 'token_file' authentication method, which is not suitable for production environments.") - - return 0 } -func generateConfiguration(ctx context.Context, client *api.Client, flagExec string, flagPaths []string) (io.WriterTo, error) { - var execCommand []string - if flagExec != "" { - execCommand = strings.Split(flagExec, " ") - } else { - execCommand = []string{"env"} - } - - tokenPath, err := homedir.Expand("~/.vault-token") - if err != nil { - return nil, fmt.Errorf("could not expand home directory: %w", err) - } +func TestAuditListCommand_Run(t *testing.T) { + t.Parallel() - templates, err := constructTemplates(ctx, client, flagPaths) - if err != nil { - return nil, fmt.Errorf("could not generate templates: %w", err) - } - - config := generatedConfig{ - AutoAuth: generatedConfigAutoAuth{ - Method: generatedConfigAutoAuthMethod{ - Type: "token_file", - Config: generatedConfigAutoAuthMethodConfig{ - TokenFilePath: tokenPath, - }, - }, - }, - TemplateConfig: generatedConfigTemplateConfig{ - StaticSecretRenderInterval: "5m", - ExitOnRetryFailure: true, + cases := []struct { + name string + args []string + out string + code int + }{ + { + "too_many_args", + []string{"foo"}, + "Too many arguments", + 1, }, - Vault: generatedConfigVault{ - Address: client.Address(), + { + "lists", + nil, + "Path", + 0, }, - Exec: generatedConfigExec{ - Command: execCommand, - RestartOnSecretChanges: "always", - RestartStopSignal: "SIGTERM", + { + "detailed", + []string{"-detailed"}, + "Options", + 0, }, - EnvTemplates: templates, } - contents := hclwrite.NewEmptyFile() + for _, tc := range cases { + tc := tc - gohcl.EncodeIntoBody(&config, 
contents.Body()) - - return contents, nil -} + t.Run(tc.name, func(t *testing.T) { + t.Parallel() -func constructTemplates(ctx context.Context, client *api.Client, paths []string) ([]generatedConfigEnvTemplate, error) { - var templates []generatedConfigEnvTemplate + client, closer := testVaultServer(t) + defer closer() - for _, path := range paths { - path = sanitizePath(path) + if err := client.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ + Type: "file", + Options: map[string]string{ + "file_path": "discard", + }, + }); err != nil { + t.Fatal(err) + } - mountPath, v2, err := isKVv2(path, client) - if err != nil { - return nil, fmt.Errorf("could not validate secret path %q: %w", path, err) - } + ui, cmd := testAuditListCommand(t) + cmd.client = client - switch { - case strings.HasSuffix(path, "/*"): - // this path contains a tail wildcard, attempt to walk the tree - t, err := constructTemplatesFromTree(ctx, client, path[:len(path)-2], mountPath, v2) - if err != nil { - return nil, fmt.Errorf("could not traverse sercet at %q: %w", path, err) + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) } - templates = append(templates, t...) - - case strings.Contains(path, "*"): - // don't allow any other wildcards - return nil, fmt.Errorf("the path %q cannot contain '*' wildcard characters except as the last element of the path", path) - default: - // regular secret path - t, err := constructTemplatesFromSecret(ctx, client, path, mountPath, v2) - if err != nil { - return nil, fmt.Errorf("could not read secret at %q: %v", path, err) + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) } - templates = append(templates, t...) 
- } + }) } - return templates, nil -} + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() -func constructTemplatesFromTree(ctx context.Context, client *api.Client, path, mountPath string, v2 bool) ([]generatedConfigEnvTemplate, error) { - var templates []generatedConfigEnvTemplate + client, closer := testVaultServerBad(t) + defer closer() - if v2 { - metadataPath := strings.Replace( - path, - paths.Join(mountPath, "data"), - paths.Join(mountPath, "metadata"), - 1, - ) - if path != metadataPath { - path = metadataPath - } else { - path = addPrefixToKVPath(path, mountPath, "metadata", true) - } - } + ui, cmd := testAuditListCommand(t) + cmd.client = client - err := walkSecretsTree(ctx, client, path, func(child string, directory bool) error { - if directory { - return nil + code := cmd.Run([]string{}) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) } - dataPath := strings.Replace( - child, - paths.Join(mountPath, "metadata"), - paths.Join(mountPath, "data"), - 1, - ) - - t, err := constructTemplatesFromSecret(ctx, client, dataPath, mountPath, v2) - if err != nil { - return err + expected := "Error listing audits: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) } - templates = append(templates, t...) 
- - return nil }) - if err != nil { - return nil, err - } - - return templates, nil -} - -func constructTemplatesFromSecret(ctx context.Context, client *api.Client, path, mountPath string, v2 bool) ([]generatedConfigEnvTemplate, error) { - var templates []generatedConfigEnvTemplate - - if v2 { - path = addPrefixToKVPath(path, mountPath, "data", true) - } - - resp, err := client.Logical().ReadWithContext(ctx, path) - if err != nil { - return nil, fmt.Errorf("error querying: %w", err) - } - if resp == nil { - return nil, fmt.Errorf("secret not found") - } - - var data map[string]interface{} - if v2 { - internal, ok := resp.Data["data"] - if !ok { - return nil, fmt.Errorf("secret.Data not found") - } - data = internal.(map[string]interface{}) - } else { - data = resp.Data - } - - fields := make([]string, 0, len(data)) - - for field := range data { - fields = append(fields, field) - } - - // sort for a deterministic output - sort.Strings(fields) - - var dataContents string - if v2 { - dataContents = ".Data.data" - } else { - dataContents = ".Data" - } - - for _, field := range fields { - templates = append(templates, generatedConfigEnvTemplate{ - Name: constructDefaultEnvironmentKey(path, field), - Contents: fmt.Sprintf(`{{ with secret "%s" }}{{ %s.%s }}{{ end }}`, path, dataContents, field), - ErrorOnMissingKey: true, - }) - } - - return templates, nil -} - -func constructDefaultEnvironmentKey(path string, field string) string { - pathParts := strings.Split(path, "/") - pathPartsLast := pathParts[len(pathParts)-1] - - notLetterOrNumber := func(r rune) bool { - return !unicode.IsLetter(r) && !unicode.IsNumber(r) - } - - p1 := strings.FieldsFunc(pathPartsLast, notLetterOrNumber) - p2 := strings.FieldsFunc(field, notLetterOrNumber) - - keyParts := append(p1, p2...) - - return strings.ToUpper(strings.Join(keyParts, "_")) -} -// Below, we are redefining a subset of the configuration-related structures -// defined under command/agent/config. 
Using these structures we can tailor the -// output of the generated config, while using the original structures would -// have produced an HCL document with many empty fields. The structures below -// should not be used for anything other than generation. + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() -type generatedConfig struct { - AutoAuth generatedConfigAutoAuth `hcl:"auto_auth,block"` - TemplateConfig generatedConfigTemplateConfig `hcl:"template_config,block"` - Vault generatedConfigVault `hcl:"vault,block"` - EnvTemplates []generatedConfigEnvTemplate `hcl:"env_template,block"` - Exec generatedConfigExec `hcl:"exec,block"` -} - -type generatedConfigTemplateConfig struct { - StaticSecretRenderInterval string `hcl:"static_secret_render_interval"` - ExitOnRetryFailure bool `hcl:"exit_on_retry_failure"` -} - -type generatedConfigExec struct { - Command []string `hcl:"command"` - RestartOnSecretChanges string `hcl:"restart_on_secret_changes"` - RestartStopSignal string `hcl:"restart_stop_signal"` -} - -type generatedConfigEnvTemplate struct { - Name string `hcl:"name,label"` - Contents string `hcl:"contents,attr"` - ErrorOnMissingKey bool `hcl:"error_on_missing_key"` -} - -type generatedConfigVault struct { - Address string `hcl:"address"` -} - -type generatedConfigAutoAuth struct { - Method generatedConfigAutoAuthMethod `hcl:"method,block"` -} - -type generatedConfigAutoAuthMethod struct { - Type string `hcl:"type"` - Config generatedConfigAutoAuthMethodConfig `hcl:"config,block"` -} - -type generatedConfigAutoAuthMethodConfig struct { - TokenFilePath string `hcl:"token_file_path"` + _, cmd := testAuditListCommand(t) + assertNoTabs(t, cmd) + }) } diff --git a/command/agent_test.go b/command/agent_test.go index d3868d43a114..ace2085cfb28 100644 --- a/command/agent_test.go +++ b/command/agent_test.go @@ -1,3086 +1,33 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "net" - "net/http" - "os" - "path/filepath" - "reflect" - "strings" - "sync" - "testing" - "time" - - "github.com/hashicorp/go-hclog" - vaultjwt "github.com/hashicorp/vault-plugin-auth-jwt" - logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" - "github.com/hashicorp/vault/api" - credAppRole "github.com/hashicorp/vault/builtin/credential/approle" - "github.com/hashicorp/vault/command/agent" - agentConfig "github.com/hashicorp/vault/command/agent/config" - "github.com/hashicorp/vault/helper/testhelpers/minimal" - "github.com/hashicorp/vault/helper/useragent" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/sdk/helper/pointerutil" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - BasicHclConfig = ` -log_file = "TMPDIR/juan.log" -log_level="warn" -log_rotate_max_files=2 -log_rotate_bytes=1048576 -vault { - address = "http://127.0.0.1:8200" - retry { - num_retries = 5 - } -} - -listener "tcp" { - address = "127.0.0.1:8100" - tls_disable = false - tls_cert_file = "TMPDIR/reload_cert.pem" - tls_key_file = "TMPDIR/reload_key.pem" -}` - BasicHclConfig2 = ` -log_file = "TMPDIR/juan.log" -log_level="debug" -log_rotate_max_files=-1 -log_rotate_bytes=1048576 -vault { - address = "http://127.0.0.1:8200" - retry { - num_retries = 5 - } -} - -listener "tcp" { - address = "127.0.0.1:8100" - tls_disable = false - tls_cert_file = "TMPDIR/reload_cert.pem" - tls_key_file = "TMPDIR/reload_key.pem" -}` -) - -func testAgentCommand(tb testing.TB, logger hclog.Logger) (*cli.MockUi, *AgentCommand) { - tb.Helper() - - ui := cli.NewMockUi() - return ui, &AgentCommand{ - 
BaseCommand: &BaseCommand{ - UI: ui, - }, - ShutdownCh: MakeShutdownCh(), - SighupCh: MakeSighupCh(), - logger: logger, - startedCh: make(chan struct{}, 5), - reloadedCh: make(chan struct{}, 5), - } -} - -func TestAgent_ExitAfterAuth(t *testing.T) { - t.Run("via_config", func(t *testing.T) { - testAgentExitAfterAuth(t, false) - }) - - t.Run("via_flag", func(t *testing.T) { - testAgentExitAfterAuth(t, true) - }) -} - -func testAgentExitAfterAuth(t *testing.T, viaFlag bool) { - logger := logging.NewVaultLogger(hclog.Trace) - coreConfig := &vault.CoreConfig{ - CredentialBackends: map[string]logical.Factory{ - "jwt": vaultjwt.Factory, - }, - } - cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - client := cluster.Cores[0].Client - - // Setup Vault - err := client.Sys().EnableAuthWithOptions("jwt", &api.EnableAuthOptions{ - Type: "jwt", - }) - if err != nil { - t.Fatal(err) - } - - _, err = client.Logical().Write("auth/jwt/config", map[string]interface{}{ - "bound_issuer": "https://team-vault.auth0.com/", - "jwt_validation_pubkeys": agent.TestECDSAPubKey, - "jwt_supported_algs": "ES256", - }) - if err != nil { - t.Fatal(err) - } - - _, err = client.Logical().Write("auth/jwt/role/test", map[string]interface{}{ - "role_type": "jwt", - "bound_subject": "r3qXcK2bix9eFECzsU3Sbmh0K16fatW6@clients", - "bound_audiences": "https://vault.plugin.auth.jwt.test", - "user_claim": "https://vault/user", - "groups_claim": "https://vault/groups", - "policies": "test", - "period": "3s", - }) - if err != nil { - t.Fatal(err) - } - - inf, err := os.CreateTemp("", "auth.jwt.test.") - if err != nil { - t.Fatal(err) - } - in := inf.Name() - inf.Close() - os.Remove(in) - t.Logf("input: %s", in) - - sink1f, err := os.CreateTemp("", "sink1.jwt.test.") - if err != nil { - t.Fatal(err) - } - sink1 := sink1f.Name() - sink1f.Close() - 
os.Remove(sink1) - t.Logf("sink1: %s", sink1) - - sink2f, err := os.CreateTemp("", "sink2.jwt.test.") - if err != nil { - t.Fatal(err) - } - sink2 := sink2f.Name() - sink2f.Close() - os.Remove(sink2) - t.Logf("sink2: %s", sink2) - - conff, err := os.CreateTemp("", "conf.jwt.test.") - if err != nil { - t.Fatal(err) - } - conf := conff.Name() - conff.Close() - os.Remove(conf) - t.Logf("config: %s", conf) - - jwtToken, _ := agent.GetTestJWT(t) - if err := os.WriteFile(in, []byte(jwtToken), 0o600); err != nil { - t.Fatal(err) - } else { - logger.Trace("wrote test jwt", "path", in) - } - - exitAfterAuthTemplText := "exit_after_auth = true" - if viaFlag { - exitAfterAuthTemplText = "" - } - - config := ` -%s - -auto_auth { - method { - type = "jwt" - config = { - role = "test" - path = "%s" - } - } - - sink { - type = "file" - config = { - path = "%s" - } - } - - sink "file" { - config = { - path = "%s" - } - } -} -` - - config = fmt.Sprintf(config, exitAfterAuthTemplText, in, sink1, sink2) - if err := os.WriteFile(conf, []byte(config), 0o600); err != nil { - t.Fatal(err) - } else { - logger.Trace("wrote test config", "path", conf) - } - - doneCh := make(chan struct{}) - go func() { - ui, cmd := testAgentCommand(t, logger) - cmd.client = client - - args := []string{"-config", conf} - if viaFlag { - args = append(args, "-exit-after-auth") - } - - code := cmd.Run(args) - if code != 0 { - t.Errorf("expected %d to be %d", code, 0) - t.Logf("output from agent:\n%s", ui.OutputWriter.String()) - t.Logf("error from agent:\n%s", ui.ErrorWriter.String()) - } - close(doneCh) - }() - - select { - case <-doneCh: - break - case <-time.After(1 * time.Minute): - t.Fatal("timeout reached while waiting for agent to exit") - } - - sink1Bytes, err := os.ReadFile(sink1) - if err != nil { - t.Fatal(err) - } - if len(sink1Bytes) == 0 { - t.Fatal("got no output from sink 1") - } - - sink2Bytes, err := os.ReadFile(sink2) - if err != nil { - t.Fatal(err) - } - if len(sink2Bytes) == 0 { - 
t.Fatal("got no output from sink 2") - } - - if string(sink1Bytes) != string(sink2Bytes) { - t.Fatal("sink 1/2 values don't match") - } -} - -func TestAgent_RequireRequestHeader(t *testing.T) { - // newApiClient creates an *api.Client. - newApiClient := func(addr string, includeVaultRequestHeader bool) *api.Client { - conf := api.DefaultConfig() - conf.Address = addr - cli, err := api.NewClient(conf) - if err != nil { - t.Fatalf("err: %s", err) - } - - h := cli.Headers() - val, ok := h[consts.RequestHeaderName] - if !ok || !reflect.DeepEqual(val, []string{"true"}) { - t.Fatalf("invalid %s header", consts.RequestHeaderName) - } - if !includeVaultRequestHeader { - delete(h, consts.RequestHeaderName) - cli.SetHeaders(h) - } - - return cli - } - - //---------------------------------------------------- - // Start the server and agent - //---------------------------------------------------- - - // Start a vault server - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - }, - &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - vault.TestWaitActive(t, cluster.Cores[0].Core) - serverClient := cluster.Cores[0].Client - - // Enable the approle auth method - roleIDPath, secretIDPath := setupAppRole(t, serverClient) - - // Create a config file - config := ` -auto_auth { - method "approle" { - mount_path = "auth/approle" - config = { - role_id_file_path = "%s" - secret_id_file_path = "%s" - } - } -} - -cache { - use_auto_auth_token = true -} - -listener "tcp" { - address = "%s" - tls_disable = true -} -listener "tcp" { - address = "%s" - tls_disable = true - require_request_header = false -} -listener "tcp" { - address = "%s" - tls_disable = true - require_request_header = true -} -` - listenAddr1 := generateListenerAddress(t) - listenAddr2 := generateListenerAddress(t) - 
listenAddr3 := generateListenerAddress(t) - config = fmt.Sprintf( - config, - roleIDPath, - secretIDPath, - listenAddr1, - listenAddr2, - listenAddr3, - ) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the agent - ui, cmd := testAgentCommand(t, logger) - cmd.client = serverClient - cmd.startedCh = make(chan struct{}) - - var output string - var code int - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - code = cmd.Run([]string{"-config", configPath}) - if code != 0 { - output = ui.ErrorWriter.String() + ui.OutputWriter.String() - } - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Fatalf("timeout") - } - - // defer agent shutdown - defer func() { - cmd.ShutdownCh <- struct{}{} - wg.Wait() - if code != 0 { - t.Fatalf("got a non-zero exit status: %d, stdout/stderr: %s", code, output) - } - }() - - //---------------------------------------------------- - // Perform the tests - //---------------------------------------------------- - - // Test against a listener configuration that omits - // 'require_request_header', with the header missing from the request. - agentClient := newApiClient("http://"+listenAddr1, false) - req := agentClient.NewRequest("GET", "/v1/sys/health") - request(t, agentClient, req, 200) - - // Test against a listener configuration that sets 'require_request_header' - // to 'false', with the header missing from the request. - agentClient = newApiClient("http://"+listenAddr2, false) - req = agentClient.NewRequest("GET", "/v1/sys/health") - request(t, agentClient, req, 200) - - // Test against a listener configuration that sets 'require_request_header' - // to 'true', with the header missing from the request. 
- agentClient = newApiClient("http://"+listenAddr3, false) - req = agentClient.NewRequest("GET", "/v1/sys/health") - resp, err := agentClient.RawRequest(req) - if err == nil { - t.Fatalf("expected error") - } - if resp.StatusCode != http.StatusPreconditionFailed { - t.Fatalf("expected status code %d, not %d", http.StatusPreconditionFailed, resp.StatusCode) - } - - // Test against a listener configuration that sets 'require_request_header' - // to 'true', with an invalid header present in the request. - agentClient = newApiClient("http://"+listenAddr3, false) - h := agentClient.Headers() - h[consts.RequestHeaderName] = []string{"bogus"} - agentClient.SetHeaders(h) - req = agentClient.NewRequest("GET", "/v1/sys/health") - resp, err = agentClient.RawRequest(req) - if err == nil { - t.Fatalf("expected error") - } - if resp.StatusCode != http.StatusPreconditionFailed { - t.Fatalf("expected status code %d, not %d", http.StatusPreconditionFailed, resp.StatusCode) - } - - // Test against a listener configuration that sets 'require_request_header' - // to 'true', with the proper header present in the request. 
- agentClient = newApiClient("http://"+listenAddr3, true) - req = agentClient.NewRequest("GET", "/v1/sys/health") - request(t, agentClient, req, 200) -} - -// TestAgent_RequireAutoAuthWithForce ensures that the client exits with a -// non-zero code if configured to force the use of an auto-auth token without -// configuring the auto_auth block -func TestAgent_RequireAutoAuthWithForce(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - // Create a config file - config := fmt.Sprintf(` -cache { - use_auto_auth_token = "force" -} - -listener "tcp" { - address = "%s" - tls_disable = true -} -`, generateListenerAddress(t)) - - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the agent - ui, cmd := testAgentCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - code := cmd.Run([]string{"-config", configPath}) - if code == 0 { - t.Errorf("expected error code, but got 0: %d", code) - t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) - t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) - } -} - -// TestAgent_Template_UserAgent Validates that the User-Agent sent to Vault -// as part of Templating requests is correct. Uses the custom handler -// userAgentHandler struct defined in this test package, so that Vault validates the -// User-Agent on requests sent by Agent. 
-func TestAgent_Template_UserAgent(t *testing.T) { - //---------------------------------------------------- - // Start the server and agent - //---------------------------------------------------- - logger := logging.NewVaultLogger(hclog.Trace) - var h userAgentHandler - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "kv": logicalKv.Factory, - }, - }, - &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc( - func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.userAgentToCheckFor = useragent.AgentTemplatingString() - h.pathToCheck = "/v1/secret/data" - h.requestMethodToCheck = "GET" - h.t = t - return &h - }), - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Setenv(api.EnvVaultAddress, serverClient.Address()) - - roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) - - // make a temp directory to hold renders. 
Each test will create a temp dir - // inside this one - tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDirRoot) - // create temp dir for this test run - tmpDir, err := os.MkdirTemp(tmpDirRoot, "TestAgent_Template_UserAgent") - if err != nil { - t.Fatal(err) - } - - // make some template files - var templatePaths []string - fileName := filepath.Join(tmpDir, "render_0.tmpl") - if err := os.WriteFile(fileName, []byte(templateContents(0)), 0o600); err != nil { - t.Fatal(err) - } - templatePaths = append(templatePaths, fileName) - - // build up the template config to be added to the Agent config.hcl file - var templateConfigStrings []string - for i, t := range templatePaths { - index := fmt.Sprintf("render_%d.json", i) - s := fmt.Sprintf(templateConfigString, t, tmpDir, index) - templateConfigStrings = append(templateConfigStrings, s) - } - - // Create a config file - config := ` -vault { - address = "%s" - tls_skip_verify = true -} - -auto_auth { - method "approle" { - mount_path = "auth/approle" - config = { - role_id_file_path = "%s" - secret_id_file_path = "%s" - remove_secret_id_file_after_reading = false - } - } -} - -%s -` - - // flatten the template configs - templateConfig := strings.Join(templateConfigStrings, " ") - - config = fmt.Sprintf(config, serverClient.Address(), roleIDPath, secretIDPath, templateConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the agent - ui, cmd := testAgentCommand(t, logger) - cmd.client = serverClient - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - code := cmd.Run([]string{"-config", configPath}) - if code != 0 { - t.Errorf("non-zero return code when running agent: %d", code) - t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) - t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) - } - wg.Done() - }() - - select { - case <-cmd.startedCh: - case 
<-time.After(5 * time.Second): - t.Errorf("timeout") - } - - // We need to shut down the Agent command - defer func() { - cmd.ShutdownCh <- struct{}{} - wg.Wait() - }() - - verify := func(suffix string) { - t.Helper() - // We need to poll for a bit to give Agent time to render the - // templates. Without this, the test will attempt to read - // the temp dir before Agent has had time to render and will - // likely fail the test - tick := time.Tick(1 * time.Second) - timeout := time.After(10 * time.Second) - var err error - for { - select { - case <-timeout: - t.Fatalf("timed out waiting for templates to render, last error: %v", err) - case <-tick: - } - // Check for files rendered in the directory and break - // early for shutdown if we do have all the files - // rendered - - //---------------------------------------------------- - // Perform the tests - //---------------------------------------------------- - - if numFiles := testListFiles(t, tmpDir, ".json"); numFiles != len(templatePaths) { - err = fmt.Errorf("expected (%d) templates, got (%d)", len(templatePaths), numFiles) - continue - } - - for i := range templatePaths { - fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.json", i)) - var c []byte - c, err = os.ReadFile(fileName) - if err != nil { - continue - } - if string(c) != templateRendered(i)+suffix { - err = fmt.Errorf("expected=%q, got=%q", templateRendered(i)+suffix, string(c)) - continue - } - } - return - } - } - - verify("") - - fileName = filepath.Join(tmpDir, "render_0.tmpl") - if err := os.WriteFile(fileName, []byte(templateContents(0)+"{}"), 0o600); err != nil { - t.Fatal(err) - } - - verify("{}") -} - -// TestAgent_Template tests rendering templates -func TestAgent_Template_Basic(t *testing.T) { - //---------------------------------------------------- - // Start the server and agent - //---------------------------------------------------- - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, - 
&vault.CoreConfig{ - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "kv": logicalKv.Factory, - }, - }, - &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Setenv(api.EnvVaultAddress, serverClient.Address()) - - roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) - - // make a temp directory to hold renders. Each test will create a temp dir - // inside this one - tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDirRoot) - - // start test cases here - testCases := map[string]struct { - templateCount int - exitAfterAuth bool - }{ - "one": { - templateCount: 1, - }, - "one_with_exit": { - templateCount: 1, - exitAfterAuth: true, - }, - "many": { - templateCount: 15, - }, - "many_with_exit": { - templateCount: 13, - exitAfterAuth: true, - }, - } - - for tcname, tc := range testCases { - t.Run(tcname, func(t *testing.T) { - // create temp dir for this test run - tmpDir, err := os.MkdirTemp(tmpDirRoot, tcname) - if err != nil { - t.Fatal(err) - } - - // make some template files - var templatePaths []string - for i := 0; i < tc.templateCount; i++ { - fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) - if err := os.WriteFile(fileName, []byte(templateContents(i)), 0o600); err != nil { - t.Fatal(err) - } - templatePaths = append(templatePaths, fileName) - } - - // build up the template config to be added to the Agent config.hcl file - var templateConfigStrings []string - for i, t := range templatePaths { - index := fmt.Sprintf("render_%d.json", i) - s := 
fmt.Sprintf(templateConfigString, t, tmpDir, index) - templateConfigStrings = append(templateConfigStrings, s) - } - - // Create a config file - config := ` -vault { - address = "%s" - tls_skip_verify = true -} - -auto_auth { - method "approle" { - mount_path = "auth/approle" - config = { - role_id_file_path = "%s" - secret_id_file_path = "%s" - remove_secret_id_file_after_reading = false - } - } -} - -%s - -%s -` - - // conditionally set the exit_after_auth flag - exitAfterAuth := "" - if tc.exitAfterAuth { - exitAfterAuth = "exit_after_auth = true" - } - - // flatten the template configs - templateConfig := strings.Join(templateConfigStrings, " ") - - config = fmt.Sprintf(config, serverClient.Address(), roleIDPath, secretIDPath, templateConfig, exitAfterAuth) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the agent - ui, cmd := testAgentCommand(t, logger) - cmd.client = serverClient - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - code := cmd.Run([]string{"-config", configPath}) - if code != 0 { - t.Errorf("non-zero return code when running agent: %d", code) - t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) - t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) - } - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - // if using exit_after_auth, then the command will have returned at the - // end and no longer be running. If we are not using exit_after_auth, then - // we need to shut down the command - if !tc.exitAfterAuth { - defer func() { - cmd.ShutdownCh <- struct{}{} - wg.Wait() - }() - } - - verify := func(suffix string) { - t.Helper() - // We need to poll for a bit to give Agent time to render the - // templates. 
Without this this, the test will attempt to read - // the temp dir before Agent has had time to render and will - // likely fail the test - tick := time.Tick(1 * time.Second) - timeout := time.After(10 * time.Second) - var err error - for { - select { - case <-timeout: - t.Fatalf("timed out waiting for templates to render, last error: %v", err) - case <-tick: - } - // Check for files rendered in the directory and break - // early for shutdown if we do have all the files - // rendered - - //---------------------------------------------------- - // Perform the tests - //---------------------------------------------------- - - if numFiles := testListFiles(t, tmpDir, ".json"); numFiles != len(templatePaths) { - err = fmt.Errorf("expected (%d) templates, got (%d)", len(templatePaths), numFiles) - continue - } - - for i := range templatePaths { - fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.json", i)) - var c []byte - c, err = os.ReadFile(fileName) - if err != nil { - continue - } - if string(c) != templateRendered(i)+suffix { - err = fmt.Errorf("expected=%q, got=%q", templateRendered(i)+suffix, string(c)) - continue - } - } - return - } - } - - verify("") - - for i := 0; i < tc.templateCount; i++ { - fileName := filepath.Join(tmpDir, fmt.Sprintf("render_%d.tmpl", i)) - if err := os.WriteFile(fileName, []byte(templateContents(i)+"{}"), 0o600); err != nil { - t.Fatal(err) - } - } - - verify("{}") - }) - } -} - -func setupAppRole(t *testing.T, serverClient *api.Client) (string, string) { - t.Helper() - // Enable the approle auth method - req := serverClient.NewRequest("POST", "/v1/sys/auth/approle") - req.BodyBytes = []byte(`{ - "type": "approle" - }`) - request(t, serverClient, req, 204) - - // Create a named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role") - req.BodyBytes = []byte(`{ - "token_ttl": "5m", - "token_policies":"default,myapp-read", - "policies":"default,myapp-read" - }`) - request(t, serverClient, req, 204) - - // 
Fetch the RoleID of the named role - req = serverClient.NewRequest("GET", "/v1/auth/approle/role/test-role/role-id") - body := request(t, serverClient, req, 200) - data := body["data"].(map[string]interface{}) - roleID := data["role_id"].(string) - - // Get a SecretID issued against the named role - req = serverClient.NewRequest("PUT", "/v1/auth/approle/role/test-role/secret-id") - body = request(t, serverClient, req, 200) - data = body["data"].(map[string]interface{}) - secretID := data["secret_id"].(string) - - // Write the RoleID and SecretID to temp files - roleIDPath := makeTempFile(t, "role_id.txt", roleID+"\n") - secretIDPath := makeTempFile(t, "secret_id.txt", secretID+"\n") - t.Cleanup(func() { - os.Remove(roleIDPath) - os.Remove(secretIDPath) - }) - - return roleIDPath, secretIDPath -} - -func setupAppRoleAndKVMounts(t *testing.T, serverClient *api.Client) (string, string) { - roleIDPath, secretIDPath := setupAppRole(t, serverClient) - - // give test-role permissions to read the kv secret - req := serverClient.NewRequest("PUT", "/v1/sys/policy/myapp-read") - req.BodyBytes = []byte(`{ - "policy": "path \"secret/*\" { capabilities = [\"read\", \"list\"] }" - }`) - request(t, serverClient, req, 204) - - // setup the kv secrets - req = serverClient.NewRequest("POST", "/v1/sys/mounts/secret/tune") - req.BodyBytes = []byte(`{ - "options": {"version": "2"} - }`) - request(t, serverClient, req, 200) - - // Secret: myapp - req = serverClient.NewRequest("POST", "/v1/secret/data/myapp") - req.BodyBytes = []byte(`{ - "data": { - "username": "bar", - "password": "zap" - } - }`) - request(t, serverClient, req, 200) - - // Secret: myapp2 - req = serverClient.NewRequest("POST", "/v1/secret/data/myapp2") - req.BodyBytes = []byte(`{ - "data": { - "username": "barstuff", - "password": "zap" - } - }`) - request(t, serverClient, req, 200) - - // Secret: otherapp - req = serverClient.NewRequest("POST", "/v1/secret/data/otherapp") - req.BodyBytes = []byte(`{ - "data": { - 
"username": "barstuff", - "password": "zap", - "cert": "something" - } - }`) - request(t, serverClient, req, 200) - - return roleIDPath, secretIDPath -} - -// TestAgent_Template_VaultClientFromEnv tests that Vault Agent can read in its -// required `vault` client details from environment variables instead of config. -func TestAgent_Template_VaultClientFromEnv(t *testing.T) { - //---------------------------------------------------- - // Start the server and agent - //---------------------------------------------------- - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "kv": logicalKv.Factory, - }, - }, - &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - serverClient := cluster.Cores[0].Client - - roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) - - // make a temp directory to hold renders. Each test will create a temp dir - // inside this one - tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDirRoot) - - vaultAddr := "https://" + cluster.Cores[0].Listeners[0].Address.String() - testCases := map[string]struct { - env map[string]string - }{ - "VAULT_ADDR and VAULT_CACERT": { - env: map[string]string{ - api.EnvVaultAddress: vaultAddr, - api.EnvVaultCACert: cluster.CACertPEMFile, - }, - }, - "VAULT_ADDR and VAULT_CACERT_BYTES": { - env: map[string]string{ - api.EnvVaultAddress: vaultAddr, - api.EnvVaultCACertBytes: string(cluster.CACertPEM), - }, - }, - } - - for tcname, tc := range testCases { - t.Run(tcname, func(t *testing.T) { - for k, v := range tc.env { - t.Setenv(k, v) - } - tmpDir := t.TempDir() - - // Make a template. 
- templateFile := filepath.Join(tmpDir, "render.tmpl") - if err := os.WriteFile(templateFile, []byte(templateContents(0)), 0o600); err != nil { - t.Fatal(err) - } - - // build up the template config to be added to the Agent config.hcl file - targetFile := filepath.Join(tmpDir, "render.json") - templateConfig := fmt.Sprintf(` -template { - source = "%s" - destination = "%s" -} - `, templateFile, targetFile) - - // Create a config file - config := ` -auto_auth { - method "approle" { - mount_path = "auth/approle" - config = { - role_id_file_path = "%s" - secret_id_file_path = "%s" - remove_secret_id_file_after_reading = false - } - } -} - -%s -` - - config = fmt.Sprintf(config, roleIDPath, secretIDPath, templateConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the agent - ui, cmd := testAgentCommand(t, logger) - cmd.client = serverClient - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - code := cmd.Run([]string{"-config", configPath}) - if code != 0 { - t.Errorf("non-zero return code when running agent: %d", code) - t.Logf("STDOUT from agent:\n%s", ui.OutputWriter.String()) - t.Logf("STDERR from agent:\n%s", ui.ErrorWriter.String()) - } - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - defer func() { - cmd.ShutdownCh <- struct{}{} - wg.Wait() - }() - - // We need to poll for a bit to give Agent time to render the - // templates. 
Without this this, the test will attempt to read - // the temp dir before Agent has had time to render and will - // likely fail the test - tick := time.Tick(1 * time.Second) - timeout := time.After(10 * time.Second) - for { - select { - case <-timeout: - t.Fatalf("timed out waiting for templates to render, last error: %v", err) - case <-tick: - } - - contents, err := os.ReadFile(targetFile) - if err != nil { - // If the file simply doesn't exist, continue waiting for - // the template rendering to complete. - if os.IsNotExist(err) { - continue - } - t.Fatal(err) - } - - if string(contents) != templateRendered(0) { - t.Fatalf("expected=%q, got=%q", templateRendered(0), string(contents)) - } - - // Success! Break out of the retry loop. - break - } - }) - } -} - -func testListFiles(t *testing.T, dir, extension string) int { - t.Helper() - - files, err := os.ReadDir(dir) - if err != nil { - t.Fatal(err) - } - var count int - for _, f := range files { - if filepath.Ext(f.Name()) == extension { - count++ - } - } - - return count -} - -// TestAgent_Template_ExitCounter tests that Vault Agent correctly renders all -// templates before exiting when the configuration uses exit_after_auth. This is -// similar to TestAgent_Template_Basic, but differs by using a consistent number -// of secrets from multiple sources, where as the basic test could possibly -// generate a random number of secrets, but all using the same source. 
This test -// reproduces https://github.com/hashicorp/vault/issues/7883 -func TestAgent_Template_ExitCounter(t *testing.T) { - //---------------------------------------------------- - // Start the server and agent - //---------------------------------------------------- - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "kv": logicalKv.Factory, - }, - }, - &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Setenv(api.EnvVaultAddress, serverClient.Address()) - - roleIDPath, secretIDPath := setupAppRoleAndKVMounts(t, serverClient) - - // make a temp directory to hold renders. 
Each test will create a temp dir - // inside this one - tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDirRoot) - - // create temp dir for this test run - tmpDir, err := os.MkdirTemp(tmpDirRoot, "agent-test") - if err != nil { - t.Fatal(err) - } - - // Create a config file - config := ` -vault { - address = "%s" - tls_skip_verify = true -} - -auto_auth { - method "approle" { - mount_path = "auth/approle" - config = { - role_id_file_path = "%s" - secret_id_file_path = "%s" - remove_secret_id_file_after_reading = false - } - } -} - -template { - contents = "{{ with secret \"secret/myapp\" }}{{ range $k, $v := .Data.data }}{{ $v }}{{ end }}{{ end }}" - destination = "%s/render-pass.txt" -} - -template { - contents = "{{ with secret \"secret/myapp2\" }}{{ .Data.data.username}}{{ end }}" - destination = "%s/render-user.txt" -} - -template { - contents = < 0 { - h.failCount-- - h.t.Logf("%s failing GET request on %s, failures left: %d", time.Now(), req.URL.Path, h.failCount) - resp.WriteHeader(500) - return - } - h.t.Logf("passing GET request on %s", req.URL.Path) - } - vaulthttp.Handler.Handler(h.props).ServeHTTP(resp, req) -} - -// userAgentHandler makes it easy to test the User-Agent header received -// by Vault -type userAgentHandler struct { - props *vault.HandlerProperties - failCount int - userAgentToCheckFor string - pathToCheck string - requestMethodToCheck string - t *testing.T -} - -func (h *userAgentHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if req.Method == h.requestMethodToCheck && strings.Contains(req.RequestURI, h.pathToCheck) { - userAgent := req.UserAgent() - if !(userAgent == h.userAgentToCheckFor) { - h.t.Fatalf("User-Agent string not as expected. 
Expected to find %s, got %s", h.userAgentToCheckFor, userAgent) - } - } - vaulthttp.Handler.Handler(h.props).ServeHTTP(w, req) -} - -// TestAgent_Template_Retry verifies that the template server retries requests -// based on retry configuration. -func TestAgent_Template_Retry(t *testing.T) { - //---------------------------------------------------- - // Start the server and agent - //---------------------------------------------------- - logger := logging.NewVaultLogger(hclog.Trace) - var h handler - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "kv": logicalKv.Factory, - }, - }, - &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc( - func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.t = t - return &h - }), - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - methodConf, cleanup := prepAgentApproleKV(t, serverClient) - defer cleanup() - - err := serverClient.Sys().TuneMount("secret", api.MountConfigInput{ - Options: map[string]string{ - "version": "2", - }, - }) - if err != nil { - t.Fatal(err) - } - - _, err = serverClient.Logical().Write("secret/data/otherapp", map[string]interface{}{ - "data": map[string]interface{}{ - "username": "barstuff", - "password": "zap", - "cert": "something", - }, - }) - if err != nil { - t.Fatal(err) - } - - // make a temp directory to hold renders. 
Each test will create a temp dir - // inside this one - tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDirRoot) - - intRef := func(i int) *int { - return &i - } - // start test cases here - testCases := map[string]struct { - retries *int - expectError bool - }{ - "none": { - retries: intRef(-1), - expectError: true, - }, - "one": { - retries: intRef(1), - expectError: true, - }, - "two": { - retries: intRef(2), - expectError: false, - }, - "missing": { - retries: nil, - expectError: false, - }, - "default": { - retries: intRef(0), - expectError: false, - }, - } - - for tcname, tc := range testCases { - t.Run(tcname, func(t *testing.T) { - // We fail the first 6 times. The consul-template code creates - // a Vault client with MaxRetries=2, so for every consul-template - // retry configured, it will in practice make up to 3 requests. - // Thus if consul-template is configured with "one" retry, it will - // fail given our failCount, but if configured with "two" retries, - // they will consume our 6th failure, and on the "third (from its - // perspective) attempt, it will succeed. 
- h.failCount = 6 - - // create temp dir for this test run - tmpDir, err := os.MkdirTemp(tmpDirRoot, tcname) - if err != nil { - t.Fatal(err) - } - - // make some template files - templatePath := filepath.Join(tmpDir, "render_0.tmpl") - if err := os.WriteFile(templatePath, []byte(templateContents(0)), 0o600); err != nil { - t.Fatal(err) - } - templateConfig := fmt.Sprintf(templateConfigString, templatePath, tmpDir, "render_0.json") - - var retryConf string - if tc.retries != nil { - retryConf = fmt.Sprintf("retry { num_retries = %d }", *tc.retries) - } - - config := fmt.Sprintf(` -%s -vault { - address = "%s" - %s - tls_skip_verify = true -} -%s -template_config { - exit_on_retry_failure = true -} -`, methodConf, serverClient.Address(), retryConf, templateConfig) - - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the agent - _, cmd := testAgentCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - var code int - go func() { - code = cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - verify := func() error { - t.Helper() - // We need to poll for a bit to give Agent time to render the - // templates. 
Without this this, the test will attempt to read - // the temp dir before Agent has had time to render and will - // likely fail the test - tick := time.Tick(1 * time.Second) - timeout := time.After(15 * time.Second) - var err error - for { - select { - case <-timeout: - return fmt.Errorf("timed out waiting for templates to render, last error: %v", err) - case <-tick: - } - // Check for files rendered in the directory and break - // early for shutdown if we do have all the files - // rendered - - //---------------------------------------------------- - // Perform the tests - //---------------------------------------------------- - - if numFiles := testListFiles(t, tmpDir, ".json"); numFiles != 1 { - err = fmt.Errorf("expected 1 template, got (%d)", numFiles) - continue - } - - fileName := filepath.Join(tmpDir, "render_0.json") - var c []byte - c, err = os.ReadFile(fileName) - if err != nil { - continue - } - if string(c) != templateRendered(0) { - err = fmt.Errorf("expected=%q, got=%q", templateRendered(0), string(c)) - continue - } - return nil - } - } - - err = verify() - close(cmd.ShutdownCh) - wg.Wait() - - switch { - case (code != 0 || err != nil) && tc.expectError: - case code == 0 && err == nil && !tc.expectError: - default: - t.Fatalf("%s expectError=%v error=%v code=%d", tcname, tc.expectError, err, code) - } - }) - } -} - -// prepAgentApproleKV configures a Vault instance for approle authentication, -// such that the resulting token will have global permissions across /kv -// and /secret mounts. Returns the auto_auth config stanza to setup an Agent -// to connect using approle. 
-func prepAgentApproleKV(t *testing.T, client *api.Client) (string, func()) { - t.Helper() - - policyAutoAuthAppRole := ` -path "/kv/*" { - capabilities = ["create", "read", "update", "delete", "list"] -} -path "/secret/*" { - capabilities = ["create", "read", "update", "delete", "list"] -} -` - // Add an kv-admin policy - if err := client.Sys().PutPolicy("test-autoauth", policyAutoAuthAppRole); err != nil { - t.Fatal(err) - } - - // Enable approle - err := client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{ - Type: "approle", - }) - if err != nil { - t.Fatal(err) - } - - _, err = client.Logical().Write("auth/approle/role/test1", map[string]interface{}{ - "bind_secret_id": "true", - "token_ttl": "1h", - "token_max_ttl": "2h", - "policies": []string{"test-autoauth"}, - }) - if err != nil { - t.Fatal(err) - } - - resp, err := client.Logical().Write("auth/approle/role/test1/secret-id", nil) - if err != nil { - t.Fatal(err) - } - secretID := resp.Data["secret_id"].(string) - secretIDFile := makeTempFile(t, "secret_id.txt", secretID+"\n") - - resp, err = client.Logical().Read("auth/approle/role/test1/role-id") - if err != nil { - t.Fatal(err) - } - roleID := resp.Data["role_id"].(string) - roleIDFile := makeTempFile(t, "role_id.txt", roleID+"\n") - - config := fmt.Sprintf(` -auto_auth { - method "approle" { - mount_path = "auth/approle" - config = { - role_id_file_path = "%s" - secret_id_file_path = "%s" - remove_secret_id_file_after_reading = false - } - } -} -`, roleIDFile, secretIDFile) - - cleanup := func() { - _ = os.Remove(roleIDFile) - _ = os.Remove(secretIDFile) - } - return config, cleanup -} - -// TestAgent_AutoAuth_UserAgent tests that the User-Agent sent -// to Vault by Vault Agent is correct when performing Auto-Auth. -// Uses the custom handler userAgentHandler (defined above) so -// that Vault validates the User-Agent on requests sent by Agent. 
-func TestAgent_AutoAuth_UserAgent(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - var h userAgentHandler - cluster := vault.NewTestCluster(t, &vault.CoreConfig{ - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - }, &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc( - func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.userAgentToCheckFor = useragent.AgentAutoAuthString() - h.requestMethodToCheck = "PUT" - h.pathToCheck = "auth/approle/login" - h.t = t - return &h - }), - }) - cluster.Start() - defer cluster.Cleanup() - - serverClient := cluster.Cores[0].Client - - // Enable the approle auth method - roleIDPath, secretIDPath := setupAppRole(t, serverClient) - - sinkf, err := os.CreateTemp("", "sink.test.") - if err != nil { - t.Fatal(err) - } - sink := sinkf.Name() - sinkf.Close() - os.Remove(sink) - - autoAuthConfig := fmt.Sprintf(` -auto_auth { - method "approle" { - mount_path = "auth/approle" - config = { - role_id_file_path = "%s" - secret_id_file_path = "%s" - } - } - - sink "file" { - config = { - path = "%s" - } - } -}`, roleIDPath, secretIDPath, sink) - - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - config := fmt.Sprintf(` -vault { - address = "%s" - tls_skip_verify = true -} -api_proxy { - use_auto_auth_token = true -} -%s -%s -`, serverClient.Address(), listenConfig, autoAuthConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - // Start the agent - _, cmd := testAgentCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - 
cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - // Validate that the auto-auth token has been correctly attained - // and works for LookupSelf - conf := api.DefaultConfig() - conf.Address = "http://" + listenAddr - agentClient, err := api.NewClient(conf) - if err != nil { - t.Fatalf("err: %s", err) - } - - agentClient.SetToken("") - err = agentClient.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - - // Wait for the token to be sent to syncs and be available to be used - time.Sleep(5 * time.Second) - - req := agentClient.NewRequest("GET", "/v1/auth/token/lookup-self") - request(t, agentClient, req, 200) - - close(cmd.ShutdownCh) - wg.Wait() -} - -// TestAgent_APIProxyWithoutCache_UserAgent tests that the User-Agent sent -// to Vault by Vault Agent is correct using the API proxy without -// the cache configured. Uses the custom handler -// userAgentHandler struct defined in this test package, so that Vault validates the -// User-Agent on requests sent by Agent. 
-func TestAgent_APIProxyWithoutCache_UserAgent(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - userAgentForProxiedClient := "proxied-client" - var h userAgentHandler - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc( - func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.userAgentToCheckFor = useragent.AgentProxyStringWithProxiedUserAgent(userAgentForProxiedClient) - h.pathToCheck = "/v1/auth/token/lookup-self" - h.requestMethodToCheck = "GET" - h.t = t - return &h - }), - }) - cluster.Start() - defer cluster.Cleanup() - - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - config := fmt.Sprintf(` -vault { - address = "%s" - tls_skip_verify = true -} -%s -`, serverClient.Address(), listenConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the agent - _, cmd := testAgentCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - agentClient, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - agentClient.AddHeader("User-Agent", userAgentForProxiedClient) - agentClient.SetToken(serverClient.Token()) - agentClient.SetMaxRetries(0) - err = agentClient.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - - _, err = agentClient.Auth().Token().LookupSelf() - if err != nil { - t.Fatal(err) - } - - 
close(cmd.ShutdownCh) - wg.Wait() -} - -// TestAgent_APIProxyWithCache_UserAgent tests that the User-Agent sent -// to Vault by Vault Agent is correct using the API proxy with -// the cache configured. Uses the custom handler -// userAgentHandler struct defined in this test package, so that Vault validates the -// User-Agent on requests sent by Agent. -func TestAgent_APIProxyWithCache_UserAgent(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - userAgentForProxiedClient := "proxied-client" - var h userAgentHandler - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc( - func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.userAgentToCheckFor = useragent.AgentProxyStringWithProxiedUserAgent(userAgentForProxiedClient) - h.pathToCheck = "/v1/auth/token/lookup-self" - h.requestMethodToCheck = "GET" - h.t = t - return &h - }), - }) - cluster.Start() - defer cluster.Cleanup() - - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - cacheConfig := ` -cache { -}` - - config := fmt.Sprintf(` -vault { - address = "%s" - tls_skip_verify = true -} -%s -%s -`, serverClient.Address(), listenConfig, cacheConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the agent - _, cmd := testAgentCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - agentClient, err := 
api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - agentClient.AddHeader("User-Agent", userAgentForProxiedClient) - agentClient.SetToken(serverClient.Token()) - agentClient.SetMaxRetries(0) - err = agentClient.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - - _, err = agentClient.Auth().Token().LookupSelf() - if err != nil { - t.Fatal(err) - } - - close(cmd.ShutdownCh) - wg.Wait() -} - -func TestAgent_Cache_DynamicSecret(t *testing.T) { - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - cacheConfig := ` -cache { -} -` - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - config := fmt.Sprintf(` -vault { - address = "%s" - tls_skip_verify = true -} -%s -%s -`, serverClient.Address(), cacheConfig, listenConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the agent - _, cmd := testAgentCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - agentClient, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - agentClient.SetToken(serverClient.Token()) - agentClient.SetMaxRetries(0) - err = agentClient.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - - renewable := true - tokenCreateRequest := 
&api.TokenCreateRequest{ - Policies: []string{"default"}, - TTL: "30m", - Renewable: &renewable, - } - - // This was the simplest test I could find to trigger the caching behaviour, - // i.e. the most concise I could make the test that I can tell - // creating an orphan token returns Auth, is renewable, and isn't a token - // that's managed elsewhere (since it's an orphan) - secret, err := agentClient.Auth().Token().CreateOrphan(tokenCreateRequest) - if err != nil { - t.Fatal(err) - } - if secret == nil || secret.Auth == nil { - t.Fatalf("secret not as expected: %v", secret) - } - - token := secret.Auth.ClientToken - - secret, err = agentClient.Auth().Token().CreateOrphan(tokenCreateRequest) - if err != nil { - t.Fatal(err) - } - if secret == nil || secret.Auth == nil { - t.Fatalf("secret not as expected: %v", secret) - } - - token2 := secret.Auth.ClientToken - - if token != token2 { - t.Fatalf("token create response not cached when it should have been, as tokens differ") - } - - close(cmd.ShutdownCh) - wg.Wait() -} - -func TestAgent_ApiProxy_Retry(t *testing.T) { - //---------------------------------------------------- - // Start the server and agent - //---------------------------------------------------- - logger := logging.NewVaultLogger(hclog.Trace) - var h handler - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "kv": logicalKv.Factory, - }, - }, - &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.HandlerFunc(func(properties *vault.HandlerProperties) http.Handler { - h.props = properties - h.t = t - return &h - }), - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, 
os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - _, err := serverClient.Logical().Write("secret/foo", map[string]interface{}{ - "bar": "baz", - }) - if err != nil { - t.Fatal(err) - } - - intRef := func(i int) *int { - return &i - } - // start test cases here - testCases := map[string]struct { - retries *int - expectError bool - }{ - "none": { - retries: intRef(-1), - expectError: true, - }, - "one": { - retries: intRef(1), - expectError: true, - }, - "two": { - retries: intRef(2), - expectError: false, - }, - "missing": { - retries: nil, - expectError: false, - }, - "default": { - retries: intRef(0), - expectError: false, - }, - } - - for tcname, tc := range testCases { - t.Run(tcname, func(t *testing.T) { - h.failCount = 2 - - cacheConfig := ` -cache { -} -` - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - var retryConf string - if tc.retries != nil { - retryConf = fmt.Sprintf("retry { num_retries = %d }", *tc.retries) - } - - config := fmt.Sprintf(` -vault { - address = "%s" - %s - tls_skip_verify = true -} -%s -%s -`, serverClient.Address(), retryConf, cacheConfig, listenConfig) - configPath := makeTempFile(t, "config.hcl", config) - defer os.Remove(configPath) - - // Start the agent - _, cmd := testAgentCommand(t, logger) - cmd.startedCh = make(chan struct{}) - - wg := &sync.WaitGroup{} - wg.Add(1) - go func() { - cmd.Run([]string{"-config", configPath}) - wg.Done() - }() - - select { - case <-cmd.startedCh: - case <-time.After(5 * time.Second): - t.Errorf("timeout") - } - - client, err := api.NewClient(api.DefaultConfig()) - if err != nil { - t.Fatal(err) - } - client.SetToken(serverClient.Token()) - client.SetMaxRetries(0) - err = client.SetAddress("http://" + listenAddr) - if err != nil { - t.Fatal(err) - } - secret, err := client.Logical().Read("secret/foo") - switch { - case (err != nil || secret == nil) && tc.expectError: - case 
(err == nil || secret != nil) && !tc.expectError: - default: - t.Fatalf("%s expectError=%v error=%v secret=%v", tcname, tc.expectError, err, secret) - } - if secret != nil && secret.Data["foo"] != nil { - val := secret.Data["foo"].(map[string]interface{}) - if !reflect.DeepEqual(val, map[string]interface{}{"bar": "baz"}) { - t.Fatalf("expected key 'foo' to yield bar=baz, got: %v", val) - } - } - time.Sleep(time.Second) - - close(cmd.ShutdownCh) - wg.Wait() - }) - } -} - -func TestAgent_TemplateConfig_ExitOnRetryFailure(t *testing.T) { - //---------------------------------------------------- - // Start the server and agent - //---------------------------------------------------- - logger := logging.NewVaultLogger(hclog.Trace) - cluster := vault.NewTestCluster(t, - &vault.CoreConfig{ - CredentialBackends: map[string]logical.Factory{ - "approle": credAppRole.Factory, - }, - LogicalBackends: map[string]logical.Factory{ - "kv": logicalKv.Factory, - }, - }, - &vault.TestClusterOptions{ - NumCores: 1, - HandlerFunc: vaulthttp.Handler, - }) - cluster.Start() - defer cluster.Cleanup() - - vault.TestWaitActive(t, cluster.Cores[0].Core) - serverClient := cluster.Cores[0].Client - - // Unset the environment variable so that agent picks up the right test - // cluster address - defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) - os.Unsetenv(api.EnvVaultAddress) - - autoAuthConfig, cleanup := prepAgentApproleKV(t, serverClient) - defer cleanup() - - err := serverClient.Sys().TuneMount("secret", api.MountConfigInput{ - Options: map[string]string{ - "version": "2", - }, - }) - if err != nil { - t.Fatal(err) - } - - _, err = serverClient.Logical().Write("secret/data/otherapp", map[string]interface{}{ - "data": map[string]interface{}{ - "username": "barstuff", - "password": "zap", - "cert": "something", - }, - }) - if err != nil { - t.Fatal(err) - } - - // make a temp directory to hold renders. 
Each test will create a temp dir - // inside this one - tmpDirRoot, err := os.MkdirTemp("", "agent-test-renders") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDirRoot) - - // Note that missing key is different from a non-existent secret. A missing - // key (2xx response with missing keys in the response map) can still yield - // a successful render unless error_on_missing_key is specified, whereas a - // missing secret (4xx response) always results in an error. - missingKeyTemplateContent := `{{- with secret "secret/otherapp"}}{"secret": "other", -{{- if .Data.data.foo}}"foo":"{{ .Data.data.foo}}"{{- end }}} -{{- end }}` - missingKeyTemplateRender := `{"secret": "other",}` - - badTemplateContent := `{{- with secret "secret/non-existent"}}{"secret": "other", -{{- if .Data.data.foo}}"foo":"{{ .Data.data.foo}}"{{- end }}} -{{- end }}` - - testCases := map[string]struct { - exitOnRetryFailure *bool - templateContents string - expectTemplateRender string - templateErrorOnMissingKey bool - expectError bool - expectExitFromError bool - }{ - "true, no template error": { - exitOnRetryFailure: pointerutil.BoolPtr(true), - templateContents: templateContents(0), - expectTemplateRender: templateRendered(0), - templateErrorOnMissingKey: false, - expectError: false, - expectExitFromError: false, - }, - "true, with non-existent secret": { - exitOnRetryFailure: pointerutil.BoolPtr(true), - templateContents: badTemplateContent, - expectTemplateRender: "", - templateErrorOnMissingKey: false, - expectError: true, - expectExitFromError: true, - }, - "true, with missing key": { - exitOnRetryFailure: pointerutil.BoolPtr(true), - templateContents: missingKeyTemplateContent, - expectTemplateRender: missingKeyTemplateRender, - templateErrorOnMissingKey: false, - expectError: false, - expectExitFromError: false, - }, - "true, with missing key, with error_on_missing_key": { - exitOnRetryFailure: pointerutil.BoolPtr(true), - templateContents: missingKeyTemplateContent, - 
expectTemplateRender: "", - templateErrorOnMissingKey: true, - expectError: true, - expectExitFromError: true, - }, - "false, no template error": { - exitOnRetryFailure: pointerutil.BoolPtr(false), - templateContents: templateContents(0), - expectTemplateRender: templateRendered(0), - templateErrorOnMissingKey: false, - expectError: false, - expectExitFromError: false, - }, - "false, with non-existent secret": { - exitOnRetryFailure: pointerutil.BoolPtr(false), - templateContents: badTemplateContent, - expectTemplateRender: "", - templateErrorOnMissingKey: false, - expectError: true, - expectExitFromError: false, - }, - "false, with missing key": { - exitOnRetryFailure: pointerutil.BoolPtr(false), - templateContents: missingKeyTemplateContent, - expectTemplateRender: missingKeyTemplateRender, - templateErrorOnMissingKey: false, - expectError: false, - expectExitFromError: false, - }, - "false, with missing key, with error_on_missing_key": { - exitOnRetryFailure: pointerutil.BoolPtr(false), - templateContents: missingKeyTemplateContent, - expectTemplateRender: missingKeyTemplateRender, - templateErrorOnMissingKey: true, - expectError: true, - expectExitFromError: false, - }, - "missing": { - exitOnRetryFailure: nil, - templateContents: templateContents(0), - expectTemplateRender: templateRendered(0), - templateErrorOnMissingKey: false, - expectError: false, - expectExitFromError: false, - }, - } - - for tcName, tc := range testCases { - t.Run(tcName, func(t *testing.T) { - // create temp dir for this test run - tmpDir, err := os.MkdirTemp(tmpDirRoot, tcName) - if err != nil { - t.Fatal(err) - } - - listenAddr := generateListenerAddress(t) - listenConfig := fmt.Sprintf(` -listener "tcp" { - address = "%s" - tls_disable = true -} -`, listenAddr) - - var exitOnRetryFailure string - if tc.exitOnRetryFailure != nil { - exitOnRetryFailure = fmt.Sprintf("exit_on_retry_failure = %t", *tc.exitOnRetryFailure) - } - templateConfig := fmt.Sprintf(` -template_config = { - %s -} 
-`, exitOnRetryFailure) - - template := fmt.Sprintf(` -template { - contents = < + + {{#if this.isOpen}} +
+ +
+ +
+ +
+ {{/if}} + +{{/unless}} \ No newline at end of file diff --git a/command/agentproxyshared/cache/cacheboltdb/bolt.go b/command/agentproxyshared/cache/cacheboltdb/bolt.go index 6100ef896298..8d06e36dcd86 100644 --- a/command/agentproxyshared/cache/cacheboltdb/bolt.go +++ b/command/agentproxyshared/cache/cacheboltdb/bolt.go @@ -1,448 +1,204 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package cacheboltdb - -import ( - "context" - "encoding/binary" - "fmt" - "os" - "path/filepath" - "time" - - "github.com/golang/protobuf/proto" - "github.com/hashicorp/go-hclog" - wrapping "github.com/hashicorp/go-kms-wrapping/v2" - "github.com/hashicorp/go-multierror" - bolt "go.etcd.io/bbolt" -) - -const ( - // Keep track of schema version for future migrations - storageVersionKey = "version" - storageVersion = "2" // v2 merges auth-lease and secret-lease buckets into one ordered bucket - - // DatabaseFileName - filename for the persistent cache file - DatabaseFileName = "vault-agent-cache.db" - - // metaBucketName - naming the meta bucket that holds the version and - // bootstrapping keys - metaBucketName = "meta" - - // DEPRECATED: secretLeaseType - v1 Bucket/type for leases with secret info - secretLeaseType = "secret-lease" - - // DEPRECATED: authLeaseType - v1 Bucket/type for leases with auth info - authLeaseType = "auth-lease" - - // TokenType - Bucket/type for auto-auth tokens - TokenType = "token" - - // StaticSecretType - Bucket/type for static secrets - StaticSecretType = "static-secret" - - // TokenCapabilitiesType - Bucket/type for the token capabilities that - // are used to govern access to static secrets. These will be updated - // periodically to ensure that access to the cached secret remains. - TokenCapabilitiesType = "token-capabilities" - - // LeaseType - v2 Bucket/type for auth AND secret leases. 
- // - // This bucket stores keys in the same order they were created using - // auto-incrementing keys and the fact that BoltDB stores keys in byte - // slice order. This means when we iterate through this bucket during - // restore, we will always restore parent tokens before their children, - // allowing us to correctly attach child contexts to their parent's context. - LeaseType = "lease" - - // lookupType - v2 Bucket/type to map from a memcachedb index ID to an - // auto-incrementing BoltDB key. Facilitates deletes from the lease - // bucket using an ID instead of the auto-incrementing BoltDB key. - lookupType = "lookup" - - // AutoAuthToken - key for the latest auto-auth token - AutoAuthToken = "auto-auth-token" - - // RetrievalTokenMaterial is the actual key or token in the key bucket - RetrievalTokenMaterial = "retrieval-token-material" -) - -// BoltStorage is a persistent cache using a bolt db. Items are organized with -// the version and bootstrapping items in the "meta" bucket, and tokens, auth -// leases, and secret leases in their own buckets. -type BoltStorage struct { - db *bolt.DB - logger hclog.Logger - wrapper wrapping.Wrapper - aad string -} - -// BoltStorageConfig is the collection of input parameters for setting up bolt -// storage -type BoltStorageConfig struct { - Path string - Logger hclog.Logger - Wrapper wrapping.Wrapper - AAD string -} - -// NewBoltStorage opens a new bolt db at the specified file path and returns it. -// If the db already exists the buckets will just be created if they don't -// exist. 
-func NewBoltStorage(config *BoltStorageConfig) (*BoltStorage, error) { - dbPath := filepath.Join(config.Path, DatabaseFileName) - db, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: 1 * time.Second}) - if err != nil { - return nil, err - } - err = db.Update(func(tx *bolt.Tx) error { - return createBoltSchema(tx, storageVersion) - }) - if err != nil { - return nil, err - } - bs := &BoltStorage{ - db: db, - logger: config.Logger, - wrapper: config.Wrapper, - aad: config.AAD, - } - return bs, nil -} - -func createBoltSchema(tx *bolt.Tx, createVersion string) error { - switch { - case createVersion == "1": - if err := createV1BoltSchema(tx); err != nil { - return err - } - case createVersion == "2": - if err := createV2BoltSchema(tx); err != nil { - return err - } - default: - return fmt.Errorf("schema version %s not supported", createVersion) - } - - meta, err := tx.CreateBucketIfNotExists([]byte(metaBucketName)) - if err != nil { - return fmt.Errorf("failed to create bucket %s: %w", metaBucketName, err) - } - - // Check and set file version in the meta bucket. - version := meta.Get([]byte(storageVersionKey)) - switch { - case version == nil: - err = meta.Put([]byte(storageVersionKey), []byte(createVersion)) - if err != nil { - return fmt.Errorf("failed to set storage version: %w", err) - } - - return nil - - case string(version) == createVersion: - return nil - - case string(version) == "1" && createVersion == "2": - return migrateFromV1ToV2Schema(tx) - - default: - return fmt.Errorf("storage migration from %s to %s not implemented", string(version), createVersion) - } -} - -func createV1BoltSchema(tx *bolt.Tx) error { - // Create the buckets for tokens and leases. 
- for _, bucket := range []string{TokenType, authLeaseType, secretLeaseType} { - if _, err := tx.CreateBucketIfNotExists([]byte(bucket)); err != nil { - return fmt.Errorf("failed to create %s bucket: %w", bucket, err) - } - } - - return nil -} - -func createV2BoltSchema(tx *bolt.Tx) error { - // Create the buckets for tokens and leases. - for _, bucket := range []string{TokenType, LeaseType, lookupType, StaticSecretType, TokenCapabilitiesType} { - if _, err := tx.CreateBucketIfNotExists([]byte(bucket)); err != nil { - return fmt.Errorf("failed to create %s bucket: %w", bucket, err) - } - } - - return nil -} - -func migrateFromV1ToV2Schema(tx *bolt.Tx) error { - if err := createV2BoltSchema(tx); err != nil { - return err - } - - for _, v1BucketType := range []string{authLeaseType, secretLeaseType} { - if bucket := tx.Bucket([]byte(v1BucketType)); bucket != nil { - bucket.ForEach(func(key, value []byte) error { - autoIncKey, err := autoIncrementedLeaseKey(tx, string(key)) - if err != nil { - return fmt.Errorf("error migrating %s %q key to auto incremented key: %w", v1BucketType, string(key), err) - } - if err := tx.Bucket([]byte(LeaseType)).Put(autoIncKey, value); err != nil { - return fmt.Errorf("error migrating %s %q from v1 to v2 schema: %w", v1BucketType, string(key), err) - } - return nil - }) - - if err := tx.DeleteBucket([]byte(v1BucketType)); err != nil { - return fmt.Errorf("failed to clean up %s bucket during v1 to v2 schema migration: %w", v1BucketType, err) - } - } - } - - meta, err := tx.CreateBucketIfNotExists([]byte(metaBucketName)) - if err != nil { - return fmt.Errorf("failed to create meta bucket: %w", err) - } - if err := meta.Put([]byte(storageVersionKey), []byte(storageVersion)); err != nil { - return fmt.Errorf("failed to update schema from v1 to v2: %w", err) - } - - return nil -} - -func autoIncrementedLeaseKey(tx *bolt.Tx, id string) ([]byte, error) { - leaseBucket := tx.Bucket([]byte(LeaseType)) - keyValue, err := leaseBucket.NextSequence() 
- if err != nil { - return nil, fmt.Errorf("failed to generate lookup key for id %q: %w", id, err) - } - - key := make([]byte, 8) - // MUST be big endian, because keys are ordered by byte slice comparison - // which progressively compares each byte in the slice starting at index 0. - // BigEndian in the range [255-257] looks like this: - // [0 0 0 0 0 0 0 255] - // [0 0 0 0 0 0 1 0] - // [0 0 0 0 0 0 1 1] - // LittleEndian in the same range looks like this: - // [255 0 0 0 0 0 0 0] - // [0 1 0 0 0 0 0 0] - // [1 1 0 0 0 0 0 0] - binary.BigEndian.PutUint64(key, keyValue) - - err = tx.Bucket([]byte(lookupType)).Put([]byte(id), key) - if err != nil { - return nil, err - } - - return key, nil -} - -// Set an index (token or lease) in bolt storage -func (b *BoltStorage) Set(ctx context.Context, id string, plaintext []byte, indexType string) error { - blob, err := b.wrapper.Encrypt(ctx, plaintext, wrapping.WithAad([]byte(b.aad))) - if err != nil { - return fmt.Errorf("error encrypting %s index: %w", indexType, err) - } - - protoBlob, err := proto.Marshal(blob) - if err != nil { - return err - } - - return b.db.Update(func(tx *bolt.Tx) error { - var key []byte - switch indexType { - case LeaseType: - // If this is a lease type, generate an auto-incrementing key and - // store an ID -> key lookup entry - key, err = autoIncrementedLeaseKey(tx, id) - if err != nil { - return err - } - case TokenType: - // If this is an auto-auth token, also stash it in the meta bucket for - // easy retrieval upon restore - key = []byte(id) - meta := tx.Bucket([]byte(metaBucketName)) - if err := meta.Put([]byte(AutoAuthToken), protoBlob); err != nil { - return fmt.Errorf("failed to set latest auto-auth token: %w", err) - } - case StaticSecretType: - key = []byte(id) - case TokenCapabilitiesType: - key = []byte(id) - default: - return fmt.Errorf("called Set for unsupported type %q", indexType) - } - s := tx.Bucket([]byte(indexType)) - if s == nil { - return fmt.Errorf("bucket %q not found", 
indexType) - } - return s.Put(key, protoBlob) - }) -} - -// Delete an index (token or lease) by key from bolt storage -func (b *BoltStorage) Delete(id string, indexType string) error { - return b.db.Update(func(tx *bolt.Tx) error { - key := []byte(id) - if indexType == LeaseType { - key = tx.Bucket([]byte(lookupType)).Get(key) - if key == nil { - return fmt.Errorf("failed to lookup bolt DB key for id %q", id) - } - - err := tx.Bucket([]byte(lookupType)).Delete([]byte(id)) - if err != nil { - return fmt.Errorf("failed to delete %q from lookup bucket: %w", id, err) - } - } - - bucket := tx.Bucket([]byte(indexType)) - if bucket == nil { - return fmt.Errorf("bucket %q not found during delete", indexType) - } - if err := bucket.Delete(key); err != nil { - return fmt.Errorf("failed to delete %q from %q bucket: %w", id, indexType, err) - } - b.logger.Trace("deleted index from bolt db", "id", id) - return nil - }) -} - -func (b *BoltStorage) decrypt(ctx context.Context, ciphertext []byte) ([]byte, error) { - var blob wrapping.BlobInfo - if err := proto.Unmarshal(ciphertext, &blob); err != nil { - return nil, err - } - - return b.wrapper.Decrypt(ctx, &blob, wrapping.WithAad([]byte(b.aad))) -} - -// GetByType returns a list of stored items of the specified type -func (b *BoltStorage) GetByType(ctx context.Context, indexType string) ([][]byte, error) { - var returnBytes [][]byte - - err := b.db.View(func(tx *bolt.Tx) error { - var errors *multierror.Error - - bucket := tx.Bucket([]byte(indexType)) - if bucket == nil { - return fmt.Errorf("bucket %q not found", indexType) - } - bucket.ForEach(func(key, ciphertext []byte) error { - plaintext, err := b.decrypt(ctx, ciphertext) - if err != nil { - errors = multierror.Append(errors, fmt.Errorf("error decrypting entry %s: %w", key, err)) - return nil - } - - returnBytes = append(returnBytes, plaintext) - return nil - }) - return errors.ErrorOrNil() - }) - - return returnBytes, err -} - -// GetAutoAuthToken retrieves the latest 
auto-auth token, and returns nil if non -// exists yet -func (b *BoltStorage) GetAutoAuthToken(ctx context.Context) ([]byte, error) { - var encryptedToken []byte - - err := b.db.View(func(tx *bolt.Tx) error { - meta := tx.Bucket([]byte(metaBucketName)) - if meta == nil { - return fmt.Errorf("bucket %q not found", metaBucketName) - } - value := meta.Get([]byte(AutoAuthToken)) - if value != nil { - encryptedToken = make([]byte, len(value)) - copy(encryptedToken, value) - } - return nil - }) - if err != nil { - return nil, err - } - - if encryptedToken == nil { - return nil, nil - } - - plaintext, err := b.decrypt(ctx, encryptedToken) - if err != nil { - return nil, fmt.Errorf("failed to decrypt auto-auth token: %w", err) - } - return plaintext, nil -} - -// GetRetrievalToken retrieves a plaintext token from the KeyBucket, which will -// be used by the key manager to retrieve the encryption key, nil if none set -func (b *BoltStorage) GetRetrievalToken() ([]byte, error) { - var token []byte - - err := b.db.View(func(tx *bolt.Tx) error { - metaBucket := tx.Bucket([]byte(metaBucketName)) - if metaBucket == nil { - return fmt.Errorf("bucket %q not found", metaBucketName) - } - value := metaBucket.Get([]byte(RetrievalTokenMaterial)) - if value != nil { - token = make([]byte, len(value)) - copy(token, value) - } - return nil - }) - if err != nil { - return nil, err - } - - return token, err -} - -// StoreRetrievalToken sets plaintext token material in the RetrievalTokenBucket -func (b *BoltStorage) StoreRetrievalToken(token []byte) error { - return b.db.Update(func(tx *bolt.Tx) error { - bucket := tx.Bucket([]byte(metaBucketName)) - if bucket == nil { - return fmt.Errorf("bucket %q not found", metaBucketName) - } - return bucket.Put([]byte(RetrievalTokenMaterial), token) - }) -} - -// Close the boltdb -func (b *BoltStorage) Close() error { - b.logger.Trace("closing bolt db", "path", b.db.Path()) - return b.db.Close() -} - -// Clear the boltdb by deleting all the token and 
lease buckets and recreating -// the schema/layout -func (b *BoltStorage) Clear() error { - return b.db.Update(func(tx *bolt.Tx) error { - for _, name := range []string{TokenType, LeaseType, lookupType, StaticSecretType, TokenCapabilitiesType} { - b.logger.Trace("deleting bolt bucket", "name", name) - if err := tx.DeleteBucket([]byte(name)); err != nil { - return err - } - } - return createBoltSchema(tx, storageVersion) - }) -} - -// DBFileExists checks whether the vault agent cache file at `filePath` exists -func DBFileExists(path string) (bool, error) { - checkFile, err := os.OpenFile(filepath.Join(path, DatabaseFileName), os.O_RDWR, 0o600) - defer checkFile.Close() - switch { - case err == nil: - return true, nil - case os.IsNotExist(err): - return false, nil - default: - return false, fmt.Errorf("failed to check if bolt file exists at path %s: %w", path, err) - } -} +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + +
+ {{#if (and this.waitingForOktaNumberChallenge (not this.cancelAuthForOktaNumberChallenge))}} + + {{else}} + {{#if this.hasMethodsWithPath}} + + {{/if}} +
+ + {{#if this.selectedAuthBackend.path}} +
+

{{this.selectedAuthBackend.path}}

+ + {{this.selectedAuthBackend.mountDescription}} + +
+ {{/if}} + {{#if (or (not this.hasMethodsWithPath) (not this.selectedAuthIsPath))}} + +
+
+ {{else if (eq this.providerName "token")}} +
+ +
+ +
+
+ {{else}} +
+ +
+ +
+
+
+ +
+ +
+
+ {{/if}} + {{#if (not-eq this.selectedAuthBackend.type "token")}} + + {{/if}} + + {{#if (and this.delayAuthMessageReminder.isIdle this.showLoading)}} + + {{/if}} + + {{/if}} + + {{/if}} + \ No newline at end of file diff --git a/command/agentproxyshared/cache/cacheboltdb/bolt_test.go b/command/agentproxyshared/cache/cacheboltdb/bolt_test.go index dbfafdce7bb4..c58de16aba1b 100644 --- a/command/agentproxyshared/cache/cacheboltdb/bolt_test.go +++ b/command/agentproxyshared/cache/cacheboltdb/bolt_test.go @@ -1,400 +1,68 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package cacheboltdb - -import ( - "context" - "fmt" - "os" - "path" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/golang/protobuf/proto" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/vault/command/agentproxyshared/cache/keymanager" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - bolt "go.etcd.io/bbolt" -) - -func getTestKeyManager(t *testing.T) keymanager.KeyManager { - t.Helper() - - km, err := keymanager.NewPassthroughKeyManager(context.Background(), nil) - require.NoError(t, err) - - return km -} - -func TestBolt_SetGet(t *testing.T) { - ctx := context.Background() - - path, err := os.MkdirTemp("", "bolt-test") - require.NoError(t, err) - defer os.RemoveAll(path) - - b, err := NewBoltStorage(&BoltStorageConfig{ - Path: path, - Logger: hclog.Default(), - Wrapper: getTestKeyManager(t).Wrapper(), - }) - require.NoError(t, err) - - secrets, err := b.GetByType(ctx, LeaseType) - assert.NoError(t, err) - require.Len(t, secrets, 0) - - err = b.Set(ctx, "test1", []byte("hello"), LeaseType) - assert.NoError(t, err) - secrets, err = b.GetByType(ctx, LeaseType) - assert.NoError(t, err) - require.Len(t, secrets, 1) - assert.Equal(t, []byte("hello"), secrets[0]) -} - -func TestBoltDelete(t *testing.T) { - ctx := context.Background() - - path, err := os.MkdirTemp("", "bolt-test") - require.NoError(t, err) - defer 
os.RemoveAll(path) - - b, err := NewBoltStorage(&BoltStorageConfig{ - Path: path, - Logger: hclog.Default(), - Wrapper: getTestKeyManager(t).Wrapper(), - }) - require.NoError(t, err) - - err = b.Set(ctx, "secret-test1", []byte("hello1"), LeaseType) - require.NoError(t, err) - err = b.Set(ctx, "secret-test2", []byte("hello2"), LeaseType) - require.NoError(t, err) - - secrets, err := b.GetByType(ctx, LeaseType) - require.NoError(t, err) - assert.Len(t, secrets, 2) - assert.ElementsMatch(t, [][]byte{[]byte("hello1"), []byte("hello2")}, secrets) - - err = b.Delete("secret-test1", LeaseType) - require.NoError(t, err) - secrets, err = b.GetByType(ctx, LeaseType) - require.NoError(t, err) - require.Len(t, secrets, 1) - assert.Equal(t, []byte("hello2"), secrets[0]) -} - -func TestBoltClear(t *testing.T) { - ctx := context.Background() - - path, err := os.MkdirTemp("", "bolt-test") - require.NoError(t, err) - defer os.RemoveAll(path) - - b, err := NewBoltStorage(&BoltStorageConfig{ - Path: path, - Logger: hclog.Default(), - Wrapper: getTestKeyManager(t).Wrapper(), - }) - require.NoError(t, err) - - // Populate the bolt db - err = b.Set(ctx, "secret-test1", []byte("hello1"), LeaseType) - require.NoError(t, err) - secrets, err := b.GetByType(ctx, LeaseType) - require.NoError(t, err) - require.Len(t, secrets, 1) - assert.Equal(t, []byte("hello1"), secrets[0]) - - err = b.Set(ctx, "auth-test1", []byte("hello2"), LeaseType) - require.NoError(t, err) - auths, err := b.GetByType(ctx, LeaseType) - require.NoError(t, err) - require.Len(t, auths, 2) - assert.Equal(t, []byte("hello1"), auths[0]) - assert.Equal(t, []byte("hello2"), auths[1]) - - err = b.Set(ctx, "token-test1", []byte("hello"), TokenType) - require.NoError(t, err) - tokens, err := b.GetByType(ctx, TokenType) - require.NoError(t, err) - require.Len(t, tokens, 1) - assert.Equal(t, []byte("hello"), tokens[0]) - - err = b.Set(ctx, "static-secret", []byte("hello"), StaticSecretType) - require.NoError(t, err) - staticSecrets, 
err := b.GetByType(ctx, StaticSecretType) - require.NoError(t, err) - require.Len(t, staticSecrets, 1) - assert.Equal(t, []byte("hello"), staticSecrets[0]) - - err = b.Set(ctx, "capabilities-index", []byte("hello"), TokenCapabilitiesType) - require.NoError(t, err) - capabilities, err := b.GetByType(ctx, TokenCapabilitiesType) - require.NoError(t, err) - require.Len(t, capabilities, 1) - assert.Equal(t, []byte("hello"), capabilities[0]) - - // Clear the bolt db, and check that it's indeed clear - err = b.Clear() - require.NoError(t, err) - auths, err = b.GetByType(ctx, LeaseType) - require.NoError(t, err) - assert.Len(t, auths, 0) - tokens, err = b.GetByType(ctx, TokenType) - require.NoError(t, err) - assert.Len(t, tokens, 0) - staticSecrets, err = b.GetByType(ctx, StaticSecretType) - require.NoError(t, err) - require.Len(t, staticSecrets, 0) - capabilities, err = b.GetByType(ctx, TokenCapabilitiesType) - require.NoError(t, err) - require.Len(t, capabilities, 0) -} - -func TestBoltSetAutoAuthToken(t *testing.T) { - ctx := context.Background() - - path, err := os.MkdirTemp("", "bolt-test") - require.NoError(t, err) - defer os.RemoveAll(path) - - b, err := NewBoltStorage(&BoltStorageConfig{ - Path: path, - Logger: hclog.Default(), - Wrapper: getTestKeyManager(t).Wrapper(), - }) - require.NoError(t, err) - - token, err := b.GetAutoAuthToken(ctx) - assert.NoError(t, err) - assert.Nil(t, token) - - // set first token - err = b.Set(ctx, "token-test1", []byte("hello 1"), TokenType) - require.NoError(t, err) - secrets, err := b.GetByType(ctx, TokenType) - require.NoError(t, err) - require.Len(t, secrets, 1) - assert.Equal(t, []byte("hello 1"), secrets[0]) - token, err = b.GetAutoAuthToken(ctx) - assert.NoError(t, err) - assert.Equal(t, []byte("hello 1"), token) - - // set second token - err = b.Set(ctx, "token-test2", []byte("hello 2"), TokenType) - require.NoError(t, err) - secrets, err = b.GetByType(ctx, TokenType) - require.NoError(t, err) - require.Len(t, secrets, 2) - 
assert.ElementsMatch(t, [][]byte{[]byte("hello 1"), []byte("hello 2")}, secrets) - token, err = b.GetAutoAuthToken(ctx) - assert.NoError(t, err) - assert.Equal(t, []byte("hello 2"), token) -} - -func TestDBFileExists(t *testing.T) { - testCases := []struct { - name string - mkDir bool - createFile bool - expectExist bool - }{ - { - name: "all exists", - mkDir: true, - createFile: true, - expectExist: true, - }, - { - name: "dir exist, file missing", - mkDir: true, - createFile: false, - expectExist: false, - }, - { - name: "all missing", - mkDir: false, - createFile: false, - expectExist: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - var tmpPath string - var err error - if tc.mkDir { - tmpPath, err = os.MkdirTemp("", "test-db-path") - require.NoError(t, err) - } - if tc.createFile { - err = os.WriteFile(path.Join(tmpPath, DatabaseFileName), []byte("test-db-path"), 0o600) - require.NoError(t, err) - } - exists, err := DBFileExists(tmpPath) - assert.NoError(t, err) - assert.Equal(t, tc.expectExist, exists) - }) - } -} - -func Test_SetGetRetrievalToken(t *testing.T) { - testCases := []struct { - name string - tokenToSet []byte - expectedToken []byte - }{ - { - name: "normal set and get", - tokenToSet: []byte("test token"), - expectedToken: []byte("test token"), - }, - { - name: "no token set", - tokenToSet: nil, - expectedToken: nil, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - path, err := os.MkdirTemp("", "bolt-test") - require.NoError(t, err) - defer os.RemoveAll(path) - - b, err := NewBoltStorage(&BoltStorageConfig{ - Path: path, - Logger: hclog.Default(), - Wrapper: getTestKeyManager(t).Wrapper(), - }) - require.NoError(t, err) - defer b.Close() - - if tc.tokenToSet != nil { - err := b.StoreRetrievalToken(tc.tokenToSet) - require.NoError(t, err) - } - gotKey, err := b.GetRetrievalToken() - assert.NoError(t, err) - assert.Equal(t, tc.expectedToken, gotKey) - }) - } -} - -func 
TestBolt_MigrateFromV1ToV2Schema(t *testing.T) { - ctx := context.Background() - - path, err := os.MkdirTemp("", "bolt-test") - require.NoError(t, err) - defer os.RemoveAll(path) - - dbPath := filepath.Join(path, DatabaseFileName) - db, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: 1 * time.Second}) - require.NoError(t, err) - err = db.Update(func(tx *bolt.Tx) error { - return createBoltSchema(tx, "1") - }) - require.NoError(t, err) - b := &BoltStorage{ - db: db, - logger: hclog.Default(), - wrapper: getTestKeyManager(t).Wrapper(), - } - - // Manually insert some items into the v1 schema. - err = db.Update(func(tx *bolt.Tx) error { - blob, err := b.wrapper.Encrypt(ctx, []byte("ignored-contents")) - if err != nil { - return fmt.Errorf("error encrypting contents: %w", err) - } - protoBlob, err := proto.Marshal(blob) - if err != nil { - return err - } - - if err := tx.Bucket([]byte(authLeaseType)).Put([]byte("test-auth-id-1"), protoBlob); err != nil { - return err - } - if err := tx.Bucket([]byte(authLeaseType)).Put([]byte("test-auth-id-2"), protoBlob); err != nil { - return err - } - if err := tx.Bucket([]byte(secretLeaseType)).Put([]byte("test-secret-id-1"), protoBlob); err != nil { - return err - } - - return nil - }) - require.NoError(t, err) - - // Check we have the contents we would expect for the v1 schema. - leases, err := b.GetByType(ctx, authLeaseType) - require.NoError(t, err) - assert.Len(t, leases, 2) - leases, err = b.GetByType(ctx, secretLeaseType) - require.NoError(t, err) - assert.Len(t, leases, 1) - leases, err = b.GetByType(ctx, LeaseType) - require.Error(t, err) - assert.True(t, strings.Contains(err.Error(), "not found")) - - // Now migrate to the v2 schema. - err = db.Update(migrateFromV1ToV2Schema) - require.NoError(t, err) - - // Check all the leases have been migrated into one bucket. 
- leases, err = b.GetByType(ctx, authLeaseType) - require.Error(t, err) - assert.True(t, strings.Contains(err.Error(), "not found")) - leases, err = b.GetByType(ctx, secretLeaseType) - require.Error(t, err) - assert.True(t, strings.Contains(err.Error(), "not found")) - leases, err = b.GetByType(ctx, LeaseType) - require.NoError(t, err) - assert.Len(t, leases, 3) -} - -func TestBolt_MigrateFromInvalidToV2Schema(t *testing.T) { - ctx := context.Background() - - path, err := os.MkdirTemp("", "bolt-test") - require.NoError(t, err) - defer os.RemoveAll(path) - - dbPath := filepath.Join(path, DatabaseFileName) - db, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: 1 * time.Second}) - require.NoError(t, err) - b := &BoltStorage{ - db: db, - logger: hclog.Default(), - wrapper: getTestKeyManager(t).Wrapper(), - } - - // All GetByType calls should fail as there's no schema - for _, bucket := range []string{authLeaseType, secretLeaseType, LeaseType} { - _, err = b.GetByType(ctx, bucket) - require.Error(t, err) - assert.True(t, strings.Contains(err.Error(), "not found")) - } - - // Now migrate to the v2 schema. - err = db.Update(migrateFromV1ToV2Schema) - require.NoError(t, err) - - // Deprecated auth and secret lease buckets still shouldn't exist - // All GetByType calls should fail as there's no schema - for _, bucket := range []string{authLeaseType, secretLeaseType} { - _, err = b.GetByType(ctx, bucket) - require.Error(t, err) - assert.True(t, strings.Contains(err.Error(), "not found")) - } - - // GetByType for LeaseType should now return an empty result - leases, err := b.GetByType(ctx, LeaseType) - require.NoError(t, err) - require.Len(t, leases, 0) -} +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + +
+
+ +
+ +
+ +
+ {{#unless this.isOIDC}} +
+ +
+ +
+
+ {{/unless}} +
+ {{yield}} +
+ + {{#if this.isOIDC}} + + {{else}} + + {{/if}} + \ No newline at end of file diff --git a/command/agentproxyshared/cache/lease_cache.go b/command/agentproxyshared/cache/lease_cache.go index 1b6dcc1e1147..ec455509f736 100644 --- a/command/agentproxyshared/cache/lease_cache.go +++ b/command/agentproxyshared/cache/lease_cache.go @@ -1,1753 +1,1300 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package cache - -import ( - "bufio" - "bytes" - "context" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-secure-stdlib/base62" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/agentproxyshared/cache/cacheboltdb" - "github.com/hashicorp/vault/command/agentproxyshared/cache/cachememdb" - "github.com/hashicorp/vault/helper/namespace" - nshelper "github.com/hashicorp/vault/helper/namespace" - vaulthttp "github.com/hashicorp/vault/http" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/cryptoutil" - "github.com/hashicorp/vault/sdk/helper/jsonutil" - "github.com/hashicorp/vault/sdk/helper/locksutil" - "github.com/hashicorp/vault/sdk/logical" - gocache "github.com/patrickmn/go-cache" - "go.uber.org/atomic" -) - -const ( - vaultPathTokenCreate = "/v1/auth/token/create" - vaultPathTokenRevoke = "/v1/auth/token/revoke" - vaultPathTokenRevokeSelf = "/v1/auth/token/revoke-self" - vaultPathTokenRevokeAccessor = "/v1/auth/token/revoke-accessor" - vaultPathTokenRevokeOrphan = "/v1/auth/token/revoke-orphan" - vaultPathTokenLookup = "/v1/auth/token/lookup" - vaultPathTokenLookupSelf = "/v1/auth/token/lookup-self" - vaultPathTokenRenew = "/v1/auth/token/renew" - vaultPathTokenRenewSelf = "/v1/auth/token/renew-self" - vaultPathLeaseRevoke = "/v1/sys/leases/revoke" - vaultPathLeaseRevokeForce = "/v1/sys/leases/revoke-force" - 
vaultPathLeaseRevokePrefix = "/v1/sys/leases/revoke-prefix" -) - -var ( - contextIndexID = contextIndex{} - errInvalidType = errors.New("invalid type provided") - revocationPaths = []string{ - strings.TrimPrefix(vaultPathTokenRevoke, "/v1"), - strings.TrimPrefix(vaultPathTokenRevokeSelf, "/v1"), - strings.TrimPrefix(vaultPathTokenRevokeAccessor, "/v1"), - strings.TrimPrefix(vaultPathTokenRevokeOrphan, "/v1"), - strings.TrimPrefix(vaultPathLeaseRevoke, "/v1"), - strings.TrimPrefix(vaultPathLeaseRevokeForce, "/v1"), - strings.TrimPrefix(vaultPathLeaseRevokePrefix, "/v1"), - } -) - -type contextIndex struct{} - -type cacheClearRequest struct { - Type string `json:"type"` - Value string `json:"value"` - Namespace string `json:"namespace"` -} - -// LeaseCache is an implementation of Proxier that handles -// the caching of responses. It passes the incoming request -// to an underlying Proxier implementation. -type LeaseCache struct { - client *api.Client - proxier Proxier - logger hclog.Logger - db *cachememdb.CacheMemDB - baseCtxInfo *cachememdb.ContextInfo - l *sync.RWMutex - - // userAgentToUse is the user agent to use when making independent requests - // to Vault. - userAgentToUse string - - // idLocks is used during cache lookup to ensure that identical requests made - // in parallel won't trigger multiple renewal goroutines. - idLocks []*locksutil.LockEntry - - // inflightCache keeps track of inflight requests - inflightCache *gocache.Cache - - // ps is the persistent storage for tokens and leases - ps *cacheboltdb.BoltStorage - - // shuttingDown is used to determine if cache needs to be evicted or not - // when the context is cancelled - shuttingDown atomic.Bool - - // cacheStaticSecrets is used to determine if the cache should also - // cache static secrets, as well as dynamic secrets. 
- cacheStaticSecrets bool - - // cacheDynamicSecrets is used to determine if the cache should - // cache dynamic secrets - cacheDynamicSecrets bool - - // capabilityManager is used when static secrets are enabled to - // manage the capabilities of cached tokens. - capabilityManager *StaticSecretCapabilityManager -} - -// LeaseCacheConfig is the configuration for initializing a new -// LeaseCache. -type LeaseCacheConfig struct { - Client *api.Client - BaseContext context.Context - Proxier Proxier - Logger hclog.Logger - UserAgentToUse string - Storage *cacheboltdb.BoltStorage - CacheStaticSecrets bool - CacheDynamicSecrets bool -} - -type inflightRequest struct { - // ch is closed by the request that ends up processing the set of - // parallel request - ch chan struct{} - - // remaining is the number of remaining inflight request that needs to - // be processed before this object can be cleaned up - remaining *atomic.Uint64 -} - -func newInflightRequest() *inflightRequest { - return &inflightRequest{ - ch: make(chan struct{}), - remaining: atomic.NewUint64(0), - } -} - -// NewLeaseCache creates a new instance of a LeaseCache. 
-func NewLeaseCache(conf *LeaseCacheConfig) (*LeaseCache, error) { - if conf == nil { - return nil, errors.New("nil configuration provided") - } - - if conf.Proxier == nil || conf.Logger == nil { - return nil, fmt.Errorf("missing configuration required params: %v", conf) - } - - if conf.Client == nil { - return nil, fmt.Errorf("nil API client") - } - - if conf.UserAgentToUse == "" { - return nil, fmt.Errorf("no user agent specified -- see useragent.go") - } - - db, err := cachememdb.New() - if err != nil { - return nil, err - } - - // Create a base context for the lease cache layer - baseCtxInfo := cachememdb.NewContextInfo(conf.BaseContext) - - return &LeaseCache{ - client: conf.Client, - proxier: conf.Proxier, - logger: conf.Logger, - userAgentToUse: conf.UserAgentToUse, - db: db, - baseCtxInfo: baseCtxInfo, - l: &sync.RWMutex{}, - idLocks: locksutil.CreateLocks(), - inflightCache: gocache.New(gocache.NoExpiration, gocache.NoExpiration), - ps: conf.Storage, - cacheStaticSecrets: conf.CacheStaticSecrets, - cacheDynamicSecrets: conf.CacheDynamicSecrets, - }, nil -} - -// SetCapabilityManager is a setter for CapabilityManager. If set, will manage capabilities -// for capability indexes. -func (c *LeaseCache) SetCapabilityManager(capabilityManager *StaticSecretCapabilityManager) { - c.capabilityManager = capabilityManager -} - -// SetShuttingDown is a setter for the shuttingDown field -func (c *LeaseCache) SetShuttingDown(in bool) { - c.shuttingDown.Store(in) - - // Since we're shutting down, also stop the capability manager's jobs. - // We can do this forcibly since no there's no reason to update - // the cache when we're shutting down. 
- if c.capabilityManager != nil { - c.capabilityManager.Stop() - } -} - -// SetPersistentStorage is a setter for the persistent storage field in -// LeaseCache -func (c *LeaseCache) SetPersistentStorage(storageIn *cacheboltdb.BoltStorage) { - c.ps = storageIn -} - -// PersistentStorage is a getter for the persistent storage field in -// LeaseCache -func (c *LeaseCache) PersistentStorage() *cacheboltdb.BoltStorage { - return c.ps -} - -// checkCacheForDynamicSecretRequest checks the cache for a particular request based on its -// computed ID. It returns a non-nil *SendResponse if an entry is found. -func (c *LeaseCache) checkCacheForDynamicSecretRequest(id string) (*SendResponse, error) { - return c.checkCacheForRequest(id, nil) -} - -// checkCacheForStaticSecretRequest checks the cache for a particular request based on its -// computed ID. It returns a non-nil *SendResponse if an entry is found. -// If a request is provided, it will validate that the token is allowed to retrieve this -// cache entry, and return nil if it isn't. It will also evict the cache if this is a non-GET -// request. -func (c *LeaseCache) checkCacheForStaticSecretRequest(id string, req *SendRequest) (*SendResponse, error) { - return c.checkCacheForRequest(id, req) -} - -// checkCacheForRequest checks the cache for a particular request based on its -// computed ID. It returns a non-nil *SendResponse if an entry is found. -// If a token is provided, it will validate that the token is allowed to retrieve this -// cache entry, and return nil if it isn't. -func (c *LeaseCache) checkCacheForRequest(id string, req *SendRequest) (*SendResponse, error) { - index, err := c.db.Get(cachememdb.IndexNameID, id) - if errors.Is(err, cachememdb.ErrCacheItemNotFound) { - return nil, nil - } - if err != nil { - return nil, err - } - - index.IndexLock.RLock() - defer index.IndexLock.RUnlock() - - var token string - if req != nil { - // Req will be non-nil if we're checking for a static secret. 
- // Token might still be "" if it's going to an unauthenticated - // endpoint, or similar. For static secrets, we only care about - // requests with tokens attached, as KV is authenticated. - token = req.Token - } - - if token != "" { - // We are checking for a static secret. We need to ensure that this token - // has previously demonstrated access to this static secret. - // We could check the capabilities cache here, but since these - // indexes should be in sync, this saves us an extra cache get. - if _, ok := index.Tokens[token]; !ok { - // We don't have access to this static secret, so - // we do not return the cached response. - return nil, nil - } - } - - // Cached request is found, deserialize the response - reader := bufio.NewReader(bytes.NewReader(index.Response)) - resp, err := http.ReadResponse(reader, nil) - if err != nil { - c.logger.Error("failed to deserialize response", "error", err) - return nil, err - } - - sendResp, err := NewSendResponse(&api.Response{Response: resp}, index.Response) - if err != nil { - c.logger.Error("failed to create new send response", "error", err) - return nil, err - } - sendResp.CacheMeta.Hit = true - - respTime, err := http.ParseTime(resp.Header.Get("Date")) - if err != nil { - c.logger.Error("failed to parse cached response date", "error", err) - return nil, err - } - sendResp.CacheMeta.Age = time.Now().Sub(respTime) - - return sendResp, nil -} - -// Send performs a cache lookup on the incoming request. If it's a cache hit, -// it will return the cached response, otherwise it will delegate to the -// underlying Proxier and cache the received response. -func (c *LeaseCache) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) { - // Compute the index ID for both static and dynamic secrets. - // The primary difference is that for dynamic secrets, the - // Vault token forms part of the index. 
- dynamicSecretCacheId, err := computeIndexID(req) - if err != nil { - c.logger.Error("failed to compute cache key", "error", err) - return nil, err - } - staticSecretCacheId := computeStaticSecretCacheIndex(req) - - // Check the inflight cache to see if there are other inflight requests - // of the same kind, based on the computed ID. If so, we increment a counter - - // Note: we lock both the dynamic secret cache ID and the static secret cache ID - // as at this stage, we don't know what kind of secret it is. - var inflight *inflightRequest - - defer func() { - // Cleanup on the cache if there are no remaining inflight requests. - // This is the last step, so we defer the call first - if inflight != nil && inflight.remaining.Load() == 0 { - c.inflightCache.Delete(dynamicSecretCacheId) - if staticSecretCacheId != "" { - c.inflightCache.Delete(staticSecretCacheId) - } - } - }() - - idLockDynamicSecret := locksutil.LockForKey(c.idLocks, dynamicSecretCacheId) - - // Briefly grab an ID-based lock in here to emulate a load-or-store behavior - // and prevent concurrent cacheable requests from being proxied twice if - // they both miss the cache due to it being clean when peeking the cache - // entry. - idLockDynamicSecret.Lock() - inflightRaw, found := c.inflightCache.Get(dynamicSecretCacheId) - if found { - idLockDynamicSecret.Unlock() - inflight = inflightRaw.(*inflightRequest) - inflight.remaining.Inc() - defer inflight.remaining.Dec() - - // If found it means that there's an inflight request being processed. - // We wait until that's finished before proceeding further. 
- select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-inflight.ch: - } - } else { - if inflight == nil { - inflight = newInflightRequest() - inflight.remaining.Inc() - defer inflight.remaining.Dec() - defer close(inflight.ch) - } - - c.inflightCache.Set(dynamicSecretCacheId, inflight, gocache.NoExpiration) - idLockDynamicSecret.Unlock() - } - - if staticSecretCacheId != "" { - idLockStaticSecret := locksutil.LockForKey(c.idLocks, staticSecretCacheId) - - // Briefly grab an ID-based lock in here to emulate a load-or-store behavior - // and prevent concurrent cacheable requests from being proxied twice if - // they both miss the cache due to it being clean when peeking the cache - // entry. - idLockStaticSecret.Lock() - inflightRaw, found = c.inflightCache.Get(staticSecretCacheId) - if found { - idLockStaticSecret.Unlock() - inflight = inflightRaw.(*inflightRequest) - inflight.remaining.Inc() - defer inflight.remaining.Dec() - - // If found it means that there's an inflight request being processed. - // We wait until that's finished before proceeding further. 
- select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-inflight.ch: - } - } else { - if inflight == nil { - inflight = newInflightRequest() - inflight.remaining.Inc() - defer inflight.remaining.Dec() - defer close(inflight.ch) - } - - c.inflightCache.Set(staticSecretCacheId, inflight, gocache.NoExpiration) - idLockStaticSecret.Unlock() - } - } - - // Check if the response for this request is already in the dynamic secret cache - cachedResp, err := c.checkCacheForDynamicSecretRequest(dynamicSecretCacheId) - if err != nil { - return nil, err - } - if cachedResp != nil { - c.logger.Debug("returning cached response", "path", req.Request.URL.Path) - return cachedResp, nil - } - - // Check if the response for this request is already in the static secret cache - if staticSecretCacheId != "" && req.Request.Method == http.MethodGet { - cachedResp, err = c.checkCacheForStaticSecretRequest(staticSecretCacheId, req) - if err != nil { - return nil, err - } - if cachedResp != nil { - c.logger.Debug("returning cached response", "id", staticSecretCacheId, "path", req.Request.URL.Path) - return cachedResp, nil - } - } - - c.logger.Debug("forwarding request from cache", "method", req.Request.Method, "path", req.Request.URL.Path) - - // Pass the request down and get a response - resp, err := c.proxier.Send(ctx, req) - if err != nil { - return resp, err - } - - // If this is a non-2xx or if the returned response does not contain JSON payload, - // we skip caching - if resp.Response.StatusCode >= 300 || resp.Response.Header.Get("Content-Type") != "application/json" { - return resp, err - } - - // Get the namespace from the request header - namespace := req.Request.Header.Get(consts.NamespaceHeaderName) - // We need to populate an empty value since go-memdb will skip over indexes - // that contain empty values. 
- if namespace == "" { - namespace = "root/" - } - - // Build the index to cache based on the response received - index := &cachememdb.Index{ - Namespace: namespace, - RequestPath: req.Request.URL.Path, - LastRenewed: time.Now().UTC(), - } - - secret, err := api.ParseSecret(bytes.NewReader(resp.ResponseBody)) - if err != nil { - c.logger.Error("failed to parse response as secret", "error", err) - return nil, err - } - - isRevocation, err := c.handleRevocationRequest(ctx, req, resp) - if err != nil { - c.logger.Error("failed to process the response", "error", err) - return nil, err - } - - // If this is a revocation request, do not go through cache logic. - if isRevocation { - return resp, nil - } - - // Fast path for responses with no secrets - if secret == nil { - c.logger.Debug("pass-through response; no secret in response", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - - // There shouldn't be a situation where secret.MountType == "kv" and - // staticSecretCacheId == "", but just in case. - // We restrict this to GETs as those are all we want to cache. - if c.cacheStaticSecrets && secret.MountType == "kv" && - staticSecretCacheId != "" && req.Request.Method == http.MethodGet { - index.Type = cacheboltdb.StaticSecretType - index.ID = staticSecretCacheId - // We set the request path to be the canonical static secret path, so that - // two differently shaped (but equivalent) requests to the same path - // will be the same. - // This differs slightly from dynamic secrets, where the /v1/ will be - // included in the request path. 
- index.RequestPath = getStaticSecretPathFromRequest(req) - - err := c.cacheStaticSecret(ctx, req, resp, index) - if err != nil { - return nil, err - } - return resp, nil - } else { - // Since it's not a static secret, set the ID to be the dynamic id - index.ID = dynamicSecretCacheId - } - - // Short-circuit if we've been configured to not cache dynamic secrets - if !c.cacheDynamicSecrets { - return resp, nil - } - - // Short-circuit if the secret is not renewable - tokenRenewable, err := secret.TokenIsRenewable() - if err != nil { - c.logger.Error("failed to parse renewable param", "error", err) - return nil, err - } - if !secret.Renewable && !tokenRenewable { - c.logger.Debug("pass-through response; secret not renewable", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - - var renewCtxInfo *cachememdb.ContextInfo - switch { - case secret.LeaseID != "": - c.logger.Debug("processing lease response", "method", req.Request.Method, "path", req.Request.URL.Path) - entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token) - if errors.Is(err, cachememdb.ErrCacheItemNotFound) { - // If the lease belongs to a token that is not managed by the lease cache, - // return the response without caching it. - c.logger.Debug("pass-through lease response; token not managed by lease cache", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - if err != nil { - return nil, err - } - - // Derive a context for renewal using the token's context - renewCtxInfo = cachememdb.NewContextInfo(entry.RenewCtxInfo.Ctx) - - index.Lease = secret.LeaseID - index.LeaseToken = req.Token - - index.Type = cacheboltdb.LeaseType - - case secret.Auth != nil: - c.logger.Debug("processing auth response", "method", req.Request.Method, "path", req.Request.URL.Path) - - // Check if this token creation request resulted in a non-orphan token, and if so - // correctly set the parentCtx to the request's token context. 
- var parentCtx context.Context - if !secret.Auth.Orphan { - entry, err := c.db.Get(cachememdb.IndexNameToken, req.Token) - if errors.Is(err, cachememdb.ErrCacheItemNotFound) { - // If the lease belongs to a token that is not managed by the lease cache, - // return the response without caching it. - c.logger.Debug("pass-through lease response; parent token not managed by lease cache", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - if err != nil { - return nil, err - } - - c.logger.Debug("setting parent context", "method", req.Request.Method, "path", req.Request.URL.Path) - parentCtx = entry.RenewCtxInfo.Ctx - - index.TokenParent = req.Token - } - - renewCtxInfo = c.createCtxInfo(parentCtx) - index.Token = secret.Auth.ClientToken - index.TokenAccessor = secret.Auth.Accessor - - index.Type = cacheboltdb.LeaseType - - default: - // We shouldn't be hitting this, but will err on the side of caution and - // simply proxy. - c.logger.Debug("pass-through response; secret without lease and token", "method", req.Request.Method, "path", req.Request.URL.Path) - return resp, nil - } - - // Serialize the response to store it in the cached index - var respBytes bytes.Buffer - err = resp.Response.Write(&respBytes) - if err != nil { - c.logger.Error("failed to serialize response", "error", err) - return nil, err - } - - // Reset the response body for upper layers to read - if resp.Response.Body != nil { - resp.Response.Body.Close() - } - resp.Response.Body = io.NopCloser(bytes.NewReader(resp.ResponseBody)) - - // Set the index's Response - index.Response = respBytes.Bytes() - - // Store the index ID in the lifetimewatcher context - renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID) - - // Store the lifetime watcher context in the index - index.RenewCtxInfo = &cachememdb.ContextInfo{ - Ctx: renewCtx, - CancelFunc: renewCtxInfo.CancelFunc, - DoneCh: renewCtxInfo.DoneCh, - } - - // Add extra information necessary for 
restoring from persisted cache - index.RequestMethod = req.Request.Method - index.RequestToken = req.Token - index.RequestHeader = req.Request.Header - - if index.Type != cacheboltdb.StaticSecretType { - // Store the index in the cache - c.logger.Debug("storing dynamic secret response into the cache", "method", req.Request.Method, "path", req.Request.URL.Path, "id", index.ID) - err = c.Set(ctx, index) - if err != nil { - c.logger.Error("failed to cache the proxied response", "error", err) - return nil, err - } - - // Start renewing the secret in the response - go c.startRenewing(renewCtx, index, req, secret) - } - - return resp, nil -} - -func (c *LeaseCache) cacheStaticSecret(ctx context.Context, req *SendRequest, resp *SendResponse, index *cachememdb.Index) error { - // If a cached version of this secret exists, we now have access, so - // we don't need to re-cache, just update index.Tokens - indexFromCache, err := c.db.Get(cachememdb.IndexNameID, index.ID) - if err != nil && err != cachememdb.ErrCacheItemNotFound { - return err - } - - // The index already exists, so all we need to do is add our token - // to the index's allowed token list, then re-store it. - if indexFromCache != nil { - // We must hold a lock for the index while it's being updated. - // We keep the two locking mechanisms distinct, so that it's only writes - // that have to be serial. 
- indexFromCache.IndexLock.Lock() - defer indexFromCache.IndexLock.Unlock() - indexFromCache.Tokens[req.Token] = struct{}{} - - return c.storeStaticSecretIndex(ctx, req, indexFromCache) - } - - // Serialize the response to store it in the cached index - var respBytes bytes.Buffer - err = resp.Response.Write(&respBytes) - if err != nil { - c.logger.Error("failed to serialize response", "error", err) - return err - } - - // Reset the response body for upper layers to read - if resp.Response.Body != nil { - resp.Response.Body.Close() - } - resp.Response.Body = io.NopCloser(bytes.NewReader(resp.ResponseBody)) - - // Set the index's Response - index.Response = respBytes.Bytes() - - // Initialize the token map and add this token to it. - index.Tokens = map[string]struct{}{req.Token: {}} - - // Set the index type - index.Type = cacheboltdb.StaticSecretType - - return c.storeStaticSecretIndex(ctx, req, index) -} - -func (c *LeaseCache) storeStaticSecretIndex(ctx context.Context, req *SendRequest, index *cachememdb.Index) error { - // Store the index in the cache - c.logger.Debug("storing static secret response into the cache", "method", req.Request.Method, "path", req.Request.URL.Path, "id", index.ID) - err := c.Set(ctx, index) - if err != nil { - c.logger.Error("failed to cache the proxied response", "error", err) - return err - } - - capabilitiesIndex, created, err := c.retrieveOrCreateTokenCapabilitiesEntry(req.Token) - if err != nil { - c.logger.Error("failed to cache the proxied response", "error", err) - return err - } - - path := getStaticSecretPathFromRequest(req) - - // Extra caution -- avoid potential nil - if capabilitiesIndex.ReadablePaths == nil { - capabilitiesIndex.ReadablePaths = make(map[string]struct{}) - } - - // update the index with the new capability: - capabilitiesIndex.ReadablePaths[path] = struct{}{} - - err = c.SetCapabilitiesIndex(ctx, capabilitiesIndex) - if err != nil { - c.logger.Error("failed to cache token capabilities as part of caching the 
proxied response", "error", err) - return err - } - - // Lastly, ensure that we start renewing this index, if it's new. - // We require the 'created' check so that we don't renew the same - // index multiple times. - if c.capabilityManager != nil && created { - c.capabilityManager.StartRenewingCapabilities(capabilitiesIndex) - } - - return nil -} - -// retrieveOrCreateTokenCapabilitiesEntry will either retrieve the token -// capabilities entry from the cache, or create a new, empty one. -// The bool represents if a new token capability has been created. -func (c *LeaseCache) retrieveOrCreateTokenCapabilitiesEntry(token string) (*cachememdb.CapabilitiesIndex, bool, error) { - // The index ID is a hash of the token. - indexId := hashStaticSecretIndex(token) - indexFromCache, err := c.db.GetCapabilitiesIndex(cachememdb.IndexNameID, indexId) - if err != nil && err != cachememdb.ErrCacheItemNotFound { - return nil, false, err - } - - if indexFromCache != nil { - return indexFromCache, false, nil - } - - // Build the index to cache based on the response received - index := &cachememdb.CapabilitiesIndex{ - ID: indexId, - Token: token, - ReadablePaths: make(map[string]struct{}), - } - - return index, true, nil -} - -func (c *LeaseCache) createCtxInfo(ctx context.Context) *cachememdb.ContextInfo { - if ctx == nil { - c.l.RLock() - ctx = c.baseCtxInfo.Ctx - c.l.RUnlock() - } - return cachememdb.NewContextInfo(ctx) -} - -func (c *LeaseCache) startRenewing(ctx context.Context, index *cachememdb.Index, req *SendRequest, secret *api.Secret) { - defer func() { - id := ctx.Value(contextIndexID).(string) - if c.shuttingDown.Load() { - c.logger.Trace("not evicting index from cache during shutdown", "id", id, "method", req.Request.Method, "path", req.Request.URL.Path) - return - } - c.logger.Debug("evicting index from cache", "id", id, "method", req.Request.Method, "path", req.Request.URL.Path) - err := c.Evict(index) - if err != nil { - c.logger.Error("failed to evict index", "id", 
id, "error", err) - return - } - }() - - client, err := c.client.Clone() - if err != nil { - c.logger.Error("failed to create API client in the lifetime watcher", "error", err) - return - } - client.SetToken(req.Token) - - headers := client.Headers() - if headers == nil { - headers = make(http.Header) - } - - // We do not preserve any initial User-Agent here since these requests are from - // the proxy subsystem, but are made by the lease cache's lifetime watcher, - // not triggered by a specific request. - headers.Set("User-Agent", c.userAgentToUse) - client.SetHeaders(headers) - - watcher, err := client.NewLifetimeWatcher(&api.LifetimeWatcherInput{ - Secret: secret, - }) - if err != nil { - c.logger.Error("failed to create secret lifetime watcher", "error", err) - return - } - - c.logger.Debug("initiating renewal", "method", req.Request.Method, "path", req.Request.URL.Path) - go watcher.Start() - defer watcher.Stop() - - for { - select { - case <-ctx.Done(): - // This is the case which captures context cancellations from token - // and leases. Since all the contexts are derived from the agent's - // context, this will also cover the shutdown scenario. - c.logger.Debug("context cancelled; stopping lifetime watcher", "path", req.Request.URL.Path) - return - case err := <-watcher.DoneCh(): - // This case covers renewal completion and renewal errors - if err != nil { - c.logger.Error("failed to renew secret", "error", err) - return - } - c.logger.Debug("renewal halted; evicting from cache", "path", req.Request.URL.Path) - return - case <-watcher.RenewCh(): - c.logger.Debug("secret renewed", "path", req.Request.URL.Path) - if c.ps != nil { - if err := c.updateLastRenewed(ctx, index, time.Now().UTC()); err != nil { - c.logger.Warn("not able to update lastRenewed time for cached index", "id", index.ID) - } - } - case <-index.RenewCtxInfo.DoneCh: - // This case indicates the renewal process to shutdown and evict - // the cache entry. 
This is triggered when a specific secret - // renewal needs to be killed without affecting any of the derived - // context renewals. - c.logger.Debug("done channel closed") - return - } - } -} - -func (c *LeaseCache) updateLastRenewed(ctx context.Context, index *cachememdb.Index, t time.Time) error { - idLock := locksutil.LockForKey(c.idLocks, index.ID) - idLock.Lock() - defer idLock.Unlock() - - getIndex, err := c.db.Get(cachememdb.IndexNameID, index.ID) - if err != nil && err != cachememdb.ErrCacheItemNotFound { - return err - } - index.LastRenewed = t - if err := c.Set(ctx, getIndex); err != nil { - return err - } - return nil -} - -// computeIndexID results in a value that uniquely identifies a request -// received by the agent. It does so by SHA256 hashing the serialized request -// object containing the request path, query parameters and body parameters. -func computeIndexID(req *SendRequest) (string, error) { - var b bytes.Buffer - - cloned := req.Request.Clone(context.Background()) - cloned.Header.Del(vaulthttp.VaultIndexHeaderName) - cloned.Header.Del(vaulthttp.VaultForwardHeaderName) - cloned.Header.Del(vaulthttp.VaultInconsistentHeaderName) - // Serialize the request - if err := cloned.Write(&b); err != nil { - return "", fmt.Errorf("failed to serialize request: %v", err) - } - - // Reset the request body after it has been closed by Write - req.Request.Body = io.NopCloser(bytes.NewReader(req.RequestBody)) - - // Append req.Token into the byte slice. This is needed since auto-auth'ed - // requests sets the token directly into SendRequest.Token - if _, err := b.WriteString(req.Token); err != nil { - return "", fmt.Errorf("failed to write token to hash input: %w", err) - } - - return hex.EncodeToString(cryptoutil.Blake2b256Hash(string(b.Bytes()))), nil -} - -// canonicalizeStaticSecretPath takes an API request path such as -// /v1/foo/bar and a namespace, and turns it into a canonical representation -// of the secret's path in Vault. 
-// We opt for this form as namespace.Canonicalize returns a namespace in the -// form of "ns1/", so we keep consistent with path canonicalization. -func canonicalizeStaticSecretPath(requestPath string, ns string) string { - // /sys/capabilities accepts both requests that look like foo/bar - // and /foo/bar but not /v1/foo/bar. - // We trim the /v1/ from the start of the URL to get the foo/bar form. - // This means that we can use the paths we retrieve from the - // /sys/capabilities endpoint to access this index - // without having to re-add the /v1/ - path := strings.TrimPrefix(requestPath, "/v1/") - // Trim any leading slashes, as we never want those. - // This ensures /foo/bar gets turned to foo/bar - path = strings.TrimPrefix(path, "/") - - // If a namespace was provided in a way that wasn't directly in the path, - // it must be added to the path. - path = namespace.Canonicalize(ns) + path - - return path -} - -// getStaticSecretPathFromRequest gets the canonical path for a -// request, taking into account intricacies relating to /v1/ and namespaces -// in the header. -// Returns a path like foo/bar or ns1/foo/bar. -// We opt for this form as namespace.Canonicalize returns a namespace in the -// form of "ns1/", so we keep consistent with path canonicalization. -func getStaticSecretPathFromRequest(req *SendRequest) string { - path := req.Request.URL.Path - // Static secrets always have /v1 as a prefix. This enables us to - // enable a pass-through and never attempt to cache or view-from-cache - // any request without the /v1 prefix. - if !strings.HasPrefix(path, "/v1") { - return "" - } - var namespace string - if header := req.Request.Header; header != nil { - namespace = header.Get(api.NamespaceHeaderName) - } - return canonicalizeStaticSecretPath(path, namespace) -} - -// hashStaticSecretIndex is a simple function that hashes the path into -// a function. This is kept as a helper function for ease of use by downstream functions. 
-func hashStaticSecretIndex(unhashedIndex string) string { - return hex.EncodeToString(cryptoutil.Blake2b256Hash(unhashedIndex)) -} - -// computeStaticSecretCacheIndex results in a value that uniquely identifies a static -// secret's cached ID. Notably, we intentionally ignore headers (for example, -// the X-Vault-Token header) to remain agnostic to which token is being -// used in the request. We care only about the path. -// This will return "" if the index does not have a /v1 prefix, and therefore -// cannot be a static secret. -func computeStaticSecretCacheIndex(req *SendRequest) string { - path := getStaticSecretPathFromRequest(req) - if path == "" { - return path - } - return hashStaticSecretIndex(path) -} - -// HandleCacheClear returns a handlerFunc that can perform cache clearing operations. -func (c *LeaseCache) HandleCacheClear(ctx context.Context) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // If the cache is not enabled, return a 200 - if c == nil { - return - } - - // Only handle POST/PUT requests - switch r.Method { - case http.MethodPost: - case http.MethodPut: - default: - return - } - - req := new(cacheClearRequest) - if err := jsonutil.DecodeJSONFromReader(r.Body, req); err != nil { - if err == io.EOF { - err = errors.New("empty JSON provided") - } - logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse JSON input: %w", err)) - return - } - - c.logger.Debug("received cache-clear request", "type", req.Type, "namespace", req.Namespace, "value", req.Value) - - in, err := parseCacheClearInput(req) - if err != nil { - c.logger.Error("unable to parse clear input", "error", err) - logical.RespondError(w, http.StatusBadRequest, fmt.Errorf("failed to parse clear input: %w", err)) - return - } - - if err := c.handleCacheClear(ctx, in); err != nil { - // Default to 500 on error, unless the user provided an invalid type, - // which would then be a 400. 
- httpStatus := http.StatusInternalServerError - if err == errInvalidType { - httpStatus = http.StatusBadRequest - } - logical.RespondError(w, httpStatus, fmt.Errorf("failed to clear cache: %w", err)) - return - } - - return - }) -} - -func (c *LeaseCache) handleCacheClear(ctx context.Context, in *cacheClearInput) error { - if in == nil { - return errors.New("no value(s) provided to clear corresponding cache entries") - } - - switch in.Type { - case "request_path": - // For this particular case, we need to ensure that there are 2 provided - // indexers for the proper lookup. - if in.RequestPath == "" { - return errors.New("request path not provided") - } - - // The first value provided for this case will be the namespace, but if it's - // an empty value we need to overwrite it with "root/" to ensure proper - // cache lookup. - if in.Namespace == "" { - in.Namespace = "root/" - } - - // Find all the cached entries which has the given request path and - // cancel the contexts of all the respective lifetime watchers - indexes, err := c.db.GetByPrefix(cachememdb.IndexNameRequestPath, in.Namespace, in.RequestPath) - if err != nil { - return err - } - for _, index := range indexes { - // If it's a static secret, we must remove directly, as there - // is no renew func to cancel. 
- if index.Type == cacheboltdb.StaticSecretType { - err = c.db.Evict(cachememdb.IndexNameID, index.ID) - if err != nil { - return err - } - } else { - if index.RenewCtxInfo != nil { - if index.RenewCtxInfo.CancelFunc != nil { - index.RenewCtxInfo.CancelFunc() - } - } - } - } - - case "token": - if in.Token == "" { - return errors.New("token not provided") - } - - // Get the context for the given token and cancel its context - index, err := c.db.Get(cachememdb.IndexNameToken, in.Token) - if errors.Is(err, cachememdb.ErrCacheItemNotFound) { - return nil - } - if err != nil { - return err - } - - c.logger.Debug("canceling context of index attached to token") - - index.RenewCtxInfo.CancelFunc() - - case "token_accessor": - if in.TokenAccessor == "" && in.Type != cacheboltdb.StaticSecretType { - return errors.New("token accessor not provided") - } - - // Get the cached index and cancel the corresponding lifetime watcher - // context - index, err := c.db.Get(cachememdb.IndexNameTokenAccessor, in.TokenAccessor) - if errors.Is(err, cachememdb.ErrCacheItemNotFound) { - return nil - } - if err != nil { - return err - } - - c.logger.Debug("canceling context of index attached to accessor") - - index.RenewCtxInfo.CancelFunc() - - case "lease": - if in.Lease == "" { - return errors.New("lease not provided") - } - - // Get the cached index and cancel the corresponding lifetime watcher - // context - index, err := c.db.Get(cachememdb.IndexNameLease, in.Lease) - if errors.Is(err, cachememdb.ErrCacheItemNotFound) { - return nil - } - if err != nil { - return err - } - - c.logger.Debug("canceling context of index attached to accessor") - - index.RenewCtxInfo.CancelFunc() - - case "all": - // Cancel the base context which triggers all the goroutines to - // stop and evict entries from cache. 
- c.logger.Debug("canceling base context") - c.l.Lock() - c.baseCtxInfo.CancelFunc() - // Reset the base context - baseCtx, baseCancel := context.WithCancel(ctx) - c.baseCtxInfo = &cachememdb.ContextInfo{ - Ctx: baseCtx, - CancelFunc: baseCancel, - } - c.l.Unlock() - - // Reset the memdb instance (and persistent storage if enabled) - if err := c.Flush(); err != nil { - return err - } - - default: - return errInvalidType - } - - c.logger.Debug("successfully cleared matching cache entries") - - return nil -} - -// handleRevocationRequest checks whether the originating request is a -// revocation request, and if so perform applicable cache cleanups. -// Returns true is this is a revocation request. -func (c *LeaseCache) handleRevocationRequest(ctx context.Context, req *SendRequest, resp *SendResponse) (bool, error) { - // Lease and token revocations return 204's on success. Fast-path if that's - // not the case. - if resp.Response.StatusCode != http.StatusNoContent { - return false, nil - } - - _, path := deriveNamespaceAndRevocationPath(req) - - switch { - case path == vaultPathTokenRevoke: - // Get the token from the request body - jsonBody := map[string]interface{}{} - if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { - return false, err - } - tokenRaw, ok := jsonBody["token"] - if !ok { - return false, fmt.Errorf("failed to get token from request body") - } - token, ok := tokenRaw.(string) - if !ok { - return false, fmt.Errorf("expected token in the request body to be string") - } - - // Clear the cache entry associated with the token and all the other - // entries belonging to the leases derived from this token. - in := &cacheClearInput{ - Type: "token", - Token: token, - } - if err := c.handleCacheClear(ctx, in); err != nil { - return false, err - } - - case path == vaultPathTokenRevokeSelf: - // Clear the cache entry associated with the token and all the other - // entries belonging to the leases derived from this token. 
- in := &cacheClearInput{ - Type: "token", - Token: req.Token, - } - if err := c.handleCacheClear(ctx, in); err != nil { - return false, err - } - - case path == vaultPathTokenRevokeAccessor: - jsonBody := map[string]interface{}{} - if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { - return false, err - } - accessorRaw, ok := jsonBody["accessor"] - if !ok { - return false, fmt.Errorf("failed to get accessor from request body") - } - accessor, ok := accessorRaw.(string) - if !ok { - return false, fmt.Errorf("expected accessor in the request body to be string") - } - - in := &cacheClearInput{ - Type: "token_accessor", - TokenAccessor: accessor, - } - if err := c.handleCacheClear(ctx, in); err != nil { - return false, err - } - - case path == vaultPathTokenRevokeOrphan: - jsonBody := map[string]interface{}{} - if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { - return false, err - } - tokenRaw, ok := jsonBody["token"] - if !ok { - return false, fmt.Errorf("failed to get token from request body") - } - token, ok := tokenRaw.(string) - if !ok { - return false, fmt.Errorf("expected token in the request body to be string") - } - - // Kill the lifetime watchers of all the leases attached to the revoked - // token - indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLeaseToken, token) - if err != nil { - return false, err - } - for _, index := range indexes { - index.RenewCtxInfo.CancelFunc() - } - - // Kill the lifetime watchers of the revoked token - index, err := c.db.Get(cachememdb.IndexNameToken, token) - if errors.Is(err, cachememdb.ErrCacheItemNotFound) { - return true, nil - } - if err != nil { - return false, err - } - - // Indicate the lifetime watcher goroutine for this index to return. - // This will not affect the child tokens because the context is not - // getting cancelled. 
- close(index.RenewCtxInfo.DoneCh) - - // Clear the parent references of the revoked token in the entries - // belonging to the child tokens of the revoked token. - indexes, err = c.db.GetByPrefix(cachememdb.IndexNameTokenParent, token) - if err != nil { - return false, err - } - for _, index := range indexes { - index.TokenParent = "" - err = c.db.Set(index) - if err != nil { - c.logger.Error("failed to persist index", "error", err) - return false, err - } - } - - case path == vaultPathLeaseRevoke: - // TODO: Should lease present in the URL itself be considered here? - // Get the lease from the request body - jsonBody := map[string]interface{}{} - if err := json.Unmarshal(req.RequestBody, &jsonBody); err != nil { - return false, err - } - leaseIDRaw, ok := jsonBody["lease_id"] - if !ok { - return false, fmt.Errorf("failed to get lease_id from request body") - } - leaseID, ok := leaseIDRaw.(string) - if !ok { - return false, fmt.Errorf("expected lease_id the request body to be string") - } - in := &cacheClearInput{ - Type: "lease", - Lease: leaseID, - } - if err := c.handleCacheClear(ctx, in); err != nil { - return false, err - } - - case strings.HasPrefix(path, vaultPathLeaseRevokeForce): - // Trim the URL path to get the request path prefix - prefix := strings.TrimPrefix(path, vaultPathLeaseRevokeForce) - // Get all the cache indexes that use the request path containing the - // prefix and cancel the lifetime watcher context of each. 
- indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix) - if err != nil { - return false, err - } - - _, tokenNSID := namespace.SplitIDFromString(req.Token) - for _, index := range indexes { - _, leaseNSID := namespace.SplitIDFromString(index.Lease) - // Only evict leases that match the token's namespace - if tokenNSID == leaseNSID { - index.RenewCtxInfo.CancelFunc() - } - } - - case strings.HasPrefix(path, vaultPathLeaseRevokePrefix): - // Trim the URL path to get the request path prefix - prefix := strings.TrimPrefix(path, vaultPathLeaseRevokePrefix) - // Get all the cache indexes that use the request path containing the - // prefix and cancel the lifetime watcher context of each. - indexes, err := c.db.GetByPrefix(cachememdb.IndexNameLease, prefix) - if err != nil { - return false, err - } - - _, tokenNSID := namespace.SplitIDFromString(req.Token) - for _, index := range indexes { - _, leaseNSID := namespace.SplitIDFromString(index.Lease) - // Only evict leases that match the token's namespace - if tokenNSID == leaseNSID { - index.RenewCtxInfo.CancelFunc() - } - } - - default: - return false, nil - } - - c.logger.Debug("triggered caching eviction from revocation request") - - return true, nil -} - -// Set stores the index in the cachememdb, and also stores it in the persistent -// cache (if enabled) -func (c *LeaseCache) Set(ctx context.Context, index *cachememdb.Index) error { - if err := c.db.Set(index); err != nil { - return err - } - - if c.ps != nil { - plaintext, err := index.Serialize() - if err != nil { - return err - } - - if err := c.ps.Set(ctx, index.ID, plaintext, index.Type); err != nil { - return err - } - c.logger.Trace("set entry in persistent storage", "type", index.Type, "path", index.RequestPath, "id", index.ID) - } - - return nil -} - -// SetCapabilitiesIndex stores the capabilities index in the cachememdb, and also stores it in the persistent -// cache (if enabled) -func (c *LeaseCache) SetCapabilitiesIndex(ctx context.Context, 
index *cachememdb.CapabilitiesIndex) error { - if err := c.db.SetCapabilitiesIndex(index); err != nil { - return err - } - - if c.ps != nil { - plaintext, err := index.SerializeCapabilitiesIndex() - if err != nil { - return err - } - - if err := c.ps.Set(ctx, index.ID, plaintext, cacheboltdb.TokenCapabilitiesType); err != nil { - return err - } - c.logger.Trace("set entry in persistent storage", "type", cacheboltdb.TokenCapabilitiesType, "id", index.ID) - } - - return nil -} - -// Evict removes an Index from the cachememdb, and also removes it from the -// persistent cache (if enabled) -func (c *LeaseCache) Evict(index *cachememdb.Index) error { - if err := c.db.Evict(cachememdb.IndexNameID, index.ID); err != nil { - return err - } - - if c.ps != nil { - if err := c.ps.Delete(index.ID, index.Type); err != nil { - return err - } - c.logger.Trace("deleted item from persistent storage", "id", index.ID) - } - - return nil -} - -// Flush the cachememdb and persistent cache (if enabled) -func (c *LeaseCache) Flush() error { - if err := c.db.Flush(); err != nil { - return err - } - - if c.ps != nil { - c.logger.Trace("clearing persistent storage") - return c.ps.Clear() - } - - return nil -} - -// Restore loads the cachememdb from the persistent storage passed in. Loads -// tokens first, since restoring a lease's renewal context and watcher requires -// looking up the token in the cachememdb. -// Restore also restarts any capability management for managed static secret -// tokens. 
-func (c *LeaseCache) Restore(ctx context.Context, storage *cacheboltdb.BoltStorage) error { - var errs *multierror.Error - - // Process tokens first - tokens, err := storage.GetByType(ctx, cacheboltdb.TokenType) - if err != nil { - errs = multierror.Append(errs, err) - } else { - if err := c.restoreTokens(tokens); err != nil { - errs = multierror.Append(errs, err) - } - } - - // Then process leases - leases, err := storage.GetByType(ctx, cacheboltdb.LeaseType) - if err != nil { - errs = multierror.Append(errs, err) - } else { - for _, lease := range leases { - newIndex, err := cachememdb.Deserialize(lease) - if err != nil { - errs = multierror.Append(errs, err) - continue - } - - c.logger.Trace("restoring lease", "id", newIndex.ID, "path", newIndex.RequestPath) - - // Check if this lease has already expired - expired, err := c.hasExpired(time.Now().UTC(), newIndex) - if err != nil { - c.logger.Warn("failed to check if lease is expired", "id", newIndex.ID, "error", err) - } - if expired { - continue - } - - if err := c.restoreLeaseRenewCtx(newIndex); err != nil { - errs = multierror.Append(errs, err) - continue - } - if err := c.db.Set(newIndex); err != nil { - errs = multierror.Append(errs, err) - continue - } - c.logger.Trace("restored lease", "id", newIndex.ID, "path", newIndex.RequestPath) - } - } - - // Then process static secrets and their capabilities - if c.cacheStaticSecrets { - staticSecrets, err := storage.GetByType(ctx, cacheboltdb.StaticSecretType) - if err != nil { - errs = multierror.Append(errs, err) - } else { - for _, staticSecret := range staticSecrets { - newIndex, err := cachememdb.Deserialize(staticSecret) - if err != nil { - errs = multierror.Append(errs, err) - continue - } - - c.logger.Trace("restoring static secret index", "id", newIndex.ID, "path", newIndex.RequestPath) - if err := c.db.Set(newIndex); err != nil { - errs = multierror.Append(errs, err) - continue - } - } - } - - capabilityIndexes, err := storage.GetByType(ctx, 
cacheboltdb.TokenCapabilitiesType) - if err != nil { - errs = multierror.Append(errs, err) - } else { - for _, capabilityIndex := range capabilityIndexes { - newIndex, err := cachememdb.DeserializeCapabilitiesIndex(capabilityIndex) - if err != nil { - errs = multierror.Append(errs, err) - continue - } - - c.logger.Trace("restoring capability index", "id", newIndex.ID) - if err := c.db.SetCapabilitiesIndex(newIndex); err != nil { - errs = multierror.Append(errs, err) - continue - } - - if c.capabilityManager != nil { - c.capabilityManager.StartRenewingCapabilities(newIndex) - } - } - } - } - - return errs.ErrorOrNil() -} - -func (c *LeaseCache) restoreTokens(tokens [][]byte) error { - var errors *multierror.Error - - for _, token := range tokens { - newIndex, err := cachememdb.Deserialize(token) - if err != nil { - errors = multierror.Append(errors, err) - continue - } - newIndex.RenewCtxInfo = c.createCtxInfo(nil) - if err := c.db.Set(newIndex); err != nil { - errors = multierror.Append(errors, err) - continue - } - c.logger.Trace("restored token", "id", newIndex.ID) - } - - return errors.ErrorOrNil() -} - -// restoreLeaseRenewCtx re-creates a RenewCtx for an index object and starts -// the watcher go routine -func (c *LeaseCache) restoreLeaseRenewCtx(index *cachememdb.Index) error { - if index.Response == nil { - return fmt.Errorf("cached response was nil for %s", index.ID) - } - - // Parse the secret to determine which type it is - reader := bufio.NewReader(bytes.NewReader(index.Response)) - resp, err := http.ReadResponse(reader, nil) - if err != nil { - c.logger.Error("failed to deserialize response", "error", err) - return err - } - secret, err := api.ParseSecret(resp.Body) - if err != nil { - c.logger.Error("failed to parse response as secret", "error", err) - return err - } - - var renewCtxInfo *cachememdb.ContextInfo - switch { - case secret.LeaseID != "": - entry, err := c.db.Get(cachememdb.IndexNameToken, index.RequestToken) - if errors.Is(err, 
cachememdb.ErrCacheItemNotFound) { - return fmt.Errorf("could not find parent Token %s for req path %s", index.RequestToken, index.RequestPath) - } - if err != nil { - return err - } - - // Derive a context for renewal using the token's context - renewCtxInfo = cachememdb.NewContextInfo(entry.RenewCtxInfo.Ctx) - - case secret.Auth != nil: - var parentCtx context.Context - if !secret.Auth.Orphan { - entry, err := c.db.Get(cachememdb.IndexNameToken, index.RequestToken) - if errors.Is(err, cachememdb.ErrCacheItemNotFound) { - // If parent token is not managed by the cache, child shouldn't be - // either. - if entry == nil { - return fmt.Errorf("could not find parent Token %s for req path %s", index.RequestToken, index.RequestPath) - } - } - if err != nil { - return err - } - - c.logger.Debug("setting parent context", "method", index.RequestMethod, "path", index.RequestPath) - parentCtx = entry.RenewCtxInfo.Ctx - } - renewCtxInfo = c.createCtxInfo(parentCtx) - default: - // This isn't a renewable cache entry, i.e. a static secret cache entry. - // We return, because there's nothing to do. - return nil - } - - renewCtx := context.WithValue(renewCtxInfo.Ctx, contextIndexID, index.ID) - index.RenewCtxInfo = &cachememdb.ContextInfo{ - Ctx: renewCtx, - CancelFunc: renewCtxInfo.CancelFunc, - DoneCh: renewCtxInfo.DoneCh, - } - - sendReq := &SendRequest{ - Token: index.RequestToken, - Request: &http.Request{ - Header: index.RequestHeader, - Method: index.RequestMethod, - URL: &url.URL{ - Path: index.RequestPath, - }, - }, - } - go c.startRenewing(renewCtx, index, sendReq, secret) - - return nil -} - -// deriveNamespaceAndRevocationPath returns the namespace and relative path for -// revocation paths. -// -// If the path contains a namespace, but it's not a revocation path, it will be -// returned as-is, since there's no way to tell where the namespace ends and -// where the request path begins purely based off a string. 
-// -// Case 1: /v1/ns1/leases/revoke -> ns1/, /v1/leases/revoke -// Case 2: ns1/ /v1/leases/revoke -> ns1/, /v1/leases/revoke -// Case 3: /v1/ns1/foo/bar -> root/, /v1/ns1/foo/bar -// Case 4: ns1/ /v1/foo/bar -> ns1/, /v1/foo/bar -func deriveNamespaceAndRevocationPath(req *SendRequest) (string, string) { - namespace := "root/" - nsHeader := req.Request.Header.Get(consts.NamespaceHeaderName) - if nsHeader != "" { - namespace = nsHeader - } - - fullPath := req.Request.URL.Path - nonVersionedPath := strings.TrimPrefix(fullPath, "/v1") - - for _, pathToCheck := range revocationPaths { - // We use strings.Contains here for paths that can contain - // vars in the path, e.g. /v1/lease/revoke-prefix/:prefix - i := strings.Index(nonVersionedPath, pathToCheck) - // If there's no match, move on to the next check - if i == -1 { - continue - } - - // If the index is 0, this is a relative path with no namespace preppended, - // so we can break early - if i == 0 { - break - } - - // We need to turn /ns1 into ns1/, this makes it easy - namespaceInPath := nshelper.Canonicalize(nonVersionedPath[:i]) - - // If it's root, we replace, otherwise we join - if namespace == "root/" { - namespace = namespaceInPath - } else { - namespace = namespace + namespaceInPath - } - - return namespace, fmt.Sprintf("/v1%s", nonVersionedPath[i:]) - } - - return namespace, fmt.Sprintf("/v1%s", nonVersionedPath) -} - -// RegisterAutoAuthToken adds the provided auto-token into the cache. This is -// primarily used to register the auto-auth token and should only be called -// within a sink's WriteToken func. 
-func (c *LeaseCache) RegisterAutoAuthToken(token string) error { - // Get the token from the cache - oldIndex, err := c.db.Get(cachememdb.IndexNameToken, token) - if err != nil && err != cachememdb.ErrCacheItemNotFound { - return err - } - - // If the index is found, just keep it in the cache and ignore the incoming - // token (since they're the same) - if oldIndex != nil { - c.logger.Trace("auto-auth token already exists in cache; no need to store it again") - return nil - } - - // The following randomly generated values are required for index stored by - // the cache, but are not actually used. We use random values to prevent - // accidental access. - id, err := base62.Random(5) - if err != nil { - return err - } - namespace, err := base62.Random(5) - if err != nil { - return err - } - requestPath, err := base62.Random(5) - if err != nil { - return err - } - - index := &cachememdb.Index{ - ID: id, - Token: token, - Namespace: namespace, - RequestPath: requestPath, - Type: cacheboltdb.TokenType, - } - - // Derive a context off of the lease cache's base context - ctxInfo := c.createCtxInfo(nil) - - index.RenewCtxInfo = &cachememdb.ContextInfo{ - Ctx: ctxInfo.Ctx, - CancelFunc: ctxInfo.CancelFunc, - DoneCh: ctxInfo.DoneCh, - } - - // Store the index in the cache - c.logger.Debug("storing auto-auth token into the cache") - err = c.Set(c.baseCtxInfo.Ctx, index) - if err != nil { - c.logger.Error("failed to cache the auto-auth token", "error", err) - return err - } - - return nil -} - -type cacheClearInput struct { - Type string - - RequestPath string - Namespace string - Token string - TokenAccessor string - Lease string -} - -func parseCacheClearInput(req *cacheClearRequest) (*cacheClearInput, error) { - if req == nil { - return nil, errors.New("nil request options provided") - } - - if req.Type == "" { - return nil, errors.New("no type provided") - } - - in := &cacheClearInput{ - Type: req.Type, - Namespace: req.Namespace, - } - - switch req.Type { - case 
"request_path": - in.RequestPath = req.Value - case "token": - in.Token = req.Value - case "token_accessor": - in.TokenAccessor = req.Value - case "lease": - in.Lease = req.Value - } - - return in, nil -} - -func (c *LeaseCache) hasExpired(currentTime time.Time, index *cachememdb.Index) (bool, error) { - reader := bufio.NewReader(bytes.NewReader(index.Response)) - resp, err := http.ReadResponse(reader, nil) - if err != nil { - return false, fmt.Errorf("failed to deserialize response: %w", err) - } - secret, err := api.ParseSecret(resp.Body) - if err != nil { - return false, fmt.Errorf("failed to parse response as secret: %w", err) - } - - elapsed := currentTime.Sub(index.LastRenewed) - var leaseDuration int - switch { - case secret.LeaseID != "": - leaseDuration = secret.LeaseDuration - case secret.Auth != nil: - leaseDuration = secret.Auth.LeaseDuration - default: - return false, errors.New("secret without lease encountered in expiration check") - } - - if int(elapsed.Seconds()) > leaseDuration { - c.logger.Trace("secret has expired", "id", index.ID, "elapsed", elapsed, "lease duration", leaseDuration) - return true, nil - } - return false, nil -} +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +const userpass = { + user: { + username: { + editType: 'string', + helpText: 'Username for this user.', + fieldValue: 'mutableId', + fieldGroup: 'default', + readOnly: true, + label: 'Username', + type: 'string', + }, + password: { + editType: 'string', + helpText: 'Password for this user.', + fieldGroup: 'default', + sensitive: true, + type: 'string', + }, + tokenBoundCidrs: { + editType: 'stringArray', + helpText: + 'A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.', + fieldGroup: 'Tokens', + label: "Generated Token's Bound CIDRs", + }, + tokenExplicitMaxTtl: { + editType: 'ttl', + helpText: + 'If set, tokens created via this role carry an explicit maximum TTL. 
During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.', + fieldGroup: 'Tokens', + label: "Generated Token's Explicit Maximum TTL", + }, + tokenMaxTtl: { + editType: 'ttl', + helpText: 'The maximum lifetime of the generated token', + fieldGroup: 'Tokens', + label: "Generated Token's Maximum TTL", + }, + tokenNoDefaultPolicy: { + editType: 'boolean', + helpText: "If true, the 'default' policy will not automatically be added to generated tokens", + fieldGroup: 'Tokens', + label: "Do Not Attach 'default' Policy To Generated Tokens", + type: 'boolean', + }, + tokenNumUses: { + editType: 'number', + helpText: 'The maximum number of times a token may be used, a value of zero means unlimited', + fieldGroup: 'Tokens', + label: 'Maximum Uses of Generated Tokens', + type: 'number', + }, + tokenPeriod: { + editType: 'ttl', + helpText: + 'If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value. This takes an integer number of seconds, or a string duration (e.g. "24h").', + fieldGroup: 'Tokens', + label: "Generated Token's Period", + }, + tokenPolicies: { + editType: 'stringArray', + helpText: 'A list of policies that will apply to the generated token for this user.', + fieldGroup: 'Tokens', + label: "Generated Token's Policies", + }, + tokenTtl: { + editType: 'ttl', + helpText: 'The initial ttl of the token to generate', + fieldGroup: 'Tokens', + label: "Generated Token's Initial TTL", + }, + tokenType: { + editType: 'string', + helpText: 'The type of token to generate, service or batch', + fieldGroup: 'Tokens', + label: "Generated Token's Type", + type: 'string', + }, + }, +}; + +const azure = { + 'auth-config/azure': { + clientId: { + editType: 'string', + fieldGroup: 'default', + helpText: + 'The OAuth2 client id to connection to Azure. 
This value can also be provided with the AZURE_CLIENT_ID environment variable.', + label: 'Client ID', + type: 'string', + }, + clientSecret: { + editType: 'string', + fieldGroup: 'default', + helpText: + 'The OAuth2 client secret to connection to Azure. This value can also be provided with the AZURE_CLIENT_SECRET environment variable.', + type: 'string', + }, + environment: { + editType: 'string', + fieldGroup: 'default', + helpText: + 'The Azure environment name. If not provided, AzurePublicCloud is used. This value can also be provided with the AZURE_ENVIRONMENT environment variable.', + type: 'string', + }, + maxRetries: { + editType: 'number', + fieldGroup: 'default', + helpText: + 'The maximum number of attempts a failed operation will be retried before producing an error.', + type: 'number', + }, + maxRetryDelay: { + editType: 'ttl', + fieldGroup: 'default', + helpText: 'The maximum delay allowed before retrying an operation.', + }, + resource: { + editType: 'string', + fieldGroup: 'default', + helpText: + 'The resource URL for the vault application in Azure Active Directory. This value can also be provided with the AZURE_AD_RESOURCE environment variable.', + type: 'string', + }, + retryDelay: { + editType: 'ttl', + fieldGroup: 'default', + helpText: 'The initial amount of delay to use before retrying an operation, increasing exponentially.', + }, + rootPasswordTtl: { + editType: 'ttl', + fieldGroup: 'default', + helpText: + 'The TTL of the root password in Azure. This can be either a number of seconds or a time formatted duration (ex: 24h, 48ds)', + }, + tenantId: { + editType: 'string', + fieldGroup: 'default', + helpText: + 'The tenant id for the Azure Active Directory. This is sometimes referred to as Directory ID in AD. 
This value can also be provided with the AZURE_TENANT_ID environment variable.', + label: 'Tenant ID', + type: 'string', + }, + }, +}; + +const cert = { + 'auth-config/cert': { + disableBinding: { + editType: 'boolean', + helpText: + 'If set, during renewal, skips the matching of presented client identity with the client identity used during login. Defaults to false.', + fieldGroup: 'default', + type: 'boolean', + }, + enableIdentityAliasMetadata: { + editType: 'boolean', + helpText: + 'If set, metadata of the certificate including the metadata corresponding to allowed_metadata_extensions will be stored in the alias. Defaults to false.', + fieldGroup: 'default', + type: 'boolean', + }, + ocspCacheSize: { + editType: 'number', + helpText: 'The size of the in memory OCSP response cache, shared by all configured certs', + fieldGroup: 'default', + type: 'number', + }, + }, + cert: { + name: { + editType: 'string', + helpText: 'The name of the certificate', + fieldValue: 'mutableId', + fieldGroup: 'default', + readOnly: true, + label: 'Name', + type: 'string', + }, + allowedCommonNames: { + editType: 'stringArray', + helpText: 'A list of names. At least one must exist in the Common Name. Supports globbing.', + fieldGroup: 'Constraints', + }, + allowedDnsSans: { + editType: 'stringArray', + helpText: 'A list of DNS names. At least one must exist in the SANs. Supports globbing.', + fieldGroup: 'Constraints', + label: 'Allowed DNS SANs', + }, + allowedEmailSans: { + editType: 'stringArray', + helpText: 'A list of Email Addresses. At least one must exist in the SANs. Supports globbing.', + fieldGroup: 'Constraints', + label: 'Allowed Email SANs', + }, + allowedMetadataExtensions: { + editType: 'stringArray', + helpText: + 'A list of OID extensions. Upon successful authentication, these extensions will be added as metadata if they are present in the certificate. The metadata key will be the string consisting of the OID numbers separated by a dash (-) instead of a dot (.) 
to allow usage in ACL templates.', + fieldGroup: 'default', + }, + allowedNames: { + editType: 'stringArray', + helpText: + 'A list of names. At least one must exist in either the Common Name or SANs. Supports globbing. This parameter is deprecated, please use allowed_common_names, allowed_dns_sans, allowed_email_sans, allowed_uri_sans.', + fieldGroup: 'Constraints', + }, + allowedOrganizationalUnits: { + editType: 'stringArray', + helpText: 'A list of Organizational Units names. At least one must exist in the OU field.', + fieldGroup: 'Constraints', + }, + allowedUriSans: { + editType: 'stringArray', + helpText: 'A list of URIs. At least one must exist in the SANs. Supports globbing.', + fieldGroup: 'Constraints', + label: 'Allowed URI SANs', + }, + certificate: { + editType: 'file', + helpText: 'The public certificate that should be trusted. Must be x509 PEM encoded.', + fieldGroup: 'default', + type: 'string', + }, + displayName: { + editType: 'string', + helpText: 'The display name to use for clients using this certificate.', + fieldGroup: 'default', + type: 'string', + }, + ocspCaCertificates: { + editType: 'file', + helpText: 'Any additional CA certificates needed to communicate with OCSP servers', + fieldGroup: 'default', + type: 'string', + }, + ocspEnabled: { + editType: 'boolean', + helpText: 'Whether to attempt OCSP verification of certificates at login', + fieldGroup: 'default', + type: 'boolean', + }, + ocspFailOpen: { + editType: 'boolean', + helpText: + 'If set to true, if an OCSP revocation cannot be made successfully, login will proceed rather than failing. 
If false, failing to get an OCSP status fails the request.', + fieldGroup: 'default', + type: 'boolean', + }, + ocspQueryAllServers: { + editType: 'boolean', + helpText: + 'If set to true, rather than accepting the first successful OCSP response, query all servers and consider the certificate valid only if all servers agree.', + fieldGroup: 'default', + type: 'boolean', + }, + ocspServersOverride: { + editType: 'stringArray', + helpText: + 'A list of OCSP server addresses. If unset, the OCSP server is determined from the AuthorityInformationAccess extension on the certificate being inspected.', + fieldGroup: 'default', + }, + requiredExtensions: { + editType: 'stringArray', + helpText: + "A list of extensions formatted as 'oid:value'. Expects the extension value to be some type of ASN1 encoded string. All values much match. Supports globbing on 'value'.", + fieldGroup: 'default', + }, + tokenBoundCidrs: { + editType: 'stringArray', + helpText: + 'A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.', + fieldGroup: 'Tokens', + label: "Generated Token's Bound CIDRs", + }, + tokenExplicitMaxTtl: { + editType: 'ttl', + helpText: + 'If set, tokens created via this role carry an explicit maximum TTL. 
During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.', + fieldGroup: 'Tokens', + label: "Generated Token's Explicit Maximum TTL", + }, + tokenMaxTtl: { + editType: 'ttl', + helpText: 'The maximum lifetime of the generated token', + fieldGroup: 'Tokens', + label: "Generated Token's Maximum TTL", + }, + tokenNoDefaultPolicy: { + editType: 'boolean', + helpText: "If true, the 'default' policy will not automatically be added to generated tokens", + fieldGroup: 'Tokens', + label: "Do Not Attach 'default' Policy To Generated Tokens", + type: 'boolean', + }, + tokenNumUses: { + editType: 'number', + helpText: 'The maximum number of times a token may be used, a value of zero means unlimited', + fieldGroup: 'Tokens', + label: 'Maximum Uses of Generated Tokens', + type: 'number', + }, + tokenPeriod: { + editType: 'ttl', + helpText: + 'If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value. This takes an integer number of seconds, or a string duration (e.g. "24h").', + fieldGroup: 'Tokens', + label: "Generated Token's Period", + }, + tokenPolicies: { + editType: 'stringArray', + helpText: 'A list of policies that will apply to the generated token for this user.', + fieldGroup: 'Tokens', + label: "Generated Token's Policies", + }, + tokenTtl: { + editType: 'ttl', + helpText: 'The initial ttl of the token to generate', + fieldGroup: 'Tokens', + label: "Generated Token's Initial TTL", + }, + tokenType: { + editType: 'string', + helpText: 'The type of token to generate, service or batch', + fieldGroup: 'Tokens', + label: "Generated Token's Type", + type: 'string', + }, + }, +}; + +const gcp = { + 'auth-config/gcp': { + credentials: { + editType: 'string', + helpText: + 'Google credentials JSON that Vault will use to verify users against GCP APIs. 
If not specified, will use application default credentials', + fieldGroup: 'default', + label: 'Credentials', + type: 'string', + }, + customEndpoint: { + editType: 'object', + helpText: 'Specifies overrides for various Google API Service Endpoints used in requests.', + fieldGroup: 'default', + type: 'object', + }, + gceAlias: { + editType: 'string', + helpText: 'Indicates what value to use when generating an alias for GCE authentications.', + fieldGroup: 'default', + type: 'string', + }, + gceMetadata: { + editType: 'stringArray', + helpText: + "The metadata to include on the aliases and audit logs generated by this plugin. When set to 'default', includes: instance_creation_timestamp, instance_id, instance_name, project_id, project_number, role, service_account_id, service_account_email, zone. Not editing this field means the 'default' fields are included. Explicitly setting this field to empty overrides the 'default' and means no metadata will be included. If not using 'default', explicit fields must be sent like: 'field1,field2'.", + fieldGroup: 'default', + defaultValue: 'field1,field2', + label: 'gce_metadata', + }, + iamAlias: { + editType: 'string', + helpText: 'Indicates what value to use when generating an alias for IAM authentications.', + fieldGroup: 'default', + type: 'string', + }, + iamMetadata: { + editType: 'stringArray', + helpText: + "The metadata to include on the aliases and audit logs generated by this plugin. When set to 'default', includes: project_id, role, service_account_id, service_account_email. Not editing this field means the 'default' fields are included. Explicitly setting this field to empty overrides the 'default' and means no metadata will be included. 
If not using 'default', explicit fields must be sent like: 'field1,field2'.", + fieldGroup: 'default', + defaultValue: 'field1,field2', + label: 'iam_metadata', + }, + }, +}; + +const github = { + 'auth-config/github': { + baseUrl: { + editType: 'string', + helpText: + 'The API endpoint to use. Useful if you are running GitHub Enterprise or an API-compatible authentication server.', + fieldGroup: 'GitHub Options', + label: 'Base URL', + type: 'string', + }, + organization: { + editType: 'string', + helpText: 'The organization users must be part of', + fieldGroup: 'default', + type: 'string', + }, + organizationId: { + editType: 'number', + helpText: 'The ID of the organization users must be part of', + fieldGroup: 'default', + type: 'number', + }, + tokenBoundCidrs: { + editType: 'stringArray', + helpText: + 'A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.', + fieldGroup: 'Tokens', + label: "Generated Token's Bound CIDRs", + }, + tokenExplicitMaxTtl: { + editType: 'ttl', + helpText: + 'If set, tokens created via this role carry an explicit maximum TTL. 
During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.', + fieldGroup: 'Tokens', + label: "Generated Token's Explicit Maximum TTL", + }, + tokenMaxTtl: { + editType: 'ttl', + helpText: 'The maximum lifetime of the generated token', + fieldGroup: 'Tokens', + label: "Generated Token's Maximum TTL", + }, + tokenNoDefaultPolicy: { + editType: 'boolean', + helpText: "If true, the 'default' policy will not automatically be added to generated tokens", + fieldGroup: 'Tokens', + label: "Do Not Attach 'default' Policy To Generated Tokens", + type: 'boolean', + }, + tokenNumUses: { + editType: 'number', + helpText: 'The maximum number of times a token may be used, a value of zero means unlimited', + fieldGroup: 'Tokens', + label: 'Maximum Uses of Generated Tokens', + type: 'number', + }, + tokenPeriod: { + editType: 'ttl', + helpText: + 'If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value. This takes an integer number of seconds, or a string duration (e.g. "24h").', + fieldGroup: 'Tokens', + label: "Generated Token's Period", + }, + tokenPolicies: { + editType: 'stringArray', + helpText: 'A list of policies that will apply to the generated token for this user.', + fieldGroup: 'Tokens', + label: "Generated Token's Policies", + }, + tokenTtl: { + editType: 'ttl', + helpText: 'The initial ttl of the token to generate', + fieldGroup: 'Tokens', + label: "Generated Token's Initial TTL", + }, + tokenType: { + editType: 'string', + helpText: 'The type of token to generate, service or batch', + fieldGroup: 'Tokens', + label: "Generated Token's Type", + type: 'string', + }, + }, +}; + +const jwt = { + 'auth-config/jwt': { + boundIssuer: { + editType: 'string', + helpText: "The value against which to match the 'iss' claim in a JWT. 
Optional.", + fieldGroup: 'default', + type: 'string', + }, + defaultRole: { + editType: 'string', + helpText: + 'The default role to use if none is provided during login. If not set, a role is required during login.', + fieldGroup: 'default', + type: 'string', + }, + jwksCaPem: { + editType: 'string', + helpText: + 'The CA certificate or chain of certificates, in PEM format, to use to validate connections to the JWKS URL. If not set, system certificates are used.', + fieldGroup: 'default', + type: 'string', + }, + jwksUrl: { + editType: 'string', + helpText: + 'JWKS URL to use to authenticate signatures. Cannot be used with "oidc_discovery_url" or "jwt_validation_pubkeys".', + fieldGroup: 'default', + type: 'string', + }, + jwtSupportedAlgs: { + editType: 'stringArray', + helpText: 'A list of supported signing algorithms. Defaults to RS256.', + fieldGroup: 'default', + }, + jwtValidationPubkeys: { + editType: 'stringArray', + helpText: + 'A list of PEM-encoded public keys to use to authenticate signatures locally. Cannot be used with "jwks_url" or "oidc_discovery_url".', + fieldGroup: 'default', + }, + namespaceInState: { + editType: 'boolean', + helpText: + 'Pass namespace in the OIDC state parameter instead of as a separate query parameter. With this setting, the allowed redirect URL(s) in Vault and on the provider side should not contain a namespace query parameter. This means only one redirect URL entry needs to be maintained on the provider side for all vault namespaces that will be authenticating against it. 
Defaults to true for new configs.', + fieldGroup: 'default', + defaultValue: true, + label: 'Namespace in OIDC state', + type: 'boolean', + }, + oidcClientId: { + editType: 'string', + helpText: 'The OAuth Client ID configured with your OIDC provider.', + fieldGroup: 'default', + type: 'string', + }, + oidcClientSecret: { + editType: 'string', + helpText: 'The OAuth Client Secret configured with your OIDC provider.', + fieldGroup: 'default', + sensitive: true, + type: 'string', + }, + oidcDiscoveryCaPem: { + editType: 'string', + helpText: + 'The CA certificate or chain of certificates, in PEM format, to use to validate connections to the OIDC Discovery URL. If not set, system certificates are used.', + fieldGroup: 'default', + type: 'string', + }, + oidcDiscoveryUrl: { + editType: 'string', + helpText: + 'OIDC Discovery URL, without any .well-known component (base path). Cannot be used with "jwks_url" or "jwt_validation_pubkeys".', + fieldGroup: 'default', + type: 'string', + }, + oidcResponseMode: { + editType: 'string', + helpText: + "The response mode to be used in the OAuth2 request. Allowed values are 'query' and 'form_post'.", + fieldGroup: 'default', + type: 'string', + }, + oidcResponseTypes: { + editType: 'stringArray', + helpText: + "The response types to request. Allowed values are 'code' and 'id_token'. Defaults to 'code'.", + fieldGroup: 'default', + }, + providerConfig: { + editType: 'object', + helpText: 'Provider-specific configuration. 
Optional.', + fieldGroup: 'default', + label: 'Provider Config', + type: 'object', + }, + }, +}; + +const kubernetes = { + 'auth-config/kubernetes': { + disableLocalCaJwt: { + editType: 'boolean', + helpText: + 'Disable defaulting to the local CA cert and service account JWT when running in a Kubernetes pod', + fieldGroup: 'default', + label: 'Disable use of local CA and service account JWT', + type: 'boolean', + }, + kubernetesCaCert: { + editType: 'string', + helpText: 'PEM encoded CA cert for use by the TLS client used to talk with the API.', + fieldGroup: 'default', + label: 'Kubernetes CA Certificate', + type: 'string', + }, + kubernetesHost: { + editType: 'string', + helpText: + 'Host must be a host string, a host:port pair, or a URL to the base of the Kubernetes API server.', + fieldGroup: 'default', + type: 'string', + }, + pemKeys: { + editType: 'stringArray', + helpText: + 'Optional list of PEM-formated public keys or certificates used to verify the signatures of kubernetes service account JWTs. If a certificate is given, its public key will be extracted. Not every installation of Kubernetes exposes these keys.', + fieldGroup: 'default', + label: 'Service account verification keys', + }, + tokenReviewerJwt: { + editType: 'string', + helpText: + 'A service account JWT (or other token) used as a bearer token to access the TokenReview API to validate other JWTs during login. If not set the JWT used for login will be used to access the API.', + fieldGroup: 'default', + label: 'Token Reviewer JWT', + type: 'string', + }, + }, + role: { + name: { + editType: 'string', + helpText: 'Name of the role.', + fieldValue: 'mutableId', + fieldGroup: 'default', + readOnly: true, + label: 'Name', + type: 'string', + }, + aliasNameSource: { + editType: 'string', + helpText: + 'Source to use when deriving the Alias name. valid choices: "serviceaccount_uid" : e.g. 474b11b5-0f20-4f9d-8ca5-65715ab325e0 (most secure choice) "serviceaccount_name" : / e.g. 
vault/vault-agent default: "serviceaccount_uid"', + fieldGroup: 'default', + type: 'string', + }, + audience: { + editType: 'string', + helpText: 'Optional Audience claim to verify in the jwt.', + fieldGroup: 'default', + type: 'string', + }, + boundServiceAccountNames: { + editType: 'stringArray', + helpText: + 'List of service account names able to access this role. If set to "*" all names are allowed.', + fieldGroup: 'default', + }, + boundServiceAccountNamespaces: { + editType: 'stringArray', + helpText: 'List of namespaces allowed to access this role. If set to "*" all namespaces are allowed.', + fieldGroup: 'default', + }, + tokenBoundCidrs: { + editType: 'stringArray', + helpText: + 'A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.', + fieldGroup: 'Tokens', + label: "Generated Token's Bound CIDRs", + }, + tokenExplicitMaxTtl: { + editType: 'ttl', + helpText: + 'If set, tokens created via this role carry an explicit maximum TTL. 
During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.', + fieldGroup: 'Tokens', + label: "Generated Token's Explicit Maximum TTL", + }, + tokenMaxTtl: { + editType: 'ttl', + helpText: 'The maximum lifetime of the generated token', + fieldGroup: 'Tokens', + label: "Generated Token's Maximum TTL", + }, + tokenNoDefaultPolicy: { + editType: 'boolean', + helpText: "If true, the 'default' policy will not automatically be added to generated tokens", + fieldGroup: 'Tokens', + label: "Do Not Attach 'default' Policy To Generated Tokens", + type: 'boolean', + }, + tokenNumUses: { + editType: 'number', + helpText: 'The maximum number of times a token may be used, a value of zero means unlimited', + fieldGroup: 'Tokens', + label: 'Maximum Uses of Generated Tokens', + type: 'number', + }, + tokenPeriod: { + editType: 'ttl', + helpText: + 'If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value. This takes an integer number of seconds, or a string duration (e.g. 
"24h").', + fieldGroup: 'Tokens', + label: "Generated Token's Period", + }, + tokenPolicies: { + editType: 'stringArray', + helpText: 'A list of policies that will apply to the generated token for this user.', + fieldGroup: 'Tokens', + label: "Generated Token's Policies", + }, + tokenTtl: { + editType: 'ttl', + helpText: 'The initial ttl of the token to generate', + fieldGroup: 'Tokens', + label: "Generated Token's Initial TTL", + }, + tokenType: { + editType: 'string', + helpText: 'The type of token to generate, service or batch', + fieldGroup: 'Tokens', + label: "Generated Token's Type", + type: 'string', + }, + }, +}; + +const ldap = { + 'auth-config/ldap': { + anonymousGroupSearch: { + editType: 'boolean', + helpText: + 'Use anonymous binds when performing LDAP group searches (if true the initial credentials will still be used for the initial connection test).', + fieldGroup: 'default', + label: 'Anonymous group search', + type: 'boolean', + }, + binddn: { + editType: 'string', + helpText: 'LDAP DN for searching for the user DN (optional)', + fieldGroup: 'default', + label: 'Name of Object to bind (binddn)', + type: 'string', + }, + bindpass: { + editType: 'string', + helpText: 'LDAP password for searching for the user DN (optional)', + fieldGroup: 'default', + sensitive: true, + type: 'string', + }, + caseSensitiveNames: { + editType: 'boolean', + helpText: + 'If true, case sensitivity will be used when comparing usernames and groups for matching policies.', + fieldGroup: 'default', + type: 'boolean', + }, + certificate: { + editType: 'file', + helpText: + 'CA certificate to use when verifying LDAP server certificate, must be x509 PEM encoded (optional)', + fieldGroup: 'default', + label: 'CA certificate', + type: 'string', + }, + clientTlsCert: { + editType: 'file', + helpText: 'Client certificate to provide to the LDAP server, must be x509 PEM encoded (optional)', + fieldGroup: 'default', + label: 'Client certificate', + type: 'string', + }, + clientTlsKey: 
{ + editType: 'file', + helpText: 'Client certificate key to provide to the LDAP server, must be x509 PEM encoded (optional)', + fieldGroup: 'default', + label: 'Client key', + type: 'string', + }, + connectionTimeout: { + editType: 'ttl', + helpText: + 'Timeout, in seconds, when attempting to connect to the LDAP server before trying the next URL in the configuration.', + fieldGroup: 'default', + }, + denyNullBind: { + editType: 'boolean', + helpText: + "Denies an unauthenticated LDAP bind request if the user's password is empty; defaults to true", + fieldGroup: 'default', + type: 'boolean', + }, + dereferenceAliases: { + editType: 'string', + helpText: + "When aliases should be dereferenced on search operations. Accepted values are 'never', 'finding', 'searching', 'always'. Defaults to 'never'.", + possibleValues: ['never', 'finding', 'searching', 'always'], + fieldGroup: 'default', + type: 'string', + }, + discoverdn: { + editType: 'boolean', + helpText: 'Use anonymous bind to discover the bind DN of a user (optional)', + fieldGroup: 'default', + label: 'Discover DN', + type: 'boolean', + }, + groupattr: { + editType: 'string', + helpText: + 'LDAP attribute to follow on objects returned by in order to enumerate user group membership. Examples: "cn" or "memberOf", etc. 
Default: cn', + fieldGroup: 'default', + defaultValue: 'cn', + label: 'Group Attribute', + type: 'string', + }, + groupdn: { + editType: 'string', + helpText: 'LDAP search base to use for group membership search (eg: ou=Groups,dc=example,dc=org)', + fieldGroup: 'default', + label: 'Group DN', + type: 'string', + }, + groupfilter: { + editType: 'string', + helpText: + 'Go template for querying group membership of user (optional) The template can access the following context variables: UserDN, Username Example: (&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}})) Default: (|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))', + fieldGroup: 'default', + label: 'Group Filter', + type: 'string', + }, + insecureTls: { + editType: 'boolean', + helpText: 'Skip LDAP server SSL Certificate verification - VERY insecure (optional)', + fieldGroup: 'default', + label: 'Insecure TLS', + type: 'boolean', + }, + maxPageSize: { + editType: 'number', + helpText: + "If set to a value greater than 0, the LDAP backend will use the LDAP server's paged search control to request pages of up to the given size. This can be used to avoid hitting the LDAP server's maximum result size limit. Otherwise, the LDAP backend will not use the paged search control.", + fieldGroup: 'default', + type: 'number', + }, + passwordPolicy: { + editType: 'string', + fieldGroup: 'default', + helpText: 'Password policy to use to rotate the root password', + type: 'string', + }, + requestTimeout: { + editType: 'ttl', + helpText: + 'Timeout, in seconds, for the connection when making requests against the server before returning back an error.', + fieldGroup: 'default', + }, + starttls: { + editType: 'boolean', + helpText: 'Issue a StartTLS command after establishing unencrypted connection (optional)', + fieldGroup: 'default', + label: 'Issue StartTLS', + type: 'boolean', + }, + tlsMaxVersion: { + editType: 'string', + helpText: + "Maximum TLS version to use. 
Accepted values are 'tls10', 'tls11', 'tls12' or 'tls13'. Defaults to 'tls12'", + possibleValues: ['tls10', 'tls11', 'tls12', 'tls13'], + fieldGroup: 'default', + label: 'Maximum TLS Version', + type: 'string', + }, + tlsMinVersion: { + editType: 'string', + helpText: + "Minimum TLS version to use. Accepted values are 'tls10', 'tls11', 'tls12' or 'tls13'. Defaults to 'tls12'", + possibleValues: ['tls10', 'tls11', 'tls12', 'tls13'], + fieldGroup: 'default', + label: 'Minimum TLS Version', + type: 'string', + }, + tokenBoundCidrs: { + editType: 'stringArray', + helpText: + 'A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.', + fieldGroup: 'Tokens', + label: "Generated Token's Bound CIDRs", + }, + tokenExplicitMaxTtl: { + editType: 'ttl', + helpText: + 'If set, tokens created via this role carry an explicit maximum TTL. During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.', + fieldGroup: 'Tokens', + label: "Generated Token's Explicit Maximum TTL", + }, + tokenMaxTtl: { + editType: 'ttl', + helpText: 'The maximum lifetime of the generated token', + fieldGroup: 'Tokens', + label: "Generated Token's Maximum TTL", + }, + tokenNoDefaultPolicy: { + editType: 'boolean', + helpText: "If true, the 'default' policy will not automatically be added to generated tokens", + fieldGroup: 'Tokens', + label: "Do Not Attach 'default' Policy To Generated Tokens", + type: 'boolean', + }, + tokenNumUses: { + editType: 'number', + helpText: 'The maximum number of times a token may be used, a value of zero means unlimited', + fieldGroup: 'Tokens', + label: 'Maximum Uses of Generated Tokens', + type: 'number', + }, + tokenPeriod: { + editType: 'ttl', + helpText: + 'If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value. 
This takes an integer number of seconds, or a string duration (e.g. "24h").', + fieldGroup: 'Tokens', + label: "Generated Token's Period", + }, + tokenPolicies: { + editType: 'stringArray', + helpText: 'A list of policies that will apply to the generated token for this user.', + fieldGroup: 'Tokens', + label: "Generated Token's Policies", + }, + tokenTtl: { + editType: 'ttl', + helpText: 'The initial ttl of the token to generate', + fieldGroup: 'Tokens', + label: "Generated Token's Initial TTL", + }, + tokenType: { + editType: 'string', + helpText: 'The type of token to generate, service or batch', + fieldGroup: 'Tokens', + label: "Generated Token's Type", + type: 'string', + }, + upndomain: { + editType: 'string', + helpText: 'Enables userPrincipalDomain login with [username]@UPNDomain (optional)', + fieldGroup: 'default', + label: 'User Principal (UPN) Domain', + type: 'string', + }, + url: { + editType: 'string', + helpText: + 'LDAP URL to connect to (default: ldap://127.0.0.1). Multiple URLs can be specified by concatenating them with commas; they will be tried in-order.', + fieldGroup: 'default', + label: 'URL', + type: 'string', + }, + usePre111GroupCnBehavior: { + editType: 'boolean', + helpText: + 'In Vault 1.1.1 a fix for handling group CN values of different cases unfortunately introduced a regression that could cause previously defined groups to not be found due to a change in the resulting name. If set true, the pre-1.1.1 behavior for matching group CNs will be used. This is only needed in some upgrade scenarios for backwards compatibility. It is enabled by default if the config is upgraded but disabled by default on new configurations.', + fieldGroup: 'default', + type: 'boolean', + }, + useTokenGroups: { + editType: 'boolean', + helpText: + 'If true, use the Active Directory tokenGroups constructed attribute of the user to find the group memberships. 
This will find all security groups including nested ones.', + fieldGroup: 'default', + type: 'boolean', + }, + userattr: { + editType: 'string', + helpText: 'Attribute used for users (default: cn)', + fieldGroup: 'default', + defaultValue: 'cn', + label: 'User Attribute', + type: 'string', + }, + userdn: { + editType: 'string', + helpText: 'LDAP domain to use for users (eg: ou=People,dc=example,dc=org)', + fieldGroup: 'default', + label: 'User DN', + type: 'string', + }, + userfilter: { + editType: 'string', + helpText: + 'Go template for LDAP user search filer (optional) The template can access the following context variables: UserAttr, Username Default: ({{.UserAttr}}={{.Username}})', + fieldGroup: 'default', + label: 'User Search Filter', + type: 'string', + }, + usernameAsAlias: { + editType: 'boolean', + helpText: 'If true, sets the alias name to the username', + fieldGroup: 'default', + type: 'boolean', + }, + }, + group: { + name: { + editType: 'string', + helpText: 'Name of the LDAP group.', + fieldValue: 'mutableId', + fieldGroup: 'default', + readOnly: true, + label: 'Name', + type: 'string', + }, + policies: { + editType: 'stringArray', + helpText: 'A list of policies associated to the group.', + fieldGroup: 'default', + }, + }, + user: { + name: { + editType: 'string', + helpText: 'Name of the LDAP user.', + fieldValue: 'mutableId', + fieldGroup: 'default', + readOnly: true, + label: 'Name', + type: 'string', + }, + groups: { + editType: 'stringArray', + helpText: 'A list of additional groups associated with the user.', + fieldGroup: 'default', + }, + policies: { + editType: 'stringArray', + helpText: 'A list of policies associated with the user.', + fieldGroup: 'default', + }, + }, +}; + +const okta = { + 'auth-config/okta': { + apiToken: { + editType: 'string', + helpText: 'Okta API key.', + fieldGroup: 'default', + label: 'API Token', + type: 'string', + }, + baseUrl: { + editType: 'string', + helpText: + 'The base domain to use for the Okta API. 
When not specified in the configuration, "okta.com" is used.', + fieldGroup: 'default', + label: 'Base URL', + type: 'string', + }, + bypassOktaMfa: { + editType: 'boolean', + helpText: + 'When set true, requests by Okta for a MFA check will be bypassed. This also disallows certain status checks on the account, such as whether the password is expired.', + fieldGroup: 'default', + label: 'Bypass Okta MFA', + type: 'boolean', + }, + orgName: { + editType: 'string', + helpText: 'Name of the organization to be used in the Okta API.', + fieldGroup: 'default', + label: 'Organization Name', + type: 'string', + }, + tokenBoundCidrs: { + editType: 'stringArray', + helpText: + 'A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.', + fieldGroup: 'Tokens', + label: "Generated Token's Bound CIDRs", + }, + tokenExplicitMaxTtl: { + editType: 'ttl', + helpText: + 'If set, tokens created via this role carry an explicit maximum TTL. During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.', + fieldGroup: 'Tokens', + label: "Generated Token's Explicit Maximum TTL", + }, + tokenMaxTtl: { + editType: 'ttl', + helpText: 'The maximum lifetime of the generated token', + fieldGroup: 'Tokens', + label: "Generated Token's Maximum TTL", + }, + tokenNoDefaultPolicy: { + editType: 'boolean', + helpText: "If true, the 'default' policy will not automatically be added to generated tokens", + fieldGroup: 'Tokens', + label: "Do Not Attach 'default' Policy To Generated Tokens", + type: 'boolean', + }, + tokenNumUses: { + editType: 'number', + helpText: 'The maximum number of times a token may be used, a value of zero means unlimited', + fieldGroup: 'Tokens', + label: 'Maximum Uses of Generated Tokens', + type: 'number', + }, + tokenPeriod: { + editType: 'ttl', + helpText: + 'If set, tokens created via this role will 
have no max lifetime; instead, their renewal period will be fixed to this value. This takes an integer number of seconds, or a string duration (e.g. "24h").', + fieldGroup: 'Tokens', + label: "Generated Token's Period", + }, + tokenPolicies: { + editType: 'stringArray', + helpText: 'A list of policies that will apply to the generated token for this user.', + fieldGroup: 'Tokens', + label: "Generated Token's Policies", + }, + tokenTtl: { + editType: 'ttl', + helpText: 'The initial ttl of the token to generate', + fieldGroup: 'Tokens', + label: "Generated Token's Initial TTL", + }, + tokenType: { + editType: 'string', + helpText: 'The type of token to generate, service or batch', + fieldGroup: 'Tokens', + label: "Generated Token's Type", + type: 'string', + }, + }, + group: { + name: { + editType: 'string', + helpText: 'Name of the Okta group.', + fieldValue: 'mutableId', + fieldGroup: 'default', + readOnly: true, + label: 'Name', + type: 'string', + }, + policies: { + editType: 'stringArray', + helpText: 'A list of policies associated to the group.', + fieldGroup: 'default', + }, + }, + user: { + name: { + editType: 'string', + helpText: 'Name of the user.', + fieldValue: 'mutableId', + fieldGroup: 'default', + readOnly: true, + label: 'Name', + type: 'string', + }, + groups: { + editType: 'stringArray', + helpText: 'List of groups associated with the user.', + fieldGroup: 'default', + }, + policies: { + editType: 'stringArray', + helpText: 'List of policies associated with the user.', + fieldGroup: 'default', + }, + }, +}; + +const radius = { + 'auth-config/radius': { + dialTimeout: { + editType: 'ttl', + helpText: 'Number of seconds before connect times out (default: 10)', + fieldGroup: 'default', + defaultValue: 10, + }, + host: { + editType: 'string', + helpText: 'RADIUS server host', + fieldGroup: 'default', + label: 'Host', + type: 'string', + }, + nasIdentifier: { + editType: 'string', + helpText: 'RADIUS NAS Identifier field (optional)', + fieldGroup: 
'default', + label: 'NAS Identifier', + type: 'string', + }, + nasPort: { + editType: 'number', + helpText: 'RADIUS NAS port field (default: 10)', + fieldGroup: 'default', + defaultValue: 10, + label: 'NAS Port', + type: 'number', + }, + port: { + editType: 'number', + helpText: 'RADIUS server port (default: 1812)', + fieldGroup: 'default', + defaultValue: 1812, + type: 'number', + }, + readTimeout: { + editType: 'ttl', + helpText: 'Number of seconds before response times out (default: 10)', + fieldGroup: 'default', + defaultValue: 10, + }, + secret: { + editType: 'string', + helpText: 'Secret shared with the RADIUS server', + fieldGroup: 'default', + type: 'string', + }, + tokenBoundCidrs: { + editType: 'stringArray', + helpText: + 'A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.', + fieldGroup: 'Tokens', + label: "Generated Token's Bound CIDRs", + }, + tokenExplicitMaxTtl: { + editType: 'ttl', + helpText: + 'If set, tokens created via this role carry an explicit maximum TTL. 
During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.', + fieldGroup: 'Tokens', + label: "Generated Token's Explicit Maximum TTL", + }, + tokenMaxTtl: { + editType: 'ttl', + helpText: 'The maximum lifetime of the generated token', + fieldGroup: 'Tokens', + label: "Generated Token's Maximum TTL", + }, + tokenNoDefaultPolicy: { + editType: 'boolean', + helpText: "If true, the 'default' policy will not automatically be added to generated tokens", + fieldGroup: 'Tokens', + label: "Do Not Attach 'default' Policy To Generated Tokens", + type: 'boolean', + }, + tokenNumUses: { + editType: 'number', + helpText: 'The maximum number of times a token may be used, a value of zero means unlimited', + fieldGroup: 'Tokens', + label: 'Maximum Uses of Generated Tokens', + type: 'number', + }, + tokenPeriod: { + editType: 'ttl', + helpText: + 'If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value. This takes an integer number of seconds, or a string duration (e.g. 
"24h").', + fieldGroup: 'Tokens', + label: "Generated Token's Period", + }, + tokenPolicies: { + editType: 'stringArray', + helpText: 'A list of policies that will apply to the generated token for this user.', + fieldGroup: 'Tokens', + label: "Generated Token's Policies", + }, + tokenTtl: { + editType: 'ttl', + helpText: 'The initial ttl of the token to generate', + fieldGroup: 'Tokens', + label: "Generated Token's Initial TTL", + }, + tokenType: { + editType: 'string', + helpText: 'The type of token to generate, service or batch', + fieldGroup: 'Tokens', + label: "Generated Token's Type", + type: 'string', + }, + unregisteredUserPolicies: { + editType: 'string', + helpText: + 'List of policies to grant upon successful RADIUS authentication of an unregistered user (default: empty)', + fieldGroup: 'default', + label: 'Policies for unregistered users', + type: 'string', + }, + }, + user: { + name: { + editType: 'string', + helpText: 'Name of the RADIUS user.', + fieldValue: 'mutableId', + fieldGroup: 'default', + readOnly: true, + label: 'Name', + type: 'string', + }, + policies: { + editType: 'stringArray', + helpText: 'A list of policies associated to the user.', + fieldGroup: 'default', + }, + }, +}; + +export default { + azure, + userpass, + cert, + gcp, + github, + jwt, + kubernetes, + ldap, + okta, + radius, + // aws is the only method that doesn't leverage OpenApi in practice +}; diff --git a/command/audit.go b/command/audit.go index 67f5b194daaa..8d69fe8759ce 100644 --- a/command/audit.go +++ b/command/audit.go @@ -1,49 +1,55 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "strings" - - "github.com/mitchellh/cli" -) - -var _ cli.Command = (*AuditCommand)(nil) - -type AuditCommand struct { - *BaseCommand -} - -func (c *AuditCommand) Synopsis() string { - return "Interact with audit devices" -} - -func (c *AuditCommand) Help() string { - helpText := ` -Usage: vault audit [options] [args] - - This command groups subcommands for interacting with Vault's audit devices. - Users can list, enable, and disable audit devices. - - *NOTE*: Once an audit device has been enabled, failure to audit could prevent - Vault from servicing future requests. It is highly recommended that you enable - multiple audit devices. - - List all enabled audit devices: - - $ vault audit list - - Enable a new audit device "file"; - - $ vault audit enable file file_path=/var/log/audit.log - - Please see the individual subcommand help for detailed usage information. -` - - return strings.TrimSpace(helpText) -} - -func (c *AuditCommand) Run(args []string) int { - return cli.RunResultHelp -} +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + +{{#if this.canLoginSaml}} +
+
+ +
+ +
+ +
+
+ {{yield}} +
+ + +{{else}} + + Nonsecure context detected + + Logging in with a SAML auth method requires a browser in a secure context. + + + + Read more about secure contexts. + + + + +{{/if}} \ No newline at end of file diff --git a/command/audit_disable.go b/command/audit_disable.go index 0e99da438d7b..57489a186f1d 100644 --- a/command/audit_disable.go +++ b/command/audit_disable.go @@ -4,89 +4,49 @@ package command import ( - "fmt" "strings" - "github.com/mitchellh/cli" - "github.com/posener/complete" + "github.com/hashicorp/cli" ) -var ( - _ cli.Command = (*AuditDisableCommand)(nil) - _ cli.CommandAutocomplete = (*AuditDisableCommand)(nil) -) +var _ cli.Command = (*AuthCommand)(nil) -type AuditDisableCommand struct { +type AuthCommand struct { *BaseCommand } -func (c *AuditDisableCommand) Synopsis() string { - return "Disables an audit device" +func (c *AuthCommand) Synopsis() string { + return "Interact with auth methods" } -func (c *AuditDisableCommand) Help() string { - helpText := ` -Usage: vault audit disable [options] PATH +func (c *AuthCommand) Help() string { + return strings.TrimSpace(` +Usage: vault auth [options] [args] - Disables an audit device. Once an audit device is disabled, no future audit - logs are dispatched to it. The data associated with the audit device is not - affected. + This command groups subcommands for interacting with Vault's auth methods. + Users can list, enable, disable, and get help for different auth methods. - The argument corresponds to the PATH of audit device, not the TYPE! + To authenticate to Vault as a user or machine, use the "vault login" command + instead. This command is for interacting with the auth methods themselves, not + authenticating to Vault. 
- Disable the audit device enabled at "file/": + List all enabled auth methods: - $ vault audit disable file/ + $ vault auth list -` + c.Flags().Help() + Enable a new auth method "userpass"; - return strings.TrimSpace(helpText) -} + $ vault auth enable userpass -func (c *AuditDisableCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP) -} + Get detailed help information about how to authenticate to a particular auth + method: -func (c *AuditDisableCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultAudits() -} + $ vault auth help github -func (c *AuditDisableCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() + Please see the individual subcommand help for detailed usage information. +`) } -func (c *AuditDisableCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) - return 1 - } - - path := ensureTrailingSlash(sanitizePath(args[0])) - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - if err := client.Sys().DisableAudit(path); err != nil { - c.UI.Error(fmt.Sprintf("Error disabling audit device: %s", err)) - return 2 - } - - c.UI.Output(fmt.Sprintf("Success! Disabled audit device (if it was enabled) at: %s", path)) - - return 0 +func (c *AuthCommand) Run(args []string) int { + return cli.RunResultHelp } diff --git a/command/audit_disable_test.go b/command/audit_disable_test.go index ec28f70ddfc0..13d291f2a24f 100644 --- a/command/audit_disable_test.go +++ b/command/audit_disable_test.go @@ -1,163 +1,132 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "strings" - "testing" - - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" -) - -func testAuditDisableCommand(tb testing.TB) (*cli.MockUi, *AuditDisableCommand) { - tb.Helper() - - ui := cli.NewMockUi() - return ui, &AuditDisableCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - } -} - -func TestAuditDisableCommand_Run(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - args []string - out string - code int - }{ - { - "not_enough_args", - nil, - "Not enough arguments", - 1, - }, - { - "too_many_args", - []string{"foo", "bar", "baz"}, - "Too many arguments", - 1, - }, - { - "not_real", - []string{"not_real"}, - "Success! Disabled audit device (if it was enabled) at: not_real/", - 0, - }, - { - "default", - []string{"file"}, - "Success! Disabled audit device (if it was enabled) at: file/", - 0, - }, - } - - for _, tc := range cases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - if err := client.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ - Type: "file", - Options: map[string]string{ - "file_path": "discard", - }, - }); err != nil { - t.Fatal(err) - } - - ui, cmd := testAuditDisableCommand(t) - cmd.client = client - - code := cmd.Run(tc.args) - if code != tc.code { - t.Errorf("expected %d to be %d", code, tc.code) - } - - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, tc.out) { - t.Errorf("expected %q to contain %q", combined, tc.out) - } - }) - } - - t.Run("integration", func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - if err := client.Sys().EnableAuditWithOptions("integration_audit_disable", &api.EnableAuditOptions{ - Type: "file", - Options: map[string]string{ - "file_path": "discard", - }, - }); err != nil { - t.Fatal(err) - } - - ui, cmd := 
testAuditDisableCommand(t) - cmd.client = client - - code := cmd.Run([]string{ - "integration_audit_disable/", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - - expected := "Success! Disabled audit device (if it was enabled) at: integration_audit_disable/" - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } - - mounts, err := client.Sys().ListMounts() - if err != nil { - t.Fatal(err) - } - - if _, ok := mounts["integration_audit_disable"]; ok { - t.Errorf("expected mount to not exist: %#v", mounts) - } - }) - - t.Run("communication_failure", func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServerBad(t) - defer closer() - - ui, cmd := testAuditDisableCommand(t) - cmd.client = client - - code := cmd.Run([]string{ - "file", - }) - if exp := 2; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - - expected := "Error disabling audit device: " - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } - }) - - t.Run("no_tabs", func(t *testing.T) { - t.Parallel() - - _, cmd := testAuditDisableCommand(t) - assertNoTabs(t, cmd) - }) -} +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + +{{#if this.mfaErrors}} +
+ + + +
+{{else}} + + <:header> + {{#if this.oidcProvider}} +
+ +
+ {{else}} +
+
+ +
+
+
+ {{#if this.mfaAuthData}} + + {{else if this.waitingForOktaNumberChallenge}} + + {{/if}} +

+ {{if (or this.mfaAuthData this.waitingForOktaNumberChallenge) "Authenticate" "Sign in to Vault"}} +

+
+ {{/if}} + + + <:subHeader> + {{#if (has-feature "Namespaces")}} + {{#unless this.mfaAuthData}} + +
+
+ +
+ {{#if this.managedNamespaceRoot}} +
+ /{{this.managedNamespaceRoot}} +
+ {{/if}} +
+
+
+ +
+
+
+
+
+ {{/unless}} + {{/if}} + + + <:content> + {{#if this.mfaAuthData}} + + {{else}} + + {{/if}} + + + <:footer> +
+

+ {{#if this.oidcProvider}} + Once you log in, you will be redirected back to your application. If you require login credentials, contact your + administrator. + {{else}} + Contact your administrator for login credentials. + {{/if}} +

+
+ +
+{{/if}} \ No newline at end of file diff --git a/command/audit_enable.go b/command/audit_enable.go index c77fe277d1f3..66c53c47e785 100644 --- a/command/audit_enable.go +++ b/command/audit_enable.go @@ -1,159 +1,69 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "fmt" - "io" - "os" - "strings" - - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*AuditEnableCommand)(nil) - _ cli.CommandAutocomplete = (*AuditEnableCommand)(nil) -) - -type AuditEnableCommand struct { - *BaseCommand - - flagDescription string - flagPath string - flagLocal bool - - testStdin io.Reader // For tests -} - -func (c *AuditEnableCommand) Synopsis() string { - return "Enables an audit device" -} - -func (c *AuditEnableCommand) Help() string { - helpText := ` -Usage: vault audit enable [options] TYPE [CONFIG K=V...] - - Enables an audit device at a given path. - - This command enables an audit device of TYPE. Additional options for - configuring the audit device can be specified after the type in the same - format as the "vault write" command in key/value pairs. 
- - For example, to configure the file audit device to write audit logs at the - path "/var/log/audit.log": - - $ vault audit enable file file_path=/var/log/audit.log - -` + c.Flags().Help() - - return strings.TrimSpace(helpText) -} - -func (c *AuditEnableCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP) - - f := set.NewFlagSet("Command Options") - - f.StringVar(&StringVar{ - Name: "description", - Target: &c.flagDescription, - Default: "", - EnvVar: "", - Completion: complete.PredictAnything, - Usage: "Human-friendly description for the purpose of this audit " + - "device.", - }) - - f.StringVar(&StringVar{ - Name: "path", - Target: &c.flagPath, - Default: "", // The default is complex, so we have to manually document - EnvVar: "", - Completion: complete.PredictAnything, - Usage: "Place where the audit device will be accessible. This must be " + - "unique across all audit devices. This defaults to the \"type\" of the " + - "audit device.", - }) - - f.BoolVar(&BoolVar{ - Name: "local", - Target: &c.flagLocal, - Default: false, - EnvVar: "", - Usage: "Mark the audit device as a local-only device. Local devices " + - "are not replicated or removed by replication.", - }) - - return set -} - -func (c *AuditEnableCommand) AutocompleteArgs() complete.Predictor { - return complete.PredictSet( - "file", - "syslog", - "socket", - ) -} - -func (c *AuditEnableCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *AuditEnableCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - if len(args) < 1 { - c.UI.Error("Error enabling audit device: audit type missing. 
Valid types include 'file', 'socket' and 'syslog'.") - return 1 - } - - // Grab the type - auditType := strings.TrimSpace(args[0]) - - auditPath := c.flagPath - if auditPath == "" { - auditPath = auditType - } - auditPath = ensureTrailingSlash(auditPath) - - // Pull our fake stdin if needed - stdin := (io.Reader)(os.Stdin) - if c.testStdin != nil { - stdin = c.testStdin - } - - options, err := parseArgsDataString(stdin, args[1:]) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) - return 1 - } - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - if err := client.Sys().EnableAuditWithOptions(auditPath, &api.EnableAuditOptions{ - Type: auditType, - Description: c.flagDescription, - Options: options, - Local: c.flagLocal, - }); err != nil { - c.UI.Error(fmt.Sprintf("Error enabling audit device: %s", err)) - return 2 - } - - c.UI.Output(fmt.Sprintf("Success! Enabled the %s audit device at: %s", auditType, auditPath)) - return 0 -} +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { create, visitable, fillable, clickable } from 'ember-cli-page-object'; +import { click, settled } from '@ember/test-helpers'; +import VAULT_KEYS from 'vault/tests/helpers/vault-keys'; + +const { rootToken } = VAULT_KEYS; + +export default create({ + visit: visitable('/vault/auth'), + logout: visitable('/vault/logout'), + submit: clickable('[data-test-auth-submit]'), + tokenInput: fillable('[data-test-token]'), + usernameInput: fillable('[data-test-username]'), + passwordInput: fillable('[data-test-password]'), + namespaceInput: fillable('[data-test-auth-form-ns-input]'), + optionsToggle: clickable('[data-test-auth-form-options-toggle]'), + mountPath: fillable('[data-test-auth-form-mount-path]'), + + login: async function (token = rootToken) { + // make sure we're always logged out and logged back in + await this.logout(); + await settled(); + // clear session storage to ensure we have a clean state + window.localStorage.clear(); + await this.visit({ with: 'token' }); + await settled(); + return this.tokenInput(token).submit(); + }, + loginUsername: async function (username, password, path) { + // make sure we're always logged out and logged back in + await this.logout(); + await settled(); + // clear local storage to ensure we have a clean state + window.localStorage.clear(); + await this.visit({ with: 'userpass' }); + await settled(); + if (path) { + await this.optionsToggle(); + await this.mountPath(path); + } + await this.usernameInput(username); + return this.passwordInput(password).submit(); + }, + loginNs: async function (ns, token = rootToken) { + // make sure we're always logged out and logged back in + await this.logout(); + await settled(); + // clear session storage to ensure we have a clean state + window.localStorage.clear(); + await this.visit({ with: 'token' }); + await settled(); + await this.namespaceInput(ns); + await settled(); + await this.tokenInput(token).submit(); + return; + }, + 
clickLogout: async function (clearNamespace = false) { + await click('[data-test-user-menu-trigger]'); + await click('[data-test-user-menu-content] a#logout'); + if (clearNamespace) { + await this.namespaceInput(''); + } + return; + }, +}); diff --git a/command/audit_enable_test.go b/command/audit_enable_test.go index 58dca872e640..1476b71d0f07 100644 --- a/command/audit_enable_test.go +++ b/command/audit_enable_test.go @@ -4,211 +4,89 @@ package command import ( - "io/ioutil" - "os" + "fmt" "strings" - "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" + "github.com/posener/complete" ) -func testAuditEnableCommand(tb testing.TB) (*cli.MockUi, *AuditEnableCommand) { - tb.Helper() +var ( + _ cli.Command = (*AuthDisableCommand)(nil) + _ cli.CommandAutocomplete = (*AuthDisableCommand)(nil) +) - ui := cli.NewMockUi() - return ui, &AuditEnableCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - } +type AuthDisableCommand struct { + *BaseCommand } -func TestAuditEnableCommand_Run(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - args []string - out string - code int - }{ - { - "empty", - nil, - "Error enabling audit device: audit type missing. Valid types include 'file', 'socket' and 'syslog'.", - 1, - }, - { - "not_a_valid_type", - []string{"nope_definitely_not_a_valid_type_like_ever"}, - "", - 2, - }, - { - "enable", - []string{"file", "file_path=discard"}, - "Success! Enabled the file audit device at: file/", - 0, - }, - { - "enable_path", - []string{ - "-path", "audit_path", - "file", - "file_path=discard", - }, - "Success! Enabled the file audit device at: audit_path/", - 0, - }, - } +func (c *AuthDisableCommand) Synopsis() string { + return "Disables an auth method" +} + +func (c *AuthDisableCommand) Help() string { + helpText := ` +Usage: vault auth disable [options] PATH + + Disables an existing auth method at the given PATH. The argument corresponds + to the PATH of the mount, not the TYPE!. 
Once the auth method is disabled its + path can no longer be used to authenticate. - for _, tc := range cases { - tc := tc + All access tokens generated via the disabled auth method are immediately + revoked. This command will block until all tokens are revoked. - t.Run(tc.name, func(t *testing.T) { - t.Parallel() + Disable the auth method at userpass/: - client, closer := testVaultServer(t) - defer closer() + $ vault auth disable userpass/ - ui, cmd := testAuditEnableCommand(t) - cmd.client = client +` + c.Flags().Help() - code := cmd.Run(tc.args) - if code != tc.code { - t.Errorf("expected %d to be %d", code, tc.code) - } + return strings.TrimSpace(helpText) +} + +func (c *AuthDisableCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *AuthDisableCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultAuths() +} + +func (c *AuthDisableCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AuthDisableCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + path := ensureTrailingSlash(sanitizePath(args[0])) + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, tc.out) { - t.Errorf("expected %q to contain %q", combined, tc.out) - } - }) + if err := client.Sys().DisableAuth(path); err != nil { + c.UI.Error(fmt.Sprintf("Error disabling auth method at %s: %s", path, err)) + return 2 } - t.Run("integration", func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - ui, cmd := 
testAuditEnableCommand(t) - cmd.client = client - - code := cmd.Run([]string{ - "-path", "audit_enable_integration/", - "-description", "The best kind of test", - "file", - "file_path=discard", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - - expected := "Success! Enabled the file audit device at: audit_enable_integration/" - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } - - audits, err := client.Sys().ListAudit() - if err != nil { - t.Fatal(err) - } - - auditInfo, ok := audits["audit_enable_integration/"] - if !ok { - t.Fatalf("expected audit to exist") - } - if exp := "file"; auditInfo.Type != exp { - t.Errorf("expected %q to be %q", auditInfo.Type, exp) - } - if exp := "The best kind of test"; auditInfo.Description != exp { - t.Errorf("expected %q to be %q", auditInfo.Description, exp) - } - - filePath, ok := auditInfo.Options["file_path"] - if !ok || filePath != "discard" { - t.Errorf("missing some options: %#v", auditInfo) - } - }) - - t.Run("communication_failure", func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServerBad(t) - defer closer() - - ui, cmd := testAuditEnableCommand(t) - cmd.client = client - - code := cmd.Run([]string{ - "pki", - }) - if exp := 2; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - - expected := "Error enabling audit device: " - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } - }) - - t.Run("no_tabs", func(t *testing.T) { - t.Parallel() - - _, cmd := testAuditEnableCommand(t) - assertNoTabs(t, cmd) - }) - - t.Run("mount_all", func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServerAllBackends(t) - defer closer() - - files, err := ioutil.ReadDir("../builtin/audit") - if err != nil { - t.Fatal(err) 
- } - - var backends []string - for _, f := range files { - if f.IsDir() { - backends = append(backends, f.Name()) - } - } - - for _, b := range backends { - ui, cmd := testAuditEnableCommand(t) - cmd.client = client - - args := []string{ - b, - } - switch b { - case "file": - args = append(args, "file_path=discard") - case "socket": - args = append(args, "address=127.0.0.1:8888", - "skip_test=true") - case "syslog": - if _, exists := os.LookupEnv("WSLENV"); exists { - t.Log("skipping syslog test on WSL") - continue - } - if os.Getenv("CIRCLECI") == "true" { - // TODO install syslog in docker image we run our tests in - t.Log("skipping syslog test on CircleCI") - continue - } - } - code := cmd.Run(args) - if exp := 0; code != exp { - t.Errorf("type %s, expected %d to be %d - %s", b, code, exp, ui.OutputWriter.String()+ui.ErrorWriter.String()) - } - } - }) + c.UI.Output(fmt.Sprintf("Success! Disabled the auth method (if it existed) at: %s", path)) + return 0 } diff --git a/command/audit_list.go b/command/audit_list.go index a7793fcee493..f9da8a7d770c 100644 --- a/command/audit_list.go +++ b/command/audit_list.go @@ -4,168 +4,137 @@ package command import ( - "fmt" - "sort" "strings" + "testing" - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" - "github.com/posener/complete" + "github.com/hashicorp/cli" ) -var ( - _ cli.Command = (*AuditListCommand)(nil) - _ cli.CommandAutocomplete = (*AuditListCommand)(nil) -) - -type AuditListCommand struct { - *BaseCommand +func testAuthDisableCommand(tb testing.TB) (*cli.MockUi, *AuthDisableCommand) { + tb.Helper() - flagDetailed bool -} - -func (c *AuditListCommand) Synopsis() string { - return "Lists enabled audit devices" + ui := cli.NewMockUi() + return ui, &AuthDisableCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } } -func (c *AuditListCommand) Help() string { - helpText := ` -Usage: vault audit list [options] - - Lists the enabled audit devices in the Vault server. 
The output lists the - enabled audit devices and the options for those devices. - - List all audit devices: +func TestAuthDisableCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + nil, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + } - $ vault audit list + t.Run("validations", func(t *testing.T) { + t.Parallel() - List detailed output about the audit devices: + for _, tc := range cases { + tc := tc - $ vault audit list -detailed + t.Run(tc.name, func(t *testing.T) { + t.Parallel() -` + c.Flags().Help() + client, closer := testVaultServer(t) + defer closer() - return strings.TrimSpace(helpText) -} + ui, cmd := testAuthDisableCommand(t) + cmd.client = client -func (c *AuditListCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } - f := set.NewFlagSet("Command Options") - - f.BoolVar(&BoolVar{ - Name: "detailed", - Target: &c.flagDetailed, - Default: false, - EnvVar: "", - Usage: "Print detailed information such as options and replication " + - "status about each auth device.", + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } }) - return set -} - -func (c *AuditListCommand) AutocompleteArgs() complete.Predictor { - return nil -} - -func (c *AuditListCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *AuditListCommand) Run(args []string) int { - f := c.Flags() + t.Run("integration", func(t *testing.T) { + t.Parallel() - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } + client, closer := testVaultServer(t) + defer closer() - args = f.Args() - if len(args) > 0 { - 
c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) - return 1 - } + if err := client.Sys().EnableAuth("my-auth", "userpass", ""); err != nil { + t.Fatal(err) + } - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } + ui, cmd := testAuthDisableCommand(t) + cmd.client = client - audits, err := client.Sys().ListAudit() - if err != nil { - c.UI.Error(fmt.Sprintf("Error listing audits: %s", err)) - return 2 - } + code := cmd.Run([]string{ + "my-auth", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - switch Format(c.UI) { - case "table": - if len(audits) == 0 { - c.UI.Output("No audit devices are enabled.") - return 2 + expected := "Success! Disabled the auth method" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) } - if c.flagDetailed { - c.UI.Output(tableOutput(c.detailedAudits(audits), nil)) - return 0 + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) } - c.UI.Output(tableOutput(c.simpleAudits(audits), nil)) - return 0 - default: - return OutputData(c.UI, audits) - } -} -func (c *AuditListCommand) simpleAudits(audits map[string]*api.Audit) []string { - paths := make([]string, 0, len(audits)) - for path := range audits { - paths = append(paths, path) - } - sort.Strings(paths) - - columns := []string{"Path | Type | Description"} - for _, path := range paths { - audit := audits[path] - columns = append(columns, fmt.Sprintf("%s | %s | %s", - audit.Path, - audit.Type, - audit.Description, - )) - } + if auth, ok := auths["my-auth/"]; ok { + t.Errorf("expected auth to be disabled: %#v", auth) + } + }) - return columns -} + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() -func (c *AuditListCommand) detailedAudits(audits map[string]*api.Audit) []string { - paths := make([]string, 0, len(audits)) - for path := range 
audits { - paths = append(paths, path) - } - sort.Strings(paths) + client, closer := testVaultServerBad(t) + defer closer() - columns := []string{"Path | Type | Description | Replication | Options"} - for _, path := range paths { - audit := audits[path] + ui, cmd := testAuthDisableCommand(t) + cmd.client = client - opts := make([]string, 0, len(audit.Options)) - for k, v := range audit.Options { - opts = append(opts, k+"="+v) + code := cmd.Run([]string{ + "my-auth", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) } - replication := "replicated" - if audit.Local { - replication = "local" + expected := "Error disabling auth method at my-auth/: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) } + }) - columns = append(columns, fmt.Sprintf("%s | %s | %s | %s | %s", - path, - audit.Type, - audit.Description, - replication, - strings.Join(opts, " "), - )) - } + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() - return columns + _, cmd := testAuthDisableCommand(t) + assertNoTabs(t, cmd) + }) } diff --git a/command/audit_list_test.go b/command/audit_list_test.go index bc41c13d603c..912f410e8285 100644 --- a/command/audit_list_test.go +++ b/command/audit_list_test.go @@ -4,111 +4,328 @@ package command import ( + "flag" + "fmt" + "strconv" "strings" - "testing" + "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" + "github.com/posener/complete" ) -func testAuditListCommand(tb testing.TB) (*cli.MockUi, *AuditListCommand) { - tb.Helper() +var ( + _ cli.Command = (*AuthEnableCommand)(nil) + _ cli.CommandAutocomplete = (*AuthEnableCommand)(nil) +) - ui := cli.NewMockUi() - return ui, &AuditListCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - } +type AuthEnableCommand struct { + *BaseCommand + + flagDescription string + flagPath string + flagDefaultLeaseTTL time.Duration 
+ flagMaxLeaseTTL time.Duration + flagAuditNonHMACRequestKeys []string + flagAuditNonHMACResponseKeys []string + flagListingVisibility string + flagPluginName string + flagPassthroughRequestHeaders []string + flagAllowedResponseHeaders []string + flagOptions map[string]string + flagLocal bool + flagSealWrap bool + flagExternalEntropyAccess bool + flagTokenType string + flagVersion int + flagPluginVersion string } -func TestAuditListCommand_Run(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - args []string - out string - code int - }{ - { - "too_many_args", - []string{"foo"}, - "Too many arguments", - 1, - }, - { - "lists", - nil, - "Path", - 0, - }, - { - "detailed", - []string{"-detailed"}, - "Options", - 0, - }, +func (c *AuthEnableCommand) Synopsis() string { + return "Enables a new auth method" +} + +func (c *AuthEnableCommand) Help() string { + helpText := ` +Usage: vault auth enable [options] TYPE + + Enables a new auth method. An auth method is responsible for authenticating + users or machines and assigning them policies with which they can access + Vault. 
+ + Enable the userpass auth method at userpass/: + + $ vault auth enable userpass + + Enable the LDAP auth method at auth-prod/: + + $ vault auth enable -path=auth-prod ldap + + Enable a custom auth plugin (after it's registered in the plugin registry): + + $ vault auth enable -path=my-auth -plugin-name=my-auth-plugin plugin + + OR (preferred way): + + $ vault auth enable -path=my-auth my-auth-plugin + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *AuthEnableCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "description", + Target: &c.flagDescription, + Completion: complete.PredictAnything, + Usage: "Human-friendly description for the purpose of this " + + "auth method.", + }) + + f.StringVar(&StringVar{ + Name: "path", + Target: &c.flagPath, + Default: "", // The default is complex, so we have to manually document + Completion: complete.PredictAnything, + Usage: "Place where the auth method will be accessible. This must be " + + "unique across all auth methods. This defaults to the \"type\" of " + + "the auth method. The auth method will be accessible at " + + "\"/auth/\".", + }) + + f.DurationVar(&DurationVar{ + Name: "default-lease-ttl", + Target: &c.flagDefaultLeaseTTL, + Completion: complete.PredictAnything, + Usage: "The default lease TTL for this auth method. If unspecified, " + + "this defaults to the Vault server's globally configured default lease " + + "TTL.", + }) + + f.DurationVar(&DurationVar{ + Name: "max-lease-ttl", + Target: &c.flagMaxLeaseTTL, + Completion: complete.PredictAnything, + Usage: "The maximum lease TTL for this auth method. 
If unspecified, " + + "this defaults to the Vault server's globally configured maximum lease " + + "TTL.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAuditNonHMACRequestKeys, + Target: &c.flagAuditNonHMACRequestKeys, + Usage: "Key that will not be HMAC'd by audit devices in the request data object. " + + "To specify multiple values, specify this flag multiple times.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAuditNonHMACResponseKeys, + Target: &c.flagAuditNonHMACResponseKeys, + Usage: "Key that will not be HMAC'd by audit devices in the response data object. " + + "To specify multiple values, specify this flag multiple times.", + }) + + f.StringVar(&StringVar{ + Name: flagNameListingVisibility, + Target: &c.flagListingVisibility, + Usage: "Determines the visibility of the mount in the UI-specific listing endpoint.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNamePassthroughRequestHeaders, + Target: &c.flagPassthroughRequestHeaders, + Usage: "Request header value that will be sent to the plugin. To specify multiple " + + "values, specify this flag multiple times.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAllowedResponseHeaders, + Target: &c.flagAllowedResponseHeaders, + Usage: "Response header value that plugins will be allowed to set. To specify multiple " + + "values, specify this flag multiple times.", + }) + + f.StringVar(&StringVar{ + Name: "plugin-name", + Target: &c.flagPluginName, + Completion: c.PredictVaultPlugins(api.PluginTypeCredential), + Usage: "Name of the auth method plugin. This plugin name must already " + + "exist in the Vault server's plugin catalog.", + }) + + f.StringMapVar(&StringMapVar{ + Name: "options", + Target: &c.flagOptions, + Completion: complete.PredictAnything, + Usage: "Key-value pair provided as key=value for the mount options. 
" + + "This can be specified multiple times.", + }) + + f.BoolVar(&BoolVar{ + Name: "local", + Target: &c.flagLocal, + Default: false, + Usage: "Mark the auth method as local-only. Local auth methods are " + + "not replicated nor removed by replication.", + }) + + f.BoolVar(&BoolVar{ + Name: "seal-wrap", + Target: &c.flagSealWrap, + Default: false, + Usage: "Enable seal wrapping of critical values in the secrets engine.", + }) + + f.BoolVar(&BoolVar{ + Name: "external-entropy-access", + Target: &c.flagExternalEntropyAccess, + Default: false, + Usage: "Enable auth method to access Vault's external entropy source.", + }) + + f.StringVar(&StringVar{ + Name: flagNameTokenType, + Target: &c.flagTokenType, + Usage: "Sets a forced token type for the mount.", + }) + + f.IntVar(&IntVar{ + Name: "version", + Target: &c.flagVersion, + Default: 0, + Usage: "Select the version of the auth method to run. Not supported by all auth methods.", + }) + + f.StringVar(&StringVar{ + Name: flagNamePluginVersion, + Target: &c.flagPluginVersion, + Default: "", + Usage: "Select the semantic version of the plugin to enable.", + }) + + return set +} + +func (c *AuthEnableCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultAvailableAuths() +} + +func (c *AuthEnableCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AuthEnableCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 } - for _, tc := range cases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - if err := client.Sys().EnableAuditWithOptions("file", &api.EnableAuditOptions{ - Type: "file", - Options: map[string]string{ - "file_path": "discard", - }, - }); err != nil { - t.Fatal(err) - } - - ui, cmd := testAuditListCommand(t) - cmd.client = client - - code := cmd.Run(tc.args) - if code != tc.code { - t.Errorf("expected %d 
to be %d", code, tc.code) - } - - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, tc.out) { - t.Errorf("expected %q to contain %q", combined, tc.out) - } - }) + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 } - t.Run("communication_failure", func(t *testing.T) { - t.Parallel() + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } - client, closer := testVaultServerBad(t) - defer closer() + authType := strings.TrimSpace(args[0]) + if authType == "plugin" { + authType = c.flagPluginName + } - ui, cmd := testAuditListCommand(t) - cmd.client = client + // If no path is specified, we default the path to the backend type + // or use the plugin name if it's a plugin backend + authPath := c.flagPath + if authPath == "" { + if authType == "plugin" { + authPath = c.flagPluginName + } else { + authPath = authType + } + } + + // Append a trailing slash to indicate it's a path in output + authPath = ensureTrailingSlash(authPath) - code := cmd.Run([]string{}) - if exp := 2; code != exp { - t.Errorf("expected %d to be %d", code, exp) + if c.flagVersion > 0 { + if c.flagOptions == nil { + c.flagOptions = make(map[string]string) } + c.flagOptions["version"] = strconv.Itoa(c.flagVersion) + } + + authOpts := &api.EnableAuthOptions{ + Type: authType, + Description: c.flagDescription, + Local: c.flagLocal, + SealWrap: c.flagSealWrap, + ExternalEntropyAccess: c.flagExternalEntropyAccess, + Config: api.AuthConfigInput{ + DefaultLeaseTTL: c.flagDefaultLeaseTTL.String(), + MaxLeaseTTL: c.flagMaxLeaseTTL.String(), + }, + Options: c.flagOptions, + } - expected := "Error listing audits: " - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - 
t.Errorf("expected %q to contain %q", combined, expected) + // Set these values only if they are provided in the CLI + f.Visit(func(fl *flag.Flag) { + if fl.Name == flagNameAuditNonHMACRequestKeys { + authOpts.Config.AuditNonHMACRequestKeys = c.flagAuditNonHMACRequestKeys + } + + if fl.Name == flagNameAuditNonHMACResponseKeys { + authOpts.Config.AuditNonHMACResponseKeys = c.flagAuditNonHMACResponseKeys + } + + if fl.Name == flagNameListingVisibility { + authOpts.Config.ListingVisibility = c.flagListingVisibility } - }) - t.Run("no_tabs", func(t *testing.T) { - t.Parallel() + if fl.Name == flagNamePassthroughRequestHeaders { + authOpts.Config.PassthroughRequestHeaders = c.flagPassthroughRequestHeaders + } + + if fl.Name == flagNameAllowedResponseHeaders { + authOpts.Config.AllowedResponseHeaders = c.flagAllowedResponseHeaders + } - _, cmd := testAuditListCommand(t) - assertNoTabs(t, cmd) + if fl.Name == flagNameTokenType { + authOpts.Config.TokenType = c.flagTokenType + } + + if fl.Name == flagNamePluginVersion { + authOpts.Config.PluginVersion = c.flagPluginVersion + } }) + + if err := client.Sys().EnableAuthWithOptions(authPath, authOpts); err != nil { + c.UI.Error(fmt.Sprintf("Error enabling %s auth: %s", authType, err)) + return 2 + } + + authThing := authType + " auth method" + if authType == "plugin" { + authThing = c.flagPluginName + " plugin" + } + if c.flagPluginVersion != "" { + authThing += " version " + c.flagPluginVersion + } + c.UI.Output(fmt.Sprintf("Success! 
Enabled %s at: %s", authThing, authPath)) + return 0 } diff --git a/command/auth.go b/command/auth.go index 76919ed2084f..0cdaf0fc79dd 100644 --- a/command/auth.go +++ b/command/auth.go @@ -4,49 +4,241 @@ package command import ( + "io/ioutil" + "sort" "strings" - - "github.com/mitchellh/cli" + "testing" + + "github.com/go-test/deep" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/helper/builtinplugins" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/strutil" ) -var _ cli.Command = (*AuthCommand)(nil) - -type AuthCommand struct { - *BaseCommand -} - -func (c *AuthCommand) Synopsis() string { - return "Interact with auth methods" -} - -func (c *AuthCommand) Help() string { - return strings.TrimSpace(` -Usage: vault auth [options] [args] - - This command groups subcommands for interacting with Vault's auth methods. - Users can list, enable, disable, and get help for different auth methods. - - To authenticate to Vault as a user or machine, use the "vault login" command - instead. This command is for interacting with the auth methods themselves, not - authenticating to Vault. - - List all enabled auth methods: - - $ vault auth list - - Enable a new auth method "userpass"; - - $ vault auth enable userpass - - Get detailed help information about how to authenticate to a particular auth - method: - - $ vault auth help github +func testAuthEnableCommand(tb testing.TB) (*cli.MockUi, *AuthEnableCommand) { + tb.Helper() - Please see the individual subcommand help for detailed usage information. 
-`) + ui := cli.NewMockUi() + return ui, &AuthEnableCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } } -func (c *AuthCommand) Run(args []string) int { - return cli.RunResultHelp +func TestAuthEnableCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + nil, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + { + "not_a_valid_auth", + []string{"nope_definitely_not_a_valid_mount_like_ever"}, + "", + 2, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuthEnableCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected command return code to be %d, got %d", tc.code, code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q in response\n got: %+v", tc.out, combined) + } + }) + } + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuthEnableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "-path", "auth_integration/", + "-description", "The best kind of test", + "-audit-non-hmac-request-keys", "foo,bar", + "-audit-non-hmac-response-keys", "foo,bar", + "-passthrough-request-headers", "authorization,authentication", + "-passthrough-request-headers", "www-authentication", + "-allowed-response-headers", "authorization", + "-listing-visibility", "unauth", + "userpass", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Enabled userpass auth method at:" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + + authInfo, ok := auths["auth_integration/"] + if !ok { + t.Fatalf("expected mount to exist") + } + if exp := "userpass"; authInfo.Type != exp { + t.Errorf("expected %q to be %q", authInfo.Type, exp) + } + if exp := "The best kind of test"; authInfo.Description != exp { + t.Errorf("expected %q to be %q", authInfo.Description, exp) + } + if diff := deep.Equal([]string{"authorization,authentication", "www-authentication"}, authInfo.Config.PassthroughRequestHeaders); len(diff) > 0 { + t.Errorf("Failed to find expected values in PassthroughRequestHeaders. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"authorization"}, authInfo.Config.AllowedResponseHeaders); len(diff) > 0 { + t.Errorf("Failed to find expected values in AllowedResponseHeaders. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"foo,bar"}, authInfo.Config.AuditNonHMACRequestKeys); len(diff) > 0 { + t.Errorf("Failed to find expected values in AuditNonHMACRequestKeys. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"foo,bar"}, authInfo.Config.AuditNonHMACResponseKeys); len(diff) > 0 { + t.Errorf("Failed to find expected values in AuditNonHMACResponseKeys. 
Difference is: %v", diff) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testAuthEnableCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "userpass", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error enabling userpass auth: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testAuthEnableCommand(t) + assertNoTabs(t, cmd) + }) + + t.Run("mount_all", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerAllBackends(t) + defer closer() + + files, err := ioutil.ReadDir("../builtin/credential") + if err != nil { + t.Fatal(err) + } + + var backends []string + for _, f := range files { + if f.IsDir() && f.Name() != "token" { + backends = append(backends, f.Name()) + } + } + + modFile, err := ioutil.ReadFile("../go.mod") + if err != nil { + t.Fatal(err) + } + modLines := strings.Split(string(modFile), "\n") + for _, p := range modLines { + splitLine := strings.Split(strings.TrimSpace(p), " ") + if len(splitLine) == 0 { + continue + } + potPlug := strings.TrimPrefix(splitLine[0], "github.com/hashicorp/") + if strings.HasPrefix(potPlug, "vault-plugin-auth-") { + backends = append(backends, strings.TrimPrefix(potPlug, "vault-plugin-auth-")) + } + } + // Since "pcf" plugin in the Vault registry is also pointed at the "vault-plugin-auth-cf" + // repository, we need to manually append it here so it'll tie out with our expected number + // of credential backends. 
+ backends = append(backends, "pcf") + + regkeys := strutil.StrListDelete(builtinplugins.Registry.Keys(consts.PluginTypeCredential), "oidc") + sort.Strings(regkeys) + sort.Strings(backends) + if d := cmp.Diff(regkeys, backends); len(d) > 0 { + t.Fatalf("found credential registry mismatch: %v", d) + } + + for _, b := range backends { + var expectedResult int = 0 + + // Not a builtin + if b == "token" { + continue + } + + ui, cmd := testAuthEnableCommand(t) + cmd.client = client + + actualResult := cmd.Run([]string{ + b, + }) + + // Need to handle deprecated builtins specially + status, _ := builtinplugins.Registry.DeprecationStatus(b, consts.PluginTypeCredential) + if status == consts.PendingRemoval || status == consts.Removed { + expectedResult = 2 + } + + if actualResult != expectedResult { + t.Errorf("type: %s - got: %d, expected: %d - %s", b, actualResult, expectedResult, ui.OutputWriter.String()+ui.ErrorWriter.String()) + } + } + }) } diff --git a/command/auth_disable.go b/command/auth_disable.go index 775f7fabc7fd..68365e737c3b 100644 --- a/command/auth_disable.go +++ b/command/auth_disable.go @@ -7,56 +7,70 @@ import ( "fmt" "strings" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" "github.com/posener/complete" ) var ( - _ cli.Command = (*AuthDisableCommand)(nil) - _ cli.CommandAutocomplete = (*AuthDisableCommand)(nil) + _ cli.Command = (*AuthHelpCommand)(nil) + _ cli.CommandAutocomplete = (*AuthHelpCommand)(nil) ) -type AuthDisableCommand struct { +type AuthHelpCommand struct { *BaseCommand + + Handlers map[string]LoginHandler } -func (c *AuthDisableCommand) Synopsis() string { - return "Disables an auth method" +func (c *AuthHelpCommand) Synopsis() string { + return "Prints usage for an auth method" } -func (c *AuthDisableCommand) Help() string { +func (c *AuthHelpCommand) Help() string { helpText := ` -Usage: vault auth disable [options] PATH +Usage: vault auth help [options] TYPE | PATH + + Prints usage and help for an auth method. 
+ + - If given a TYPE, this command prints the default help for the + auth method of that type. + + - If given a PATH, this command prints the help output for the + auth method enabled at that path. This path must already + exist. - Disables an existing auth method at the given PATH. The argument corresponds - to the PATH of the mount, not the TYPE!. Once the auth method is disabled its - path can no longer be used to authenticate. + Get usage instructions for the userpass auth method: - All access tokens generated via the disabled auth method are immediately - revoked. This command will block until all tokens are revoked. + $ vault auth help userpass - Disable the auth method at userpass/: + Print usage for the auth method enabled at my-method/: - $ vault auth disable userpass/ + $ vault auth help my-method/ + + Each auth method produces its own help output. ` + c.Flags().Help() return strings.TrimSpace(helpText) } -func (c *AuthDisableCommand) Flags() *FlagSets { +func (c *AuthHelpCommand) Flags() *FlagSets { return c.flagSet(FlagSetHTTP) } -func (c *AuthDisableCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultAuths() +func (c *AuthHelpCommand) AutocompleteArgs() complete.Predictor { + handlers := make([]string, 0, len(c.Handlers)) + for k := range c.Handlers { + handlers = append(handlers, k) + } + return complete.PredictSet(handlers...) 
} -func (c *AuthDisableCommand) AutocompleteFlags() complete.Flags { +func (c *AuthHelpCommand) AutocompleteFlags() complete.Flags { return c.Flags().Completions() } -func (c *AuthDisableCommand) Run(args []string) int { +func (c *AuthHelpCommand) Run(args []string) int { f := c.Flags() if err := f.Parse(args); err != nil { @@ -74,19 +88,41 @@ func (c *AuthDisableCommand) Run(args []string) int { return 1 } - path := ensureTrailingSlash(sanitizePath(args[0])) - client, err := c.Client() if err != nil { c.UI.Error(err.Error()) return 2 } - if err := client.Sys().DisableAuth(path); err != nil { - c.UI.Error(fmt.Sprintf("Error disabling auth method at %s: %s", path, err)) - return 2 + // Start with the assumption that we have an auth type, not a path. + authType := strings.TrimSpace(args[0]) + + authHandler, ok := c.Handlers[authType] + if !ok { + // There was no auth type by that name, see if it's a mount + auths, err := client.Sys().ListAuth() + if err != nil { + c.UI.Error(fmt.Sprintf("Error listing auth methods: %s", err)) + return 2 + } + + authPath := ensureTrailingSlash(sanitizePath(args[0])) + auth, ok := auths[authPath] + if !ok { + c.UI.Warn(fmt.Sprintf( + "No auth method available on the server at %q", authPath)) + return 1 + } + + authHandler, ok = c.Handlers[auth.Type] + if !ok { + c.UI.Warn(wrapAtLength(fmt.Sprintf( + "No method-specific CLI handler available for auth method %q", + authType))) + return 2 + } } - c.UI.Output(fmt.Sprintf("Success! 
Disabled the auth method (if it existed) at: %s", path)) + c.UI.Output(authHandler.Help()) return 0 } diff --git a/command/auth_disable_test.go b/command/auth_disable_test.go index 1cf429876070..e437f29199e3 100644 --- a/command/auth_disable_test.go +++ b/command/auth_disable_test.go @@ -7,21 +7,28 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" + + credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" ) -func testAuthDisableCommand(tb testing.TB) (*cli.MockUi, *AuthDisableCommand) { +func testAuthHelpCommand(tb testing.TB) (*cli.MockUi, *AuthHelpCommand) { tb.Helper() ui := cli.NewMockUi() - return ui, &AuthDisableCommand{ + return ui, &AuthHelpCommand{ BaseCommand: &BaseCommand{ UI: ui, }, + Handlers: map[string]LoginHandler{ + "userpass": &credUserpass.CLIHandler{ + DefaultMount: "userpass", + }, + }, } } -func TestAuthDisableCommand_Run(t *testing.T) { +func TestAuthHelpCommand_Run(t *testing.T) { t.Parallel() cases := []struct { @@ -30,81 +37,93 @@ func TestAuthDisableCommand_Run(t *testing.T) { out string code int }{ - { - "not_enough_args", - nil, - "Not enough arguments", - 1, - }, { "too_many_args", []string{"foo", "bar"}, "Too many arguments", 1, }, + { + "not_enough_args", + nil, + "Not enough arguments", + 1, + }, } - t.Run("validations", func(t *testing.T) { - t.Parallel() + for _, tc := range cases { + tc := tc - for _, tc := range cases { - tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - t.Run(tc.name, func(t *testing.T) { - t.Parallel() + client, closer := testVaultServer(t) + defer closer() - client, closer := testVaultServer(t) - defer closer() + ui, cmd := testAuthHelpCommand(t) + cmd.client = client - ui, cmd := testAuthDisableCommand(t) - cmd.client = client + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } - code := cmd.Run(tc.args) - if code != tc.code { - t.Errorf("expected %d to be %d", code, tc.code) - } - - 
combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, tc.out) { - t.Errorf("expected %q to contain %q", combined, tc.out) - } - }) - } - }) + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } - t.Run("integration", func(t *testing.T) { + t.Run("path", func(t *testing.T) { t.Parallel() client, closer := testVaultServer(t) defer closer() - if err := client.Sys().EnableAuth("my-auth", "userpass", ""); err != nil { + if err := client.Sys().EnableAuth("foo", "userpass", ""); err != nil { t.Fatal(err) } - ui, cmd := testAuthDisableCommand(t) + ui, cmd := testAuthHelpCommand(t) cmd.client = client code := cmd.Run([]string{ - "my-auth", + "foo/", }) if exp := 0; code != exp { t.Errorf("expected %d to be %d", code, exp) } - expected := "Success! Disabled the auth method" + expected := "Usage: vault login -method=userpass" combined := ui.OutputWriter.String() + ui.ErrorWriter.String() if !strings.Contains(combined, expected) { t.Errorf("expected %q to contain %q", combined, expected) } + }) - auths, err := client.Sys().ListAuth() - if err != nil { - t.Fatal(err) + t.Run("type", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + // No mounted auth methods + + ui, cmd := testAuthHelpCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "userpass", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) } - if auth, ok := auths["my-auth/"]; ok { - t.Errorf("expected auth to be disabled: %#v", auth) + expected := "Usage: vault login -method=userpass" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) } }) @@ -114,17 +133,17 @@ func TestAuthDisableCommand_Run(t *testing.T) { client, closer := testVaultServerBad(t) 
defer closer() - ui, cmd := testAuthDisableCommand(t) + ui, cmd := testAuthHelpCommand(t) cmd.client = client code := cmd.Run([]string{ - "my-auth", + "sys/mounts", }) if exp := 2; code != exp { t.Errorf("expected %d to be %d", code, exp) } - expected := "Error disabling auth method at my-auth/: " + expected := "Error listing auth methods: " combined := ui.OutputWriter.String() + ui.ErrorWriter.String() if !strings.Contains(combined, expected) { t.Errorf("expected %q to contain %q", combined, expected) @@ -134,7 +153,7 @@ func TestAuthDisableCommand_Run(t *testing.T) { t.Run("no_tabs", func(t *testing.T) { t.Parallel() - _, cmd := testAuthDisableCommand(t) + _, cmd := testAuthHelpCommand(t) assertNoTabs(t, cmd) }) } diff --git a/command/auth_enable.go b/command/auth_enable.go index cd47d2f35f92..d095156e155b 100644 --- a/command/auth_enable.go +++ b/command/auth_enable.go @@ -4,223 +4,78 @@ package command import ( - "flag" "fmt" + "sort" "strconv" "strings" - "time" + "github.com/hashicorp/cli" "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" "github.com/posener/complete" ) var ( - _ cli.Command = (*AuthEnableCommand)(nil) - _ cli.CommandAutocomplete = (*AuthEnableCommand)(nil) + _ cli.Command = (*AuthListCommand)(nil) + _ cli.CommandAutocomplete = (*AuthListCommand)(nil) ) -type AuthEnableCommand struct { +type AuthListCommand struct { *BaseCommand - flagDescription string - flagPath string - flagDefaultLeaseTTL time.Duration - flagMaxLeaseTTL time.Duration - flagAuditNonHMACRequestKeys []string - flagAuditNonHMACResponseKeys []string - flagListingVisibility string - flagPluginName string - flagPassthroughRequestHeaders []string - flagAllowedResponseHeaders []string - flagOptions map[string]string - flagLocal bool - flagSealWrap bool - flagExternalEntropyAccess bool - flagTokenType string - flagVersion int - flagPluginVersion string + flagDetailed bool } -func (c *AuthEnableCommand) Synopsis() string { - return "Enables a new auth method" +func (c 
*AuthListCommand) Synopsis() string { + return "Lists enabled auth methods" } -func (c *AuthEnableCommand) Help() string { +func (c *AuthListCommand) Help() string { helpText := ` -Usage: vault auth enable [options] TYPE +Usage: vault auth list [options] - Enables a new auth method. An auth method is responsible for authenticating - users or machines and assigning them policies with which they can access - Vault. + Lists the enabled auth methods on the Vault server. This command also outputs + information about the method including configuration and human-friendly + descriptions. A TTL of "system" indicates that the system default is in use. - Enable the userpass auth method at userpass/: + List all enabled auth methods: - $ vault auth enable userpass + $ vault auth list - Enable the LDAP auth method at auth-prod/: + List all enabled auth methods with detailed output: - $ vault auth enable -path=auth-prod ldap - - Enable a custom auth plugin (after it's registered in the plugin registry): - - $ vault auth enable -path=my-auth -plugin-name=my-auth-plugin plugin - - OR (preferred way): - - $ vault auth enable -path=my-auth my-auth-plugin + $ vault auth list -detailed ` + c.Flags().Help() return strings.TrimSpace(helpText) } -func (c *AuthEnableCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP) +func (c *AuthListCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) f := set.NewFlagSet("Command Options") - f.StringVar(&StringVar{ - Name: "description", - Target: &c.flagDescription, - Completion: complete.PredictAnything, - Usage: "Human-friendly description for the purpose of this " + - "auth method.", - }) - - f.StringVar(&StringVar{ - Name: "path", - Target: &c.flagPath, - Default: "", // The default is complex, so we have to manually document - Completion: complete.PredictAnything, - Usage: "Place where the auth method will be accessible. This must be " + - "unique across all auth methods. 
This defaults to the \"type\" of " + - "the auth method. The auth method will be accessible at " + - "\"/auth/\".", - }) - - f.DurationVar(&DurationVar{ - Name: "default-lease-ttl", - Target: &c.flagDefaultLeaseTTL, - Completion: complete.PredictAnything, - Usage: "The default lease TTL for this auth method. If unspecified, " + - "this defaults to the Vault server's globally configured default lease " + - "TTL.", - }) - - f.DurationVar(&DurationVar{ - Name: "max-lease-ttl", - Target: &c.flagMaxLeaseTTL, - Completion: complete.PredictAnything, - Usage: "The maximum lease TTL for this auth method. If unspecified, " + - "this defaults to the Vault server's globally configured maximum lease " + - "TTL.", - }) - - f.StringSliceVar(&StringSliceVar{ - Name: flagNameAuditNonHMACRequestKeys, - Target: &c.flagAuditNonHMACRequestKeys, - Usage: "Key that will not be HMAC'd by audit devices in the request data object. " + - "To specify multiple values, specify this flag multiple times.", - }) - - f.StringSliceVar(&StringSliceVar{ - Name: flagNameAuditNonHMACResponseKeys, - Target: &c.flagAuditNonHMACResponseKeys, - Usage: "Key that will not be HMAC'd by audit devices in the response data object. " + - "To specify multiple values, specify this flag multiple times.", - }) - - f.StringVar(&StringVar{ - Name: flagNameListingVisibility, - Target: &c.flagListingVisibility, - Usage: "Determines the visibility of the mount in the UI-specific listing endpoint.", - }) - - f.StringSliceVar(&StringSliceVar{ - Name: flagNamePassthroughRequestHeaders, - Target: &c.flagPassthroughRequestHeaders, - Usage: "Request header value that will be sent to the plugin. To specify multiple " + - "values, specify this flag multiple times.", - }) - - f.StringSliceVar(&StringSliceVar{ - Name: flagNameAllowedResponseHeaders, - Target: &c.flagAllowedResponseHeaders, - Usage: "Response header value that plugins will be allowed to set. 
To specify multiple " + - "values, specify this flag multiple times.", - }) - - f.StringVar(&StringVar{ - Name: "plugin-name", - Target: &c.flagPluginName, - Completion: c.PredictVaultPlugins(api.PluginTypeCredential), - Usage: "Name of the auth method plugin. This plugin name must already " + - "exist in the Vault server's plugin catalog.", - }) - - f.StringMapVar(&StringMapVar{ - Name: "options", - Target: &c.flagOptions, - Completion: complete.PredictAnything, - Usage: "Key-value pair provided as key=value for the mount options. " + - "This can be specified multiple times.", - }) - - f.BoolVar(&BoolVar{ - Name: "local", - Target: &c.flagLocal, - Default: false, - Usage: "Mark the auth method as local-only. Local auth methods are " + - "not replicated nor removed by replication.", - }) - - f.BoolVar(&BoolVar{ - Name: "seal-wrap", - Target: &c.flagSealWrap, - Default: false, - Usage: "Enable seal wrapping of critical values in the secrets engine.", - }) - f.BoolVar(&BoolVar{ - Name: "external-entropy-access", - Target: &c.flagExternalEntropyAccess, + Name: "detailed", + Target: &c.flagDetailed, Default: false, - Usage: "Enable auth method to access Vault's external entropy source.", - }) - - f.StringVar(&StringVar{ - Name: flagNameTokenType, - Target: &c.flagTokenType, - Usage: "Sets a forced token type for the mount.", - }) - - f.IntVar(&IntVar{ - Name: "version", - Target: &c.flagVersion, - Default: 0, - Usage: "Select the version of the auth method to run. Not supported by all auth methods.", - }) - - f.StringVar(&StringVar{ - Name: flagNamePluginVersion, - Target: &c.flagPluginVersion, - Default: "", - Usage: "Select the semantic version of the plugin to enable.", + Usage: "Print detailed information such as configuration and replication " + + "status about each auth method. 
This option is only applicable to " + + "table-formatted output.", }) return set } -func (c *AuthEnableCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultAvailableAuths() +func (c *AuthListCommand) AutocompleteArgs() complete.Predictor { + return nil } -func (c *AuthEnableCommand) AutocompleteFlags() complete.Flags { +func (c *AuthListCommand) AutocompleteFlags() complete.Flags { return c.Flags().Completions() } -func (c *AuthEnableCommand) Run(args []string) int { +func (c *AuthListCommand) Run(args []string) int { f := c.Flags() if err := f.Parse(args); err != nil { @@ -229,12 +84,8 @@ func (c *AuthEnableCommand) Run(args []string) int { } args = f.Args() - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + if len(args) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) return 1 } @@ -244,88 +95,95 @@ func (c *AuthEnableCommand) Run(args []string) int { return 2 } - authType := strings.TrimSpace(args[0]) - if authType == "plugin" { - authType = c.flagPluginName + auths, err := client.Sys().ListAuth() + if err != nil { + c.UI.Error(fmt.Sprintf("Error listing enabled authentications: %s", err)) + return 2 } - // If no path is specified, we default the path to the backend type - // or use the plugin name if it's a plugin backend - authPath := c.flagPath - if authPath == "" { - if authType == "plugin" { - authPath = c.flagPluginName - } else { - authPath = authType + switch Format(c.UI) { + case "table": + if c.flagDetailed { + c.UI.Output(tableOutput(c.detailedMounts(auths), nil)) + return 0 } + c.UI.Output(tableOutput(c.simpleMounts(auths), nil)) + return 0 + default: + return OutputData(c.UI, auths) } +} - // Append a trailing slash to indicate it's a path in output - authPath = ensureTrailingSlash(authPath) - - if c.flagVersion > 0 { 
- if c.flagOptions == nil { - c.flagOptions = make(map[string]string) - } - c.flagOptions["version"] = strconv.Itoa(c.flagVersion) +func (c *AuthListCommand) simpleMounts(auths map[string]*api.AuthMount) []string { + paths := make([]string, 0, len(auths)) + for path := range auths { + paths = append(paths, path) } + sort.Strings(paths) - authOpts := &api.EnableAuthOptions{ - Type: authType, - Description: c.flagDescription, - Local: c.flagLocal, - SealWrap: c.flagSealWrap, - ExternalEntropyAccess: c.flagExternalEntropyAccess, - Config: api.AuthConfigInput{ - DefaultLeaseTTL: c.flagDefaultLeaseTTL.String(), - MaxLeaseTTL: c.flagMaxLeaseTTL.String(), - }, - Options: c.flagOptions, + out := []string{"Path | Type | Accessor | Description | Version"} + for _, path := range paths { + mount := auths[path] + out = append(out, fmt.Sprintf("%s | %s | %s | %s | %s", path, mount.Type, mount.Accessor, mount.Description, mount.PluginVersion)) } - // Set these values only if they are provided in the CLI - f.Visit(func(fl *flag.Flag) { - if fl.Name == flagNameAuditNonHMACRequestKeys { - authOpts.Config.AuditNonHMACRequestKeys = c.flagAuditNonHMACRequestKeys - } - - if fl.Name == flagNameAuditNonHMACResponseKeys { - authOpts.Config.AuditNonHMACResponseKeys = c.flagAuditNonHMACResponseKeys - } + return out +} - if fl.Name == flagNameListingVisibility { - authOpts.Config.ListingVisibility = c.flagListingVisibility +func (c *AuthListCommand) detailedMounts(auths map[string]*api.AuthMount) []string { + paths := make([]string, 0, len(auths)) + for path := range auths { + paths = append(paths, path) + } + sort.Strings(paths) + + calcTTL := func(typ string, ttl int) string { + switch { + case typ == "system", typ == "cubbyhole": + return "" + case ttl != 0: + return strconv.Itoa(ttl) + default: + return "system" } + } - if fl.Name == flagNamePassthroughRequestHeaders { - authOpts.Config.PassthroughRequestHeaders = c.flagPassthroughRequestHeaders - } + out := []string{"Path | Plugin | 
Accessor | Default TTL | Max TTL | Token Type | Replication | Seal Wrap | External Entropy Access | Options | Description | UUID | Version | Running Version | Running SHA256 | Deprecation Status"} + for _, path := range paths { + mount := auths[path] - if fl.Name == flagNameAllowedResponseHeaders { - authOpts.Config.AllowedResponseHeaders = c.flagAllowedResponseHeaders - } + defaultTTL := calcTTL(mount.Type, mount.Config.DefaultLeaseTTL) + maxTTL := calcTTL(mount.Type, mount.Config.MaxLeaseTTL) - if fl.Name == flagNameTokenType { - authOpts.Config.TokenType = c.flagTokenType + replication := "replicated" + if mount.Local { + replication = "local" } - if fl.Name == flagNamePluginVersion { - authOpts.Config.PluginVersion = c.flagPluginVersion + pluginName := mount.Type + if pluginName == "plugin" { + pluginName = mount.Config.PluginName } - }) - if err := client.Sys().EnableAuthWithOptions(authPath, authOpts); err != nil { - c.UI.Error(fmt.Sprintf("Error enabling %s auth: %s", authType, err)) - return 2 + out = append(out, fmt.Sprintf("%s | %s | %s | %s | %s | %s | %s | %t | %v | %s | %s | %s | %s | %s | %s | %s", + path, + pluginName, + mount.Accessor, + defaultTTL, + maxTTL, + mount.Config.TokenType, + replication, + mount.SealWrap, + mount.ExternalEntropyAccess, + mount.Options, + mount.Description, + mount.UUID, + mount.PluginVersion, + mount.RunningVersion, + mount.RunningSha256, + mount.DeprecationStatus, + )) } - authThing := authType + " auth method" - if authType == "plugin" { - authThing = c.flagPluginName + " plugin" - } - if c.flagPluginVersion != "" { - authThing += " version " + c.flagPluginVersion - } - c.UI.Output(fmt.Sprintf("Success! 
Enabled %s at: %s", authThing, authPath)) - return 0 + return out } diff --git a/command/auth_enable_test.go b/command/auth_enable_test.go index 93c685819e44..087010a8ce35 100644 --- a/command/auth_enable_test.go +++ b/command/auth_enable_test.go @@ -4,31 +4,24 @@ package command import ( - "io/ioutil" - "sort" "strings" "testing" - "github.com/go-test/deep" - "github.com/google/go-cmp/cmp" - "github.com/hashicorp/vault/helper/builtinplugins" - "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/hashicorp/vault/sdk/helper/strutil" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" ) -func testAuthEnableCommand(tb testing.TB) (*cli.MockUi, *AuthEnableCommand) { +func testAuthListCommand(tb testing.TB) (*cli.MockUi, *AuthListCommand) { tb.Helper() ui := cli.NewMockUi() - return ui, &AuthEnableCommand{ + return ui, &AuthListCommand{ BaseCommand: &BaseCommand{ UI: ui, }, } } -func TestAuthEnableCommand_Run(t *testing.T) { +func TestAuthListCommand_Run(t *testing.T) { t.Parallel() cases := []struct { @@ -37,106 +30,51 @@ func TestAuthEnableCommand_Run(t *testing.T) { out string code int }{ - { - "not_enough_args", - nil, - "Not enough arguments", - 1, - }, { "too_many_args", - []string{"foo", "bar"}, + []string{"foo"}, "Too many arguments", 1, }, { - "not_a_valid_auth", - []string{"nope_definitely_not_a_valid_mount_like_ever"}, - "", - 2, + "lists", + nil, + "Path", + 0, + }, + { + "detailed", + []string{"-detailed"}, + "Default TTL", + 0, }, } - for _, tc := range cases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - ui, cmd := testAuthEnableCommand(t) - cmd.client = client - - code := cmd.Run(tc.args) - if code != tc.code { - t.Errorf("expected command return code to be %d, got %d", tc.code, code) - } - - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, tc.out) { - t.Errorf("expected %q in response\n got: %+v", tc.out, 
combined) - } - }) - } - - t.Run("integration", func(t *testing.T) { + t.Run("validations", func(t *testing.T) { t.Parallel() - client, closer := testVaultServer(t) - defer closer() + for _, tc := range cases { + tc := tc - ui, cmd := testAuthEnableCommand(t) - cmd.client = client + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - code := cmd.Run([]string{ - "-path", "auth_integration/", - "-description", "The best kind of test", - "-audit-non-hmac-request-keys", "foo,bar", - "-audit-non-hmac-response-keys", "foo,bar", - "-passthrough-request-headers", "authorization,authentication", - "-passthrough-request-headers", "www-authentication", - "-allowed-response-headers", "authorization", - "-listing-visibility", "unauth", - "userpass", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } + client, closer := testVaultServer(t) + defer closer() - expected := "Success! Enabled userpass auth method at:" - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } + ui, cmd := testAuthListCommand(t) + cmd.client = client - auths, err := client.Sys().ListAuth() - if err != nil { - t.Fatal(err) - } + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } - authInfo, ok := auths["auth_integration/"] - if !ok { - t.Fatalf("expected mount to exist") - } - if exp := "userpass"; authInfo.Type != exp { - t.Errorf("expected %q to be %q", authInfo.Type, exp) - } - if exp := "The best kind of test"; authInfo.Description != exp { - t.Errorf("expected %q to be %q", authInfo.Description, exp) - } - if diff := deep.Equal([]string{"authorization,authentication", "www-authentication"}, authInfo.Config.PassthroughRequestHeaders); len(diff) > 0 { - t.Errorf("Failed to find expected values in PassthroughRequestHeaders. 
Difference is: %v", diff) - } - if diff := deep.Equal([]string{"authorization"}, authInfo.Config.AllowedResponseHeaders); len(diff) > 0 { - t.Errorf("Failed to find expected values in AllowedResponseHeaders. Difference is: %v", diff) - } - if diff := deep.Equal([]string{"foo,bar"}, authInfo.Config.AuditNonHMACRequestKeys); len(diff) > 0 { - t.Errorf("Failed to find expected values in AuditNonHMACRequestKeys. Difference is: %v", diff) - } - if diff := deep.Equal([]string{"foo,bar"}, authInfo.Config.AuditNonHMACResponseKeys); len(diff) > 0 { - t.Errorf("Failed to find expected values in AuditNonHMACResponseKeys. Difference is: %v", diff) + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) } }) @@ -146,17 +84,15 @@ func TestAuthEnableCommand_Run(t *testing.T) { client, closer := testVaultServerBad(t) defer closer() - ui, cmd := testAuthEnableCommand(t) + ui, cmd := testAuthListCommand(t) cmd.client = client - code := cmd.Run([]string{ - "userpass", - }) + code := cmd.Run([]string{}) if exp := 2; code != exp { t.Errorf("expected %d to be %d", code, exp) } - expected := "Error enabling userpass auth: " + expected := "Error listing enabled authentications: " combined := ui.OutputWriter.String() + ui.ErrorWriter.String() if !strings.Contains(combined, expected) { t.Errorf("expected %q to contain %q", combined, expected) @@ -166,79 +102,7 @@ func TestAuthEnableCommand_Run(t *testing.T) { t.Run("no_tabs", func(t *testing.T) { t.Parallel() - _, cmd := testAuthEnableCommand(t) + _, cmd := testAuthListCommand(t) assertNoTabs(t, cmd) }) - - t.Run("mount_all", func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServerAllBackends(t) - defer closer() - - files, err := ioutil.ReadDir("../builtin/credential") - if err != nil { - t.Fatal(err) - } - - var backends []string - for _, f := range files { - if f.IsDir() && f.Name() != "token" { - 
backends = append(backends, f.Name()) - } - } - - modFile, err := ioutil.ReadFile("../go.mod") - if err != nil { - t.Fatal(err) - } - modLines := strings.Split(string(modFile), "\n") - for _, p := range modLines { - splitLine := strings.Split(strings.TrimSpace(p), " ") - if len(splitLine) == 0 { - continue - } - potPlug := strings.TrimPrefix(splitLine[0], "github.com/hashicorp/") - if strings.HasPrefix(potPlug, "vault-plugin-auth-") { - backends = append(backends, strings.TrimPrefix(potPlug, "vault-plugin-auth-")) - } - } - // Since "pcf" plugin in the Vault registry is also pointed at the "vault-plugin-auth-cf" - // repository, we need to manually append it here so it'll tie out with our expected number - // of credential backends. - backends = append(backends, "pcf") - - regkeys := strutil.StrListDelete(builtinplugins.Registry.Keys(consts.PluginTypeCredential), "oidc") - sort.Strings(regkeys) - sort.Strings(backends) - if d := cmp.Diff(regkeys, backends); len(d) > 0 { - t.Fatalf("found credential registry mismatch: %v", d) - } - - for _, b := range backends { - var expectedResult int = 0 - - // Not a builtin - if b == "token" { - continue - } - - ui, cmd := testAuthEnableCommand(t) - cmd.client = client - - actualResult := cmd.Run([]string{ - b, - }) - - // Need to handle deprecated builtins specially - status, _ := builtinplugins.Registry.DeprecationStatus(b, consts.PluginTypeCredential) - if status == consts.PendingRemoval || status == consts.Removed { - expectedResult = 2 - } - - if actualResult != expectedResult { - t.Errorf("type: %s - got: %d, expected: %d - %s", b, actualResult, expectedResult, ui.OutputWriter.String()+ui.ErrorWriter.String()) - } - } - }) } diff --git a/command/auth_help.go b/command/auth_help.go index 5e971ebbac41..3ede5fc49fc9 100644 --- a/command/auth_help.go +++ b/command/auth_help.go @@ -6,71 +6,67 @@ package command import ( "fmt" "strings" + "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" 
"github.com/posener/complete" ) var ( - _ cli.Command = (*AuthHelpCommand)(nil) - _ cli.CommandAutocomplete = (*AuthHelpCommand)(nil) + _ cli.Command = (*AuthMoveCommand)(nil) + _ cli.CommandAutocomplete = (*AuthMoveCommand)(nil) ) -type AuthHelpCommand struct { +type AuthMoveCommand struct { *BaseCommand - - Handlers map[string]LoginHandler } -func (c *AuthHelpCommand) Synopsis() string { - return "Prints usage for an auth method" +func (c *AuthMoveCommand) Synopsis() string { + return "Move an auth method to a new path" } -func (c *AuthHelpCommand) Help() string { +func (c *AuthMoveCommand) Help() string { helpText := ` -Usage: vault auth help [options] TYPE | PATH - - Prints usage and help for an auth method. +Usage: vault auth move [options] SOURCE DESTINATION - - If given a TYPE, this command prints the default help for the - auth method of that type. + Moves an existing auth method to a new path. Any leases from the old + auth method are revoked, but all configuration associated with the method + is preserved. It initiates the migration and intermittently polls its status, + exiting if a final state is reached. - - If given a PATH, this command prints the help output for the - auth method enabled at that path. This path must already - exist. + This command works within or across namespaces, both source and destination paths + can be prefixed with a namespace heirarchy relative to the current namespace. - Get usage instructions for the userpass auth method: + WARNING! Moving an auth method will revoke any leases from the + old method. - $ vault auth help userpass + Move the auth method at approle/ to generic/: - Print usage for the auth method enabled at my-method/: + $ vault auth move approle/ generic/ - $ vault auth help my-method/ + Move the auth method at ns1/approle/ across namespaces to ns2/generic/, + where ns1 and ns2 are child namespaces of the current namespace: - Each auth method produces its own help output. 
+ $ vault auth move ns1/approle/ ns2/generic/ ` + c.Flags().Help() return strings.TrimSpace(helpText) } -func (c *AuthHelpCommand) Flags() *FlagSets { +func (c *AuthMoveCommand) Flags() *FlagSets { return c.flagSet(FlagSetHTTP) } -func (c *AuthHelpCommand) AutocompleteArgs() complete.Predictor { - handlers := make([]string, 0, len(c.Handlers)) - for k := range c.Handlers { - handlers = append(handlers, k) - } - return complete.PredictSet(handlers...) +func (c *AuthMoveCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultMounts() } -func (c *AuthHelpCommand) AutocompleteFlags() complete.Flags { +func (c *AuthMoveCommand) AutocompleteFlags() complete.Flags { return c.Flags().Completions() } -func (c *AuthHelpCommand) Run(args []string) int { +func (c *AuthMoveCommand) Run(args []string) int { f := c.Flags() if err := f.Parse(args); err != nil { @@ -80,49 +76,51 @@ func (c *AuthHelpCommand) Run(args []string) int { args = f.Args() switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + case len(args) < 2: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 2, got %d)", len(args))) return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + case len(args) > 2: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 2, got %d)", len(args))) return 1 } + // Grab the source and destination + source := ensureTrailingSlash(args[0]) + destination := ensureTrailingSlash(args[1]) + client, err := c.Client() if err != nil { c.UI.Error(err.Error()) return 2 } - // Start with the assumption that we have an auth type, not a path. 
- authType := strings.TrimSpace(args[0]) + remountResp, err := client.Sys().StartRemount(source, destination) + if err != nil { + c.UI.Error(fmt.Sprintf("Error moving auth method %s to %s: %s", source, destination, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Started moving auth method %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) - authHandler, ok := c.Handlers[authType] - if !ok { - // There was no auth type by that name, see if it's a mount - auths, err := client.Sys().ListAuth() + // Poll the status endpoint with the returned migration ID + // Exit if a terminal status is reached, else wait and retry + for { + remountStatusResp, err := client.Sys().RemountStatus(remountResp.MigrationID) if err != nil { - c.UI.Error(fmt.Sprintf("Error listing auth methods: %s", err)) + c.UI.Error(fmt.Sprintf("Error checking migration status of auth method %s to %s: %s", source, destination, err)) return 2 } - - authPath := ensureTrailingSlash(sanitizePath(args[0])) - auth, ok := auths[authPath] - if !ok { - c.UI.Warn(fmt.Sprintf( - "No auth method available on the server at %q", authPath)) - return 1 + if remountStatusResp.MigrationInfo.MigrationStatus == MountMigrationStatusSuccess { + c.UI.Output(fmt.Sprintf("Success! Finished moving auth method %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) + return 0 } - - authHandler, ok = c.Handlers[auth.Type] - if !ok { - c.UI.Warn(wrapAtLength(fmt.Sprintf( - "No method-specific CLI handler available for auth method %q", - authType))) - return 2 + if remountStatusResp.MigrationInfo.MigrationStatus == MountMigrationStatusFailure { + c.UI.Error(fmt.Sprintf("Failure! 
Error encountered moving auth method %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) + return 0 } + c.UI.Output(fmt.Sprintf("Waiting for terminal status in migration of auth method %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) + time.Sleep(10 * time.Second) } - c.UI.Output(authHandler.Help()) return 0 } diff --git a/command/auth_help_test.go b/command/auth_help_test.go index d87832c8920f..0b585e7e0031 100644 --- a/command/auth_help_test.go +++ b/command/auth_help_test.go @@ -7,28 +7,22 @@ import ( "strings" "testing" - "github.com/mitchellh/cli" - - credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" ) -func testAuthHelpCommand(tb testing.TB) (*cli.MockUi, *AuthHelpCommand) { +func testAuthMoveCommand(tb testing.TB) (*cli.MockUi, *AuthMoveCommand) { tb.Helper() ui := cli.NewMockUi() - return ui, &AuthHelpCommand{ + return ui, &AuthMoveCommand{ BaseCommand: &BaseCommand{ UI: ui, }, - Handlers: map[string]LoginHandler{ - "userpass": &credUserpass.CLIHandler{ - DefaultMount: "userpass", - }, - }, } } -func TestAuthHelpCommand_Run(t *testing.T) { +func TestAuthMoveCommand_Run(t *testing.T) { t.Parallel() cases := []struct { @@ -37,93 +31,89 @@ func TestAuthHelpCommand_Run(t *testing.T) { out string code int }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, { "too_many_args", - []string{"foo", "bar"}, + []string{"foo", "bar", "baz"}, "Too many arguments", 1, }, { - "not_enough_args", - nil, - "Not enough arguments", - 1, + "non_existent", + []string{"not_real", "over_here"}, + "Error moving auth method not_real/ to over_here/", + 2, }, } - for _, tc := range cases { - tc := tc + t.Run("validations", func(t *testing.T) { + t.Parallel() - t.Run(tc.name, func(t *testing.T) { - t.Parallel() + for _, tc := range cases { + tc := tc - client, closer := testVaultServer(t) - defer closer() + t.Run(tc.name, 
func(t *testing.T) { + t.Parallel() - ui, cmd := testAuthHelpCommand(t) - cmd.client = client + client, closer := testVaultServer(t) + defer closer() - code := cmd.Run(tc.args) - if code != tc.code { - t.Errorf("expected %d to be %d", code, tc.code) - } + ui, cmd := testAuthMoveCommand(t) + cmd.client = client - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, tc.out) { - t.Errorf("expected %q to contain %q", combined, tc.out) - } - }) - } + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } - t.Run("path", func(t *testing.T) { + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("integration", func(t *testing.T) { t.Parallel() client, closer := testVaultServer(t) defer closer() - if err := client.Sys().EnableAuth("foo", "userpass", ""); err != nil { + ui, cmd := testAuthMoveCommand(t) + cmd.client = client + + if err := client.Sys().EnableAuthWithOptions("my-auth", &api.EnableAuthOptions{ + Type: "userpass", + }); err != nil { t.Fatal(err) } - ui, cmd := testAuthHelpCommand(t) - cmd.client = client - code := cmd.Run([]string{ - "foo/", + "auth/my-auth/", "auth/my-auth-2/", }) if exp := 0; code != exp { t.Errorf("expected %d to be %d", code, exp) } - expected := "Usage: vault login -method=userpass" + expected := "Success! 
Finished moving auth method auth/my-auth/ to auth/my-auth-2/" combined := ui.OutputWriter.String() + ui.ErrorWriter.String() if !strings.Contains(combined, expected) { t.Errorf("expected %q to contain %q", combined, expected) } - }) - - t.Run("type", func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - // No mounted auth methods - - ui, cmd := testAuthHelpCommand(t) - cmd.client = client - code := cmd.Run([]string{ - "userpass", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) + mounts, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) } - expected := "Usage: vault login -method=userpass" - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) + if _, ok := mounts["my-auth-2/"]; !ok { + t.Errorf("expected mount at my-auth-2/: %#v", mounts) } }) @@ -133,17 +123,17 @@ func TestAuthHelpCommand_Run(t *testing.T) { client, closer := testVaultServerBad(t) defer closer() - ui, cmd := testAuthHelpCommand(t) + ui, cmd := testAuthMoveCommand(t) cmd.client = client code := cmd.Run([]string{ - "sys/mounts", + "auth/my-auth/", "auth/my-auth-2/", }) if exp := 2; code != exp { t.Errorf("expected %d to be %d", code, exp) } - expected := "Error listing auth methods: " + expected := "Error moving auth method auth/my-auth/ to auth/my-auth-2/:" combined := ui.OutputWriter.String() + ui.ErrorWriter.String() if !strings.Contains(combined, expected) { t.Errorf("expected %q to contain %q", combined, expected) @@ -153,7 +143,7 @@ func TestAuthHelpCommand_Run(t *testing.T) { t.Run("no_tabs", func(t *testing.T) { t.Parallel() - _, cmd := testAuthHelpCommand(t) + _, cmd := testAuthMoveCommand(t) assertNoTabs(t, cmd) }) } diff --git a/command/auth_list.go b/command/auth_list.go index e1a8771d7f5b..9071c0c157a0 100644 --- a/command/auth_list.go +++ b/command/auth_list.go @@ -4,186 +4,34 @@ 
package command import ( - "fmt" - "sort" - "strconv" - "strings" + "testing" - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" - "github.com/posener/complete" -) + "github.com/hashicorp/cli" -var ( - _ cli.Command = (*AuthListCommand)(nil) - _ cli.CommandAutocomplete = (*AuthListCommand)(nil) + "github.com/hashicorp/vault/command/token" ) -type AuthListCommand struct { - *BaseCommand - - flagDetailed bool -} - -func (c *AuthListCommand) Synopsis() string { - return "Lists enabled auth methods" -} - -func (c *AuthListCommand) Help() string { - helpText := ` -Usage: vault auth list [options] - - Lists the enabled auth methods on the Vault server. This command also outputs - information about the method including configuration and human-friendly - descriptions. A TTL of "system" indicates that the system default is in use. - - List all enabled auth methods: - - $ vault auth list - - List all enabled auth methods with detailed output: - - $ vault auth list -detailed - -` + c.Flags().Help() - - return strings.TrimSpace(helpText) -} - -func (c *AuthListCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) - - f := set.NewFlagSet("Command Options") - - f.BoolVar(&BoolVar{ - Name: "detailed", - Target: &c.flagDetailed, - Default: false, - Usage: "Print detailed information such as configuration and replication " + - "status about each auth method. 
This option is only applicable to " + - "table-formatted output.", - }) - - return set -} - -func (c *AuthListCommand) AutocompleteArgs() complete.Predictor { - return nil -} - -func (c *AuthListCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} +func testAuthCommand(tb testing.TB) (*cli.MockUi, *AuthCommand) { + tb.Helper() -func (c *AuthListCommand) Run(args []string) int { - f := c.Flags() + ui := cli.NewMockUi() + return ui, &AuthCommand{ + BaseCommand: &BaseCommand{ + UI: ui, - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - if len(args) > 0 { - c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) - return 1 - } - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - auths, err := client.Sys().ListAuth() - if err != nil { - c.UI.Error(fmt.Sprintf("Error listing enabled authentications: %s", err)) - return 2 - } - - switch Format(c.UI) { - case "table": - if c.flagDetailed { - c.UI.Output(tableOutput(c.detailedMounts(auths), nil)) - return 0 - } - c.UI.Output(tableOutput(c.simpleMounts(auths), nil)) - return 0 - default: - return OutputData(c.UI, auths) + // Override to our own token helper + tokenHelper: token.NewTestingTokenHelper(), + }, } } -func (c *AuthListCommand) simpleMounts(auths map[string]*api.AuthMount) []string { - paths := make([]string, 0, len(auths)) - for path := range auths { - paths = append(paths, path) - } - sort.Strings(paths) - - out := []string{"Path | Type | Accessor | Description | Version"} - for _, path := range paths { - mount := auths[path] - out = append(out, fmt.Sprintf("%s | %s | %s | %s | %s", path, mount.Type, mount.Accessor, mount.Description, mount.PluginVersion)) - } +func TestAuthCommand_Run(t *testing.T) { + t.Parallel() - return out -} + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() -func (c *AuthListCommand) detailedMounts(auths map[string]*api.AuthMount) []string { - 
paths := make([]string, 0, len(auths)) - for path := range auths { - paths = append(paths, path) - } - sort.Strings(paths) - - calcTTL := func(typ string, ttl int) string { - switch { - case typ == "system", typ == "cubbyhole": - return "" - case ttl != 0: - return strconv.Itoa(ttl) - default: - return "system" - } - } - - out := []string{"Path | Plugin | Accessor | Default TTL | Max TTL | Token Type | Replication | Seal Wrap | External Entropy Access | Options | Description | UUID | Version | Running Version | Running SHA256 | Deprecation Status"} - for _, path := range paths { - mount := auths[path] - - defaultTTL := calcTTL(mount.Type, mount.Config.DefaultLeaseTTL) - maxTTL := calcTTL(mount.Type, mount.Config.MaxLeaseTTL) - - replication := "replicated" - if mount.Local { - replication = "local" - } - - pluginName := mount.Type - if pluginName == "plugin" { - pluginName = mount.Config.PluginName - } - - out = append(out, fmt.Sprintf("%s | %s | %s | %s | %s | %s | %s | %t | %v | %s | %s | %s | %s | %s | %s | %s", - path, - pluginName, - mount.Accessor, - defaultTTL, - maxTTL, - mount.Config.TokenType, - replication, - mount.SealWrap, - mount.ExternalEntropyAccess, - mount.Options, - mount.Description, - mount.UUID, - mount.PluginVersion, - mount.RunningVersion, - mount.RunningSha256, - mount.DeprecationStatus, - )) - } - - return out + _, cmd := testAuthCommand(t) + assertNoTabs(t, cmd) + }) } diff --git a/command/auth_list_test.go b/command/auth_list_test.go index 8e019f1b8fa9..10e7f9fe4113 100644 --- a/command/auth_list_test.go +++ b/command/auth_list_test.go @@ -4,105 +4,307 @@ package command import ( + "flag" + "fmt" + "strconv" "strings" - "testing" + "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/posener/complete" ) -func testAuthListCommand(tb testing.TB) (*cli.MockUi, *AuthListCommand) { - tb.Helper() +var ( + _ cli.Command = (*AuthTuneCommand)(nil) + _ cli.CommandAutocomplete = 
(*AuthTuneCommand)(nil) +) - ui := cli.NewMockUi() - return ui, &AuthListCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - } +type AuthTuneCommand struct { + *BaseCommand + + flagAuditNonHMACRequestKeys []string + flagAuditNonHMACResponseKeys []string + flagDefaultLeaseTTL time.Duration + flagDescription string + flagListingVisibility string + flagMaxLeaseTTL time.Duration + flagPassthroughRequestHeaders []string + flagAllowedResponseHeaders []string + flagOptions map[string]string + flagTokenType string + flagVersion int + flagPluginVersion string + flagUserLockoutThreshold uint + flagUserLockoutDuration time.Duration + flagUserLockoutCounterResetDuration time.Duration + flagUserLockoutDisable bool +} + +func (c *AuthTuneCommand) Synopsis() string { + return "Tunes an auth method configuration" +} + +func (c *AuthTuneCommand) Help() string { + helpText := ` +Usage: vault auth tune [options] PATH + + Tunes the configuration options for the auth method at the given PATH. The + argument corresponds to the PATH where the auth method is enabled, not the + TYPE! + + Tune the default lease for the github auth method: + + $ vault auth tune -default-lease-ttl=72h github/ + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *AuthTuneCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAuditNonHMACRequestKeys, + Target: &c.flagAuditNonHMACRequestKeys, + Usage: "Key that will not be HMAC'd by audit devices in the request data " + + "object. To specify multiple values, specify this flag multiple times.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAuditNonHMACResponseKeys, + Target: &c.flagAuditNonHMACResponseKeys, + Usage: "Key that will not be HMAC'd by audit devices in the response data " + + "object. 
To specify multiple values, specify this flag multiple times.", + }) + + f.DurationVar(&DurationVar{ + Name: "default-lease-ttl", + Target: &c.flagDefaultLeaseTTL, + Default: 0, + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "The default lease TTL for this auth method. If unspecified, this " + + "defaults to the Vault server's globally configured default lease TTL, " + + "or a previously configured value for the auth method.", + }) + + f.StringVar(&StringVar{ + Name: flagNameDescription, + Target: &c.flagDescription, + Usage: "Human-friendly description of the this auth method. This overrides " + + "the current stored value, if any.", + }) + + f.StringVar(&StringVar{ + Name: flagNameListingVisibility, + Target: &c.flagListingVisibility, + Usage: "Determines the visibility of the mount in the UI-specific listing " + + "endpoint.", + }) + + f.DurationVar(&DurationVar{ + Name: "max-lease-ttl", + Target: &c.flagMaxLeaseTTL, + Default: 0, + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "The maximum lease TTL for this auth method. If unspecified, this " + + "defaults to the Vault server's globally configured maximum lease TTL, " + + "or a previously configured value for the auth method.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNamePassthroughRequestHeaders, + Target: &c.flagPassthroughRequestHeaders, + Usage: "Request header value that will be sent to the plugin. To specify " + + "multiple values, specify this flag multiple times.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAllowedResponseHeaders, + Target: &c.flagAllowedResponseHeaders, + Usage: "Response header value that plugins will be allowed to set. To specify " + + "multiple values, specify this flag multiple times.", + }) + + f.StringMapVar(&StringMapVar{ + Name: "options", + Target: &c.flagOptions, + Completion: complete.PredictAnything, + Usage: "Key-value pair provided as key=value for the mount options. 
" + + "This can be specified multiple times.", + }) + + f.StringVar(&StringVar{ + Name: flagNameTokenType, + Target: &c.flagTokenType, + Usage: "Sets a forced token type for the mount.", + }) + + f.IntVar(&IntVar{ + Name: "version", + Target: &c.flagVersion, + Default: 0, + Usage: "Select the version of the auth method to run. Not supported by all auth methods.", + }) + + f.UintVar(&UintVar{ + Name: flagNameUserLockoutThreshold, + Target: &c.flagUserLockoutThreshold, + Usage: "The threshold for user lockout for this auth method. If unspecified, this " + + "defaults to the Vault server's globally configured user lockout threshold, " + + "or a previously configured value for the auth method.", + }) + + f.DurationVar(&DurationVar{ + Name: flagNameUserLockoutDuration, + Target: &c.flagUserLockoutDuration, + Completion: complete.PredictAnything, + Usage: "The user lockout duration for this auth method. If unspecified, this " + + "defaults to the Vault server's globally configured user lockout duration, " + + "or a previously configured value for the auth method.", + }) + + f.DurationVar(&DurationVar{ + Name: flagNameUserLockoutCounterResetDuration, + Target: &c.flagUserLockoutCounterResetDuration, + Completion: complete.PredictAnything, + Usage: "The user lockout counter reset duration for this auth method. If unspecified, this " + + "defaults to the Vault server's globally configured user lockout counter reset duration, " + + "or a previously configured value for the auth method.", + }) + + f.BoolVar(&BoolVar{ + Name: flagNameUserLockoutDisable, + Target: &c.flagUserLockoutDisable, + Default: false, + Usage: "Disable user lockout for this auth method. 
If unspecified, this " + + "defaults to the Vault server's globally configured user lockout disable, " + + "or a previously configured value for the auth method.", + }) + + f.StringVar(&StringVar{ + Name: flagNamePluginVersion, + Target: &c.flagPluginVersion, + Default: "", + Usage: "Select the semantic version of the plugin to run. The new version must be registered in " + + "the plugin catalog, and will not start running until the plugin is reloaded.", + }) + + return set +} + +func (c *AuthTuneCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultAuths() } -func TestAuthListCommand_Run(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - args []string - out string - code int - }{ - { - "too_many_args", - []string{"foo"}, - "Too many arguments", - 1, - }, - { - "lists", - nil, - "Path", - 0, - }, - { - "detailed", - []string{"-detailed"}, - "Default TTL", - 0, - }, +func (c *AuthTuneCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *AuthTuneCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 } - t.Run("validations", func(t *testing.T) { - t.Parallel() + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } - for _, tc := range cases { - tc := tc + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } - t.Run(tc.name, func(t *testing.T) { - t.Parallel() + if c.flagVersion > 0 { + if c.flagOptions == nil { + c.flagOptions = make(map[string]string) + } + c.flagOptions["version"] = strconv.Itoa(c.flagVersion) + } - client, closer := testVaultServer(t) - defer closer() + mountConfigInput := api.MountConfigInput{ + DefaultLeaseTTL: ttlToAPI(c.flagDefaultLeaseTTL), + MaxLeaseTTL: 
ttlToAPI(c.flagMaxLeaseTTL), + Options: c.flagOptions, + } - ui, cmd := testAuthListCommand(t) - cmd.client = client + // Set these values only if they are provided in the CLI + f.Visit(func(fl *flag.Flag) { + if fl.Name == flagNameAuditNonHMACRequestKeys { + mountConfigInput.AuditNonHMACRequestKeys = c.flagAuditNonHMACRequestKeys + } - code := cmd.Run(tc.args) - if code != tc.code { - t.Errorf("expected %d to be %d", code, tc.code) - } + if fl.Name == flagNameAuditNonHMACResponseKeys { + mountConfigInput.AuditNonHMACResponseKeys = c.flagAuditNonHMACResponseKeys + } - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, tc.out) { - t.Errorf("expected %q to contain %q", combined, tc.out) - } - }) + if fl.Name == flagNameDescription { + mountConfigInput.Description = &c.flagDescription } - }) - t.Run("communication_failure", func(t *testing.T) { - t.Parallel() + if fl.Name == flagNameListingVisibility { + mountConfigInput.ListingVisibility = c.flagListingVisibility + } - client, closer := testVaultServerBad(t) - defer closer() + if fl.Name == flagNamePassthroughRequestHeaders { + mountConfigInput.PassthroughRequestHeaders = c.flagPassthroughRequestHeaders + } - ui, cmd := testAuthListCommand(t) - cmd.client = client + if fl.Name == flagNameAllowedResponseHeaders { + mountConfigInput.AllowedResponseHeaders = c.flagAllowedResponseHeaders + } - code := cmd.Run([]string{}) - if exp := 2; code != exp { - t.Errorf("expected %d to be %d", code, exp) + if fl.Name == flagNameTokenType { + mountConfigInput.TokenType = c.flagTokenType + } + switch fl.Name { + case flagNameUserLockoutThreshold, flagNameUserLockoutDuration, flagNameUserLockoutCounterResetDuration, flagNameUserLockoutDisable: + if mountConfigInput.UserLockoutConfig == nil { + mountConfigInput.UserLockoutConfig = &api.UserLockoutConfigInput{} + } + } + if fl.Name == flagNameUserLockoutThreshold { + mountConfigInput.UserLockoutConfig.LockoutThreshold = 
strconv.FormatUint(uint64(c.flagUserLockoutThreshold), 10) + } + if fl.Name == flagNameUserLockoutDuration { + mountConfigInput.UserLockoutConfig.LockoutDuration = ttlToAPI(c.flagUserLockoutDuration) + } + if fl.Name == flagNameUserLockoutCounterResetDuration { + mountConfigInput.UserLockoutConfig.LockoutCounterResetDuration = ttlToAPI(c.flagUserLockoutCounterResetDuration) + } + if fl.Name == flagNameUserLockoutDisable { + mountConfigInput.UserLockoutConfig.DisableLockout = &c.flagUserLockoutDisable } - expected := "Error listing enabled authentications: " - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) + if fl.Name == flagNamePluginVersion { + mountConfigInput.PluginVersion = c.flagPluginVersion } }) - t.Run("no_tabs", func(t *testing.T) { - t.Parallel() + // Append /auth (since that's where auths live) and a trailing slash to + // indicate it's a path in output + mountPath := ensureTrailingSlash(sanitizePath(args[0])) - _, cmd := testAuthListCommand(t) - assertNoTabs(t, cmd) - }) + if err := client.Sys().TuneMount("/auth/"+mountPath, mountConfigInput); err != nil { + c.UI.Error(fmt.Sprintf("Error tuning auth method %s: %s", mountPath, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! 
Tuned the auth method at: %s", mountPath)) + return 0 } diff --git a/command/auth_move.go b/command/auth_move.go index e91d9d82fb83..4bc8ddf46596 100644 --- a/command/auth_move.go +++ b/command/auth_move.go @@ -4,123 +4,289 @@ package command import ( - "fmt" "strings" - "time" + "testing" - "github.com/mitchellh/cli" - "github.com/posener/complete" + "github.com/go-test/deep" + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" ) -var ( - _ cli.Command = (*AuthMoveCommand)(nil) - _ cli.CommandAutocomplete = (*AuthMoveCommand)(nil) -) +func testAuthTuneCommand(tb testing.TB) (*cli.MockUi, *AuthTuneCommand) { + tb.Helper() -type AuthMoveCommand struct { - *BaseCommand + ui := cli.NewMockUi() + return ui, &AuthTuneCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } } -func (c *AuthMoveCommand) Synopsis() string { - return "Move an auth method to a new path" -} +func TestAuthTuneCommand_Run(t *testing.T) { + t.Parallel() -func (c *AuthMoveCommand) Help() string { - helpText := ` -Usage: vault auth move [options] SOURCE DESTINATION + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar"}, + "Too many arguments", + 1, + }, + } - Moves an existing auth method to a new path. Any leases from the old - auth method are revoked, but all configuration associated with the method - is preserved. It initiates the migration and intermittently polls its status, - exiting if a final state is reached. + t.Run("validations", func(t *testing.T) { + t.Parallel() - This command works within or across namespaces, both source and destination paths - can be prefixed with a namespace heirarchy relative to the current namespace. + for _, tc := range cases { + tc := tc - WARNING! Moving an auth method will revoke any leases from the - old method. 
+ t.Run(tc.name, func(t *testing.T) { + t.Parallel() - Move the auth method at approle/ to generic/: + client, closer := testVaultServer(t) + defer closer() - $ vault auth move approle/ generic/ + ui, cmd := testAuthTuneCommand(t) + cmd.client = client - Move the auth method at ns1/approle/ across namespaces to ns2/generic/, - where ns1 and ns2 are child namespaces of the current namespace: + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } - $ vault auth move ns1/approle/ ns2/generic/ + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) -` + c.Flags().Help() + t.Run("integration", func(t *testing.T) { + t.Run("flags_all", func(t *testing.T) { + t.Parallel() + pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) + defer cleanup(t) - return strings.TrimSpace(helpText) -} + client, _, closer := testVaultServerPluginDir(t, pluginDir) + defer closer() -func (c *AuthMoveCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP) -} + ui, cmd := testAuthTuneCommand(t) + cmd.client = client -func (c *AuthMoveCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultMounts() -} + // Mount + if err := client.Sys().EnableAuthWithOptions("my-auth", &api.EnableAuthOptions{ + Type: "userpass", + }); err != nil { + t.Fatal(err) + } -func (c *AuthMoveCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + mountInfo, ok := auths["my-auth/"] + if !ok { + t.Fatalf("expected mount to exist: %#v", auths) + } -func (c *AuthMoveCommand) Run(args []string) int { - f := c.Flags() + if exp := ""; mountInfo.PluginVersion != exp { + t.Errorf("expected %q to be %q", mountInfo.PluginVersion, exp) + } - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } + _, _, 
version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, "userpass", api.PluginTypeCredential) - args = f.Args() - switch { - case len(args) < 2: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 2, got %d)", len(args))) - return 1 - case len(args) > 2: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 2, got %d)", len(args))) - return 1 - } + code := cmd.Run([]string{ + "-description", "new description", + "-default-lease-ttl", "30m", + "-max-lease-ttl", "1h", + "-audit-non-hmac-request-keys", "foo,bar", + "-audit-non-hmac-response-keys", "foo,bar", + "-passthrough-request-headers", "authorization", + "-passthrough-request-headers", "www-authentication", + "-allowed-response-headers", "authorization,www-authentication", + "-listing-visibility", "unauth", + "-plugin-version", version, + "my-auth/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } - // Grab the source and destination - source := ensureTrailingSlash(args[0]) - destination := ensureTrailingSlash(args[1]) + expected := "Success! 
Tuned the auth method at: my-auth/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } + auths, err = client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } - remountResp, err := client.Sys().StartRemount(source, destination) - if err != nil { - c.UI.Error(fmt.Sprintf("Error moving auth method %s to %s: %s", source, destination, err)) - return 2 - } + mountInfo, ok = auths["my-auth/"] + if !ok { + t.Fatalf("expected auth to exist") + } + if exp := "new description"; mountInfo.Description != exp { + t.Errorf("expected %q to be %q", mountInfo.Description, exp) + } + if exp := "userpass"; mountInfo.Type != exp { + t.Errorf("expected %q to be %q", mountInfo.Type, exp) + } + if exp := version; mountInfo.PluginVersion != exp { + t.Errorf("expected %q to be %q", mountInfo.PluginVersion, exp) + } + if exp := 1800; mountInfo.Config.DefaultLeaseTTL != exp { + t.Errorf("expected %d to be %d", mountInfo.Config.DefaultLeaseTTL, exp) + } + if exp := 3600; mountInfo.Config.MaxLeaseTTL != exp { + t.Errorf("expected %d to be %d", mountInfo.Config.MaxLeaseTTL, exp) + } + if diff := deep.Equal([]string{"authorization", "www-authentication"}, mountInfo.Config.PassthroughRequestHeaders); len(diff) > 0 { + t.Errorf("Failed to find expected values in PassthroughRequestHeaders. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"authorization,www-authentication"}, mountInfo.Config.AllowedResponseHeaders); len(diff) > 0 { + t.Errorf("Failed to find expected values in AllowedResponseHeaders. Difference is: %v", diff) + } + if diff := deep.Equal([]string{"foo,bar"}, mountInfo.Config.AuditNonHMACRequestKeys); len(diff) > 0 { + t.Errorf("Failed to find expected values in AuditNonHMACRequestKeys. 
Difference is: %v", diff) + } + if diff := deep.Equal([]string{"foo,bar"}, mountInfo.Config.AuditNonHMACResponseKeys); len(diff) > 0 { + t.Errorf("Failed to find expected values in AuditNonHMACResponseKeys. Difference is: %v", diff) + } + }) - c.UI.Output(fmt.Sprintf("Started moving auth method %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) + t.Run("flags_description", func(t *testing.T) { + t.Parallel() + t.Run("not_provided", func(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() - // Poll the status endpoint with the returned migration ID - // Exit if a terminal status is reached, else wait and retry - for { - remountStatusResp, err := client.Sys().RemountStatus(remountResp.MigrationID) - if err != nil { - c.UI.Error(fmt.Sprintf("Error checking migration status of auth method %s to %s: %s", source, destination, err)) - return 2 - } - if remountStatusResp.MigrationInfo.MigrationStatus == MountMigrationStatusSuccess { - c.UI.Output(fmt.Sprintf("Success! Finished moving auth method %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) - return 0 + ui, cmd := testAuthTuneCommand(t) + cmd.client = client + + // Mount + if err := client.Sys().EnableAuthWithOptions("my-auth", &api.EnableAuthOptions{ + Type: "userpass", + Description: "initial description", + }); err != nil { + t.Fatal(err) + } + + code := cmd.Run([]string{ + "-default-lease-ttl", "30m", + "my-auth/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Tuned the auth method at: my-auth/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + + mountInfo, ok := auths["my-auth/"] + if !ok { + t.Fatalf("expected auth to exist") + } + if exp := "initial description"; mountInfo.Description != exp { + t.Errorf("expected %q to be %q", mountInfo.Description, exp) + } + }) + + t.Run("provided_empty", func(t *testing.T) { + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testAuthTuneCommand(t) + cmd.client = client + + // Mount + if err := client.Sys().EnableAuthWithOptions("my-auth", &api.EnableAuthOptions{ + Type: "userpass", + Description: "initial description", + }); err != nil { + t.Fatal(err) + } + + code := cmd.Run([]string{ + "-description", "", + "my-auth/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! Tuned the auth method at: my-auth/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + + mountInfo, ok := auths["my-auth/"] + if !ok { + t.Fatalf("expected auth to exist") + } + if exp := ""; mountInfo.Description != exp { + t.Errorf("expected %q to be %q", mountInfo.Description, exp) + } + }) + }) + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testAuthTuneCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "userpass/", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) } - if remountStatusResp.MigrationInfo.MigrationStatus == MountMigrationStatusFailure { - c.UI.Error(fmt.Sprintf("Failure! 
Error encountered moving auth method %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) - return 0 + + expected := "Error tuning auth method userpass/: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) } - c.UI.Output(fmt.Sprintf("Waiting for terminal status in migration of auth method %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) - time.Sleep(10 * time.Second) - } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() - return 0 + _, cmd := testAuthTuneCommand(t) + assertNoTabs(t, cmd) + }) } diff --git a/command/auth_move_test.go b/command/auth_move_test.go index 095c3ae9daab..2124e64b4958 100644 --- a/command/auth_move_test.go +++ b/command/auth_move_test.go @@ -1,149 +1,341 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +--- +layout: api +page_title: Azure - Secrets Engines - HTTP API +description: This is the API documentation for the Vault Azure secrets engine. +--- -package command +# Azure secrets engine (API) -import ( - "strings" - "testing" +This is the API documentation for the Vault Azure +secrets engine. For general information about the usage and operation of +the Azure secrets engine, please see the main [Azure secrets documentation][docs]. - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" -) +This documentation assumes the Azure secrets engine is enabled at the `/azure` path +in Vault. Since it is possible to mount secrets engines at any path, please +update your API calls accordingly. -func testAuthMoveCommand(tb testing.TB) (*cli.MockUi, *AuthMoveCommand) { - tb.Helper() +## Configure access - ui := cli.NewMockUi() - return ui, &AuthMoveCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - } +Configures the credentials required for the plugin to perform API calls +to Azure. 
These credentials will be used to query roles and create/delete +service principals. Environment variables will override any parameters set in the config. + +| Method | Path | +| :----- | :-------------- | +| `POST` | `/azure/config` | + +- `subscription_id` (`string: `) - The subscription id for the Azure Active Directory. + This value can also be provided with the AZURE_SUBSCRIPTION_ID environment variable. +- `tenant_id` (`string: `) - The tenant id for the Azure Active Directory. + This value can also be provided with the AZURE_TENANT_ID environment variable. +- `client_id` (`string:""`) - The OAuth2 client id to connect to Azure. This value can also be provided + with the AZURE_CLIENT_ID environment variable. See [authentication](/vault/docs/secrets/azure#authentication) for more details. +- `client_secret` (`string:""`) - The OAuth2 client secret to connect to Azure. This value can also be + provided with the AZURE_CLIENT_SECRET environment variable. See [authentication](/vault/docs/secrets/azure#authentication) for more details. +- `environment` (`string:""`) - The Azure environment. This value can also be provided with the AZURE_ENVIRONMENT + environment variable. If not specified, Vault will use Azure Public Cloud. +- `password_policy` `(string: "")` - Specifies a [password policy](/vault/docs/concepts/password-policies) to + use when creating dynamic credentials. Defaults to generating an alphanumeric password if not set. +- `root_password_ttl` `(string: 182d)` - Specifies how long the root password is valid for in Azure when + rotate-root generates a new client secret. Uses [duration format strings](/vault/docs/concepts/duration-format). 
+ +### Sample payload + +```json +{ + "subscription_id": "94ca80...", + "tenant_id": "d0ac7e...", + "client_id": "e607c4...", + "client_secret": "9a6346...", + "environment": "AzureGermanCloud", + "password_policy": "azure_policy", + "root_password_ttl": "48d" +} +``` + +### Sample request + + + + +```shell-session +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://127.0.0.1:8200/v1/azure/config +``` + + + + +```shell-session +$ vault write azure/config \ + subscription_id="94ca80..." \ + tenant_id="d0ac7e...", + client_id="e607c4...", + client_secret="9a6346...", + environment="AzureGermanCloud", + password_policy="azure_policy" +``` + + + + +## Read config + +Return the stored configuration, omitting `client_secret`. + +| Method | Path | +| :----- | :-------------- | +| `GET` | `/azure/config` | + +### Sample request + + + + +```shell-session +$ curl \ + --header "X-Vault-Token: ..." \ + --request GET \ + https://127.0.0.1:8200/v1/azure/config +``` + + + + +```shell-session +$ vault read azure/config +``` + + + + +### Sample response + +```json +{ + "data": { + "subscription_id": "94ca80...", + "tenant_id": "d0ac7e...", + "client_id": "e607c4...", + "environment": "AzureGermanCloud" + }, + ... +} +``` + +## Delete config + +Deletes the stored Azure configuration and credentials. + +| Method | Path | +| :------- | :-------------- | +| `DELETE` | `/azure/config` | + +### Sample request + + + + +```shell-session +$ curl \ + --header "X-Vault-Token: ..." \ + --request DELETE \ + https://127.0.0.1:8200/v1/azure/config +``` + + + + +```shell-session +$ vault delete azure/config +``` + + + + +## Rotate root + +This endpoint generates a new client secret for the root account defined in the config. The +value generated will only be known by Vault. 
+ +~> Due to the eventual consistency of Microsoft Azure client secret APIs, the plugin +may briefly stop authenticating to Azure as the password propagates through their +datacenters. + +| Method | Path | +| :----- | :------------------- | +| `POST` | `/azure/rotate-root` | + +### Parameters + +There are no parameters to this operation. + +### Sample request + +```shell-session +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + http://127.0.0.1:8200/v1/azure/rotate-root +``` + +## Create/Update role + +Create or update a Vault role. Either `application_object_id` or +`azure_roles` must be provided, and these resources must exist for this +call to succeed. See the Azure secrets [roles docs][roles] for more +information about roles. + +| Method | Path | +| :----- | :------------------- | +| `POST` | `/azure/roles/:name` | + +### Parameters + +- `azure_roles` (`string: ""`) - List of Azure roles to be assigned to the generated service + principal. The array must be in JSON format, properly escaped as a string. See [roles docs][roles] + for details on role definition. +- `azure_groups` (`string: ""`) - List of Azure groups that the generated service principal will be + assigned to. The array must be in JSON format, properly escaped as a string. See [groups docs][groups] + for more details. +- `application_object_id` (`string: ""`) - Application Object ID for an existing service principal that will + be used instead of creating dynamic service principals. If present, `azure_roles` will be ignored. See + [roles docs][roles] for details on role definition. +- `persist_app` (`bool: "false"`) – If set to true, persists the created service principal and application for the lifetime of the role. + Useful for when the Service Principal needs to maintain ownership of objects it creates +- `ttl` (`string: ""`) – Specifies the default TTL for service principals generated using this role. + Accepts time suffixed strings ("1h") or an integer number of seconds. 
Defaults to the system/engine default TTL time. +- `max_ttl` (`string: ""`) – Specifies the maximum TTL for service principals generated using this role. Accepts time + suffixed strings ("1h") or an integer number of seconds. Defaults to the system/engine max TTL time. +- `permanently_delete` (`bool: false`) - Specifies whether to permanently delete Applications and Service Principals that are dynamically + created by Vault. If `application_object_id` is present, `permanently_delete` must be `false`. +- `sign_in_audience` (`string: ""`) - Specifies the security principal types that are allowed to sign in to the application. + Valid values are: AzureADMyOrg, AzureADMultipleOrgs, AzureADandPersonalMicrosoftAccount, PersonalMicrosoftAccount. +- `tags` (`string: ""`) - A comma-separated string of Azure tags to attach to an application. + +### Sample payload + +```json +{ + "azure_roles": "[ + { + \"role_name\": \"Contributor\", + \"scope\": \"/subscriptions//resourceGroups/Website\" + }, + { + \"role_id\": \"/subscriptions//providers/Microsoft.Authorization/roleDefinitions/\", + \"scope\": \"/subscriptions/\" + } + ]", + "ttl": 3600, + "max_ttl": "24h", + "sign_in_audience": "AzureADMyOrg", + "tags": "team:engineering,environment:development" +} +``` + +### Sample request + +```shell-session +$ curl \ + --header "X-Vault-Token: ..." \ + --request POST \ + --data @payload.json \ + https://127.0.0.1:8200/v1/azure/roles/my-role +``` + +## List roles + +Lists all of the roles that are registered with the plugin. + +| Method | Path | +| :----- | :------------- | +| `LIST` | `/azure/roles` | + +### Sample request + + + + +```shell-session +$ curl \ + --header "X-Vault-Token: ..." 
\ + --request LIST \ + https://127.0.0.1:8200/v1/azure/roles +``` + + + + +```shell-session +$ vault list azure/roles +``` + + + + +### Sample response + +```json +{ + "data": { + "keys": ["my-role-one", "my-role-two"] + } } +``` + +## Generate credentials + +This endpoint generates a new service principal based on the named role. + +| Method | Path | +| :----- | :------------------- | +| `GET` | `/azure/creds/:name` | + +### Parameters + +- `name` (`string: `) - Specifies the name of the role to create credentials against. + +### Sample request -func TestAuthMoveCommand_Run(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - args []string - out string - code int - }{ - { - "not_enough_args", - []string{}, - "Not enough arguments", - 1, - }, - { - "too_many_args", - []string{"foo", "bar", "baz"}, - "Too many arguments", - 1, - }, - { - "non_existent", - []string{"not_real", "over_here"}, - "Error moving auth method not_real/ to over_here/", - 2, - }, - } - - t.Run("validations", func(t *testing.T) { - t.Parallel() - - for _, tc := range cases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - ui, cmd := testAuthMoveCommand(t) - cmd.client = client - - code := cmd.Run(tc.args) - if code != tc.code { - t.Errorf("expected %d to be %d", code, tc.code) - } - - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, tc.out) { - t.Errorf("expected %q to contain %q", combined, tc.out) - } - }) - } - }) - - t.Run("integration", func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - ui, cmd := testAuthMoveCommand(t) - cmd.client = client - - if err := client.Sys().EnableAuthWithOptions("my-auth", &api.EnableAuthOptions{ - Type: "userpass", - }); err != nil { - t.Fatal(err) - } - - code := cmd.Run([]string{ - "auth/my-auth/", "auth/my-auth-2/", - }) - if exp := 0; code != exp { - t.Errorf("expected 
%d to be %d", code, exp) - } - - expected := "Success! Finished moving auth method auth/my-auth/ to auth/my-auth-2/" - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } - - mounts, err := client.Sys().ListAuth() - if err != nil { - t.Fatal(err) - } - - if _, ok := mounts["my-auth-2/"]; !ok { - t.Errorf("expected mount at my-auth-2/: %#v", mounts) - } - }) - - t.Run("communication_failure", func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServerBad(t) - defer closer() - - ui, cmd := testAuthMoveCommand(t) - cmd.client = client - - code := cmd.Run([]string{ - "auth/my-auth/", "auth/my-auth-2/", - }) - if exp := 2; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - - expected := "Error moving auth method auth/my-auth/ to auth/my-auth-2/:" - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } - }) - - t.Run("no_tabs", func(t *testing.T) { - t.Parallel() - - _, cmd := testAuthMoveCommand(t) - assertNoTabs(t, cmd) - }) + + + +```shell-session +$ curl \ + --header "X-Vault-Token: ..." \ + http://127.0.0.1:8200/v1/azure/creds/my-role +``` + + + + +```shell-session +$ vault read azure/creds/my-role +``` + + + + +### Sample response + +```json +{ + "data": { + "client_id": "408bf248-dd4e-4be5-919a-7f6207a307ab", + "client_secret": "9PfdaDP9qcf98ggw8WSttfVreFcN4q9c4m4x", + ... + } } +``` + +## Revoking/Renewing secrets + +See docs on how to [renew](/vault/api-docs/system/leases#renew-lease) and [revoke](/vault/api-docs/system/leases#revoke-lease) leases. 
+ +[docs]: /vault/docs/secrets/azure +[roles]: /vault/docs/secrets/azure#roles +[groups]: /vault/docs/secrets/azure#azure-groups diff --git a/command/auth_test.go b/command/auth_test.go index 4eb7d4ee3e07..d86ec901388c 100644 --- a/command/auth_test.go +++ b/command/auth_test.go @@ -1,37 +1,172 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +--- +layout: docs +page_title: Azure Key Vault - Secrets Sync Destination +description: The Azure Key Vault destination syncs secrets from Vault to Azure. +--- -package command +# Azure Key Vault -import ( - "testing" +The Azure Key Vault destination enables Vault to sync and unsync secrets of your choosing into +an external Azure account. When configured, Vault will actively maintain the state of each externally-synced +secret in realtime. This includes sending new secrets, updating existing secret values, and removing +secrets when they either get dissociated from the destination or deleted from Vault. - "github.com/mitchellh/cli" +Prerequisites: +* Ability to read or create KVv2 secrets +* Ability to create Azure AD user credentials with access to an Azure Key Vault +* Ability to create sync destinations and associations on your Vault server - "github.com/hashicorp/vault/command/token" -) +## Setup -func testAuthCommand(tb testing.TB) (*cli.MockUi, *AuthCommand) { - tb.Helper() +1. If you do not already have an Azure Key Vault instance, navigate to the Azure Portal to create a new + [Key Vault](https://learn.microsoft.com/en-us/azure/key-vault/general/quick-create-portal). - ui := cli.NewMockUi() - return ui, &AuthCommand{ - BaseCommand: &BaseCommand{ - UI: ui, +1. A service principal with a client id and client secret will be needed to configure Azure Key Vault as a + sync destination. This [guide](https://learn.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal) + will walk you through creating the service principal. 
- // Override to our own token helper - tokenHelper: token.NewTestingTokenHelper(), - }, - } -} +1. Once the service principal is created, the next step is to + [grant the service principal](https://learn.microsoft.com/en-us/azure/key-vault/general/rbac-guide?tabs=azure-cli) + access to Azure Key Vault. To quickly get started, we recommend using the "Key Vault Secrets Officer" built-in role, + which gives sufficient access to manage secrets. For more information, see the [Permissions](#permissions) section. + + +1. Configure a sync destination with the service principal credentials and Key Vault URI created in the previous steps. + + ```shell-session + $ vault write sys/sync/stores/azure-kv/my-azure-1 \ + key_vault_uri="$KEY_VAULT_URI" \ + client_id="$CLIENT_ID" \ + client_secret="$CLIENT_SECRET" \ + tenant_id="$TENANT_ID" + ``` + + **Output:** + + + + ```plaintext + Key Value + --- ----- + connection_details map[client_id:123 client_secret:***** key_vault_uri:***** tenant_id:123] + name my-azure-1 + type azure-kv + ``` + + + +## Usage + +1. If you do not already have a KVv2 secret to sync, mount a new KVv2 secrets engine. + + ```shell-session + $ vault secrets enable -path='my-kv' kv-v2 + ``` + + **Output:** + + + + ```plaintext + Success! Enabled the kv-v2 secrets engine at: my-kv/ + ``` + + + +1. Create secrets you wish to sync with a target Azure Key Vault. + + ```shell-session + $ vault kv put -mount='my-kv' my-secret foo='bar' + ``` -func TestAuthCommand_Run(t *testing.T) { - t.Parallel() + **Output:** - t.Run("no_tabs", func(t *testing.T) { - t.Parallel() + - _, cmd := testAuthCommand(t) - assertNoTabs(t, cmd) - }) + ```plaintext + ==== Secret Path ==== + my-kv/data/my-secret + + ======= Metadata ======= + Key Value + --- ----- + created_time 2023-09-19T13:17:23.395109Z + custom_metadata + deletion_time n/a + destroyed false + version 1 + ``` + + + +1. Create an association between the destination and a secret to synchronize. 
+ + ```shell-session + $ vault write sys/sync/destinations/azure-kv/my-azure-1/associations/set \ + mount='my-kv' \ + secret_name='my-secret' + ``` + + **Output:** + + + + ```plaintext + Key Value + --- ----- + associated_secrets map[kv_7532a8b4/my-secret:map[accessor:kv_7532a8b4 secret_name:my-secret sync_status:SYNCED updated_at:2023-09-21T13:53:24.839885-07:00]] + store_name my-azure-1 + store_type azure-kv + ``` + + + +1. Navigate to [Azure Key Vault](https://portal.azure.com/#view/HubsExtension/BrowseResource/resourceType/Microsoft.KeyVault%2Fvaults) + in the Azure portal to confirm your secret was successfully created. + +Moving forward, any modification on the Vault secret will be propagated in near real time to its Azure Key Vault +counterpart. Creating a new secret version in Vault will create a new version in Azure Key Vault. Deleting the secret +or the association in Vault will delete the secret in your Azure Key Vault as well. + + +## Permissions + +For a more minimal set of permissions, you can create a +[custom role](https://learn.microsoft.com/en-us/azure/role-based-access-control/custom-roles#steps-to-create-a-custom-role) +using the following JSON role definition. Be sure to replace the subscription id placeholder. 
+ +```json +{ + "properties": { + "roleName": "Key Vault Secrets Reader Writer", + "description": "Custom role for reading and updating Azure Key Vault secrets.", + "permissions": [ + { + "actions": [ + "Microsoft.KeyVault/vaults/secrets/read", + "Microsoft.KeyVault/vaults/secrets/write" + ], + "notActions": [], + "dataActions": [ + "Microsoft.KeyVault/vaults/secrets/delete", + "Microsoft.KeyVault/vaults/secrets/backup/action", + "Microsoft.KeyVault/vaults/secrets/purge/action", + "Microsoft.KeyVault/vaults/secrets/recover/action", + "Microsoft.KeyVault/vaults/secrets/restore/action", + "Microsoft.KeyVault/vaults/secrets/readMetadata/action", + "Microsoft.KeyVault/vaults/secrets/getSecret/action", + "Microsoft.KeyVault/vaults/secrets/setSecret/action" + ], + "notDataActions": [] + } + ], + "assignableScopes": [ + "/subscriptions/{subscriptionId}/" + ] + } } +``` + +## API + +Please see the [secrets sync API](/vault/api-docs/system/secrets-sync) for more details. diff --git a/command/auth_tune.go b/command/auth_tune.go index 65b3070666d9..3f203fb13bb0 100644 --- a/command/auth_tune.go +++ b/command/auth_tune.go @@ -1,310 +1,179 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: BUSL-1.1 -package command +package ldap import ( - "flag" + "context" "fmt" - "strconv" "strings" - "time" + "sync" - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" - "github.com/posener/complete" -) + "github.com/hashicorp/cap/ldap" + "github.com/hashicorp/go-secure-stdlib/strutil" -var ( - _ cli.Command = (*AuthTuneCommand)(nil) - _ cli.CommandAutocomplete = (*AuthTuneCommand)(nil) + "github.com/hashicorp/vault/sdk/framework" + "github.com/hashicorp/vault/sdk/helper/ldaputil" + "github.com/hashicorp/vault/sdk/logical" ) -type AuthTuneCommand struct { - *BaseCommand - - flagAuditNonHMACRequestKeys []string - flagAuditNonHMACResponseKeys []string - flagDefaultLeaseTTL time.Duration - flagDescription string - flagListingVisibility string - flagMaxLeaseTTL time.Duration - flagPassthroughRequestHeaders []string - flagAllowedResponseHeaders []string - flagOptions map[string]string - flagTokenType string - flagVersion int - flagPluginVersion string - flagUserLockoutThreshold uint - flagUserLockoutDuration time.Duration - flagUserLockoutCounterResetDuration time.Duration - flagUserLockoutDisable bool -} - -func (c *AuthTuneCommand) Synopsis() string { - return "Tunes an auth method configuration" -} - -func (c *AuthTuneCommand) Help() string { - helpText := ` -Usage: vault auth tune [options] PATH - - Tunes the configuration options for the auth method at the given PATH. The - argument corresponds to the PATH where the auth method is enabled, not the - TYPE! 
- - Tune the default lease for the github auth method: - - $ vault auth tune -default-lease-ttl=72h github/ - -` + c.Flags().Help() +const ( + operationPrefixLDAP = "ldap" + errUserBindFailed = "ldap operation failed: failed to bind as user" + defaultPasswordLength = 64 // length to use for configured root password on rotations by default +) - return strings.TrimSpace(helpText) +func Factory(ctx context.Context, conf *logical.BackendConfig) (logical.Backend, error) { + b := Backend() + if err := b.Setup(ctx, conf); err != nil { + return nil, err + } + return b, nil } -func (c *AuthTuneCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP) - - f := set.NewFlagSet("Command Options") - - f.StringSliceVar(&StringSliceVar{ - Name: flagNameAuditNonHMACRequestKeys, - Target: &c.flagAuditNonHMACRequestKeys, - Usage: "Key that will not be HMAC'd by audit devices in the request data " + - "object. To specify multiple values, specify this flag multiple times.", - }) - - f.StringSliceVar(&StringSliceVar{ - Name: flagNameAuditNonHMACResponseKeys, - Target: &c.flagAuditNonHMACResponseKeys, - Usage: "Key that will not be HMAC'd by audit devices in the response data " + - "object. To specify multiple values, specify this flag multiple times.", - }) - - f.DurationVar(&DurationVar{ - Name: "default-lease-ttl", - Target: &c.flagDefaultLeaseTTL, - Default: 0, - EnvVar: "", - Completion: complete.PredictAnything, - Usage: "The default lease TTL for this auth method. If unspecified, this " + - "defaults to the Vault server's globally configured default lease TTL, " + - "or a previously configured value for the auth method.", - }) - - f.StringVar(&StringVar{ - Name: flagNameDescription, - Target: &c.flagDescription, - Usage: "Human-friendly description of the this auth method. 
This overrides " + - "the current stored value, if any.", - }) - - f.StringVar(&StringVar{ - Name: flagNameListingVisibility, - Target: &c.flagListingVisibility, - Usage: "Determines the visibility of the mount in the UI-specific listing " + - "endpoint.", - }) - - f.DurationVar(&DurationVar{ - Name: "max-lease-ttl", - Target: &c.flagMaxLeaseTTL, - Default: 0, - EnvVar: "", - Completion: complete.PredictAnything, - Usage: "The maximum lease TTL for this auth method. If unspecified, this " + - "defaults to the Vault server's globally configured maximum lease TTL, " + - "or a previously configured value for the auth method.", - }) - - f.StringSliceVar(&StringSliceVar{ - Name: flagNamePassthroughRequestHeaders, - Target: &c.flagPassthroughRequestHeaders, - Usage: "Request header value that will be sent to the plugin. To specify " + - "multiple values, specify this flag multiple times.", - }) - - f.StringSliceVar(&StringSliceVar{ - Name: flagNameAllowedResponseHeaders, - Target: &c.flagAllowedResponseHeaders, - Usage: "Response header value that plugins will be allowed to set. To specify " + - "multiple values, specify this flag multiple times.", - }) - - f.StringMapVar(&StringMapVar{ - Name: "options", - Target: &c.flagOptions, - Completion: complete.PredictAnything, - Usage: "Key-value pair provided as key=value for the mount options. " + - "This can be specified multiple times.", - }) - - f.StringVar(&StringVar{ - Name: flagNameTokenType, - Target: &c.flagTokenType, - Usage: "Sets a forced token type for the mount.", - }) - - f.IntVar(&IntVar{ - Name: "version", - Target: &c.flagVersion, - Default: 0, - Usage: "Select the version of the auth method to run. Not supported by all auth methods.", - }) - - f.UintVar(&UintVar{ - Name: flagNameUserLockoutThreshold, - Target: &c.flagUserLockoutThreshold, - Usage: "The threshold for user lockout for this auth method. 
If unspecified, this " + - "defaults to the Vault server's globally configured user lockout threshold, " + - "or a previously configured value for the auth method.", - }) - - f.DurationVar(&DurationVar{ - Name: flagNameUserLockoutDuration, - Target: &c.flagUserLockoutDuration, - Completion: complete.PredictAnything, - Usage: "The user lockout duration for this auth method. If unspecified, this " + - "defaults to the Vault server's globally configured user lockout duration, " + - "or a previously configured value for the auth method.", - }) - - f.DurationVar(&DurationVar{ - Name: flagNameUserLockoutCounterResetDuration, - Target: &c.flagUserLockoutCounterResetDuration, - Completion: complete.PredictAnything, - Usage: "The user lockout counter reset duration for this auth method. If unspecified, this " + - "defaults to the Vault server's globally configured user lockout counter reset duration, " + - "or a previously configured value for the auth method.", - }) - - f.BoolVar(&BoolVar{ - Name: flagNameUserLockoutDisable, - Target: &c.flagUserLockoutDisable, - Default: false, - Usage: "Disable user lockout for this auth method. If unspecified, this " + - "defaults to the Vault server's globally configured user lockout disable, " + - "or a previously configured value for the auth method.", - }) - - f.StringVar(&StringVar{ - Name: flagNamePluginVersion, - Target: &c.flagPluginVersion, - Default: "", - Usage: "Select the semantic version of the plugin to run. 
The new version must be registered in " + - "the plugin catalog, and will not start running until the plugin is reloaded.", - }) +func Backend() *backend { + var b backend + b.Backend = &framework.Backend{ + Help: backendHelp, + + PathsSpecial: &logical.Paths{ + Unauthenticated: []string{ + "login/*", + }, + + SealWrapStorage: []string{ + "config", + }, + }, + + Paths: []*framework.Path{ + pathConfig(&b), + pathGroups(&b), + pathGroupsList(&b), + pathUsers(&b), + pathUsersList(&b), + pathLogin(&b), + pathConfigRotateRoot(&b), + }, + + AuthRenew: b.pathLoginRenew, + BackendType: logical.TypeCredential, + } - return set + return &b } -func (c *AuthTuneCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultAuths() -} +type backend struct { + *framework.Backend -func (c *AuthTuneCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() + mu sync.RWMutex } -func (c *AuthTuneCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) - return 1 - } - - client, err := c.Client() +func (b *backend) Login(ctx context.Context, req *logical.Request, username string, password string, usernameAsAlias bool) (string, []string, *logical.Response, []string, error) { + cfg, err := b.Config(ctx, req) if err != nil { - c.UI.Error(err.Error()) - return 2 + return "", nil, nil, nil, err } - - if c.flagVersion > 0 { - if c.flagOptions == nil { - c.flagOptions = make(map[string]string) - } - c.flagOptions["version"] = strconv.Itoa(c.flagVersion) + if cfg == nil { + return "", nil, logical.ErrorResponse("ldap backend not configured"), nil, nil } - mountConfigInput := api.MountConfigInput{ - DefaultLeaseTTL: 
ttlToAPI(c.flagDefaultLeaseTTL), - MaxLeaseTTL: ttlToAPI(c.flagMaxLeaseTTL), - Options: c.flagOptions, + if cfg.DenyNullBind && len(password) == 0 { + return "", nil, logical.ErrorResponse("password cannot be of zero length when passwordless binds are being denied"), nil, nil } - // Set these values only if they are provided in the CLI - f.Visit(func(fl *flag.Flag) { - if fl.Name == flagNameAuditNonHMACRequestKeys { - mountConfigInput.AuditNonHMACRequestKeys = c.flagAuditNonHMACRequestKeys - } + ldapClient, err := ldap.NewClient(ctx, ldaputil.ConvertConfig(cfg.ConfigEntry)) + if err != nil { + return "", nil, logical.ErrorResponse(err.Error()), nil, nil + } - if fl.Name == flagNameAuditNonHMACResponseKeys { - mountConfigInput.AuditNonHMACResponseKeys = c.flagAuditNonHMACResponseKeys - } + // Clean connection + defer ldapClient.Close(ctx) - if fl.Name == flagNameDescription { - mountConfigInput.Description = &c.flagDescription + c, err := ldapClient.Authenticate(ctx, username, password, ldap.WithGroups(), ldap.WithUserAttributes()) + if err != nil { + if strings.Contains(err.Error(), "discovery of user bind DN failed") || + strings.Contains(err.Error(), "unable to bind user") { + return "", nil, logical.ErrorResponse(errUserBindFailed), nil, logical.ErrInvalidCredentials } - if fl.Name == flagNameListingVisibility { - mountConfigInput.ListingVisibility = c.flagListingVisibility - } + return "", nil, logical.ErrorResponse(err.Error()), nil, nil + } - if fl.Name == flagNamePassthroughRequestHeaders { - mountConfigInput.PassthroughRequestHeaders = c.flagPassthroughRequestHeaders - } + ldapGroups := c.Groups + ldapResponse := &logical.Response{ + Data: map[string]interface{}{}, + } + if len(ldapGroups) == 0 { + errString := fmt.Sprintf( + "no LDAP groups found in groupDN %q; only policies from locally-defined groups available", + cfg.GroupDN) + ldapResponse.AddWarning(errString) + } - if fl.Name == flagNameAllowedResponseHeaders { - 
mountConfigInput.AllowedResponseHeaders = c.flagAllowedResponseHeaders - } + for _, warning := range c.Warnings { + ldapResponse.AddWarning(string(warning)) + } - if fl.Name == flagNameTokenType { - mountConfigInput.TokenType = c.flagTokenType - } - switch fl.Name { - case flagNameUserLockoutThreshold, flagNameUserLockoutDuration, flagNameUserLockoutCounterResetDuration, flagNameUserLockoutDisable: - if mountConfigInput.UserLockoutConfig == nil { - mountConfigInput.UserLockoutConfig = &api.UserLockoutConfigInput{} - } - } - if fl.Name == flagNameUserLockoutThreshold { - mountConfigInput.UserLockoutConfig.LockoutThreshold = strconv.FormatUint(uint64(c.flagUserLockoutThreshold), 10) - } - if fl.Name == flagNameUserLockoutDuration { - mountConfigInput.UserLockoutConfig.LockoutDuration = ttlToAPI(c.flagUserLockoutDuration) - } - if fl.Name == flagNameUserLockoutCounterResetDuration { - mountConfigInput.UserLockoutConfig.LockoutCounterResetDuration = ttlToAPI(c.flagUserLockoutCounterResetDuration) + var allGroups []string + canonicalUsername := username + cs := *cfg.CaseSensitiveNames + if !cs { + canonicalUsername = strings.ToLower(username) + } + // Import the custom added groups from ldap backend + user, err := b.User(ctx, req.Storage, canonicalUsername) + if err == nil && user != nil && user.Groups != nil { + if b.Logger().IsDebug() { + b.Logger().Debug("adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups) } - if fl.Name == flagNameUserLockoutDisable { - mountConfigInput.UserLockoutConfig.DisableLockout = &c.flagUserLockoutDisable + allGroups = append(allGroups, user.Groups...) + } + // Merge local and LDAP groups + allGroups = append(allGroups, ldapGroups...) 
+ + canonicalGroups := allGroups + // If not case sensitive, lowercase all + if !cs { + canonicalGroups = make([]string, len(allGroups)) + for i, v := range allGroups { + canonicalGroups[i] = strings.ToLower(v) } + } - if fl.Name == flagNamePluginVersion { - mountConfigInput.PluginVersion = c.flagPluginVersion + // Retrieve policies + var policies []string + for _, groupName := range canonicalGroups { + group, err := b.Group(ctx, req.Storage, groupName) + if err == nil && group != nil { + policies = append(policies, group.Policies...) } - }) + } + if user != nil && user.Policies != nil { + policies = append(policies, user.Policies...) + } + // Policies from each group may overlap + policies = strutil.RemoveDuplicates(policies, true) - // Append /auth (since that's where auths live) and a trailing slash to - // indicate it's a path in output - mountPath := ensureTrailingSlash(sanitizePath(args[0])) + if usernameAsAlias { + return username, policies, ldapResponse, allGroups, nil + } - if err := client.Sys().TuneMount("/auth/"+mountPath, mountConfigInput); err != nil { - c.UI.Error(fmt.Sprintf("Error tuning auth method %s: %s", mountPath, err)) - return 2 + userAttrValues := c.UserAttributes[cfg.UserAttr] + if len(userAttrValues) == 0 { + return "", nil, logical.ErrorResponse("missing entity alias attribute value"), nil, nil } + entityAliasAttribute := userAttrValues[0] - c.UI.Output(fmt.Sprintf("Success! Tuned the auth method at: %s", mountPath)) - return 0 + return entityAliasAttribute, policies, ldapResponse, allGroups, nil } + +const backendHelp = ` +The "ldap" credential provider allows authentication querying +a LDAP server, checking username and password, and associating groups +to set of policies. + +Configuration of the server is done through the "config" and "groups" +endpoints by a user with root access. Authentication is then done +by supplying the two fields for "login". 
+` diff --git a/command/auth_tune_test.go b/command/auth_tune_test.go index ea0449b9d5c2..d84cd64cadca 100644 --- a/command/auth_tune_test.go +++ b/command/auth_tune_test.go @@ -1,292 +1,7149 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 -package command +package pki import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/pem" + "fmt" + "math" + "math/big" + mathrand "math/rand" + "net" + "net/url" + "os" + "reflect" + "sort" + "strconv" "strings" + "sync" "testing" + "time" + "github.com/hashicorp/vault/helper/testhelpers/teststorage" + "golang.org/x/exp/maps" + + "github.com/hashicorp/vault/helper/testhelpers" + + "github.com/hashicorp/vault/sdk/helper/testhelpers/schema" + + "github.com/stretchr/testify/require" + + "github.com/armon/go-metrics" + "github.com/fatih/structs" "github.com/go-test/deep" + "github.com/hashicorp/go-secure-stdlib/strutil" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/testhelpers/corehelpers" - "github.com/mitchellh/cli" + auth "github.com/hashicorp/vault/api/auth/userpass" + "github.com/hashicorp/vault/builtin/credential/userpass" + logicaltest "github.com/hashicorp/vault/helper/testhelpers/logical" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/helper/certutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/mitchellh/mapstructure" + "golang.org/x/net/idna" ) -func testAuthTuneCommand(tb testing.TB) (*cli.MockUi, *AuthTuneCommand) { - tb.Helper() +var stepCount = 0 - ui := cli.NewMockUi() - return ui, &AuthTuneCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, +// From builtin/credential/cert/test-fixtures/root/rootcacert.pem +const ( + rootCACertPEM = `-----BEGIN CERTIFICATE----- 
+MIIDPDCCAiSgAwIBAgIUb5id+GcaMeMnYBv3MvdTGWigyJ0wDQYJKoZIhvcNAQEL +BQAwFjEUMBIGA1UEAxMLZXhhbXBsZS5jb20wHhcNMTYwMjI5MDIyNzI5WhcNMjYw +MjI2MDIyNzU5WjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAOxTMvhTuIRc2YhxZpmPwegP86cgnqfT1mXxi1A7 +Q7qax24Nqbf00I3oDMQtAJlj2RB3hvRSCb0/lkF7i1Bub+TGxuM7NtZqp2F8FgG0 +z2md+W6adwW26rlxbQKjmRvMn66G9YPTkoJmPmxt2Tccb9+apmwW7lslL5j8H48x +AHJTMb+PMP9kbOHV5Abr3PT4jXUPUr/mWBvBiKiHG0Xd/HEmlyOEPeAThxK+I5tb +6m+eB+7cL9BsvQpy135+2bRAxUphvFi5NhryJ2vlAvoJ8UqigsNK3E28ut60FAoH +SWRfFUFFYtfPgTDS1yOKU/z/XMU2giQv2HrleWt0mp4jqBUCAwEAAaOBgTB/MA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSdxLNP/ocx +7HK6JT3/sSAe76iTmzAfBgNVHSMEGDAWgBSdxLNP/ocx7HK6JT3/sSAe76iTmzAc +BgNVHREEFTATggtleGFtcGxlLmNvbYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA +wHThDRsXJunKbAapxmQ6bDxSvTvkLA6m97TXlsFgL+Q3Jrg9HoJCNowJ0pUTwhP2 +U946dCnSCkZck0fqkwVi4vJ5EQnkvyEbfN4W5qVsQKOFaFVzep6Qid4rZT6owWPa +cNNzNcXAee3/j6hgr6OQ/i3J6fYR4YouYxYkjojYyg+CMdn6q8BoV0BTsHdnw1/N +ScbnBHQIvIZMBDAmQueQZolgJcdOuBLYHe/kRy167z8nGg+PUFKIYOL8NaOU1+CJ +t2YaEibVq5MRqCbRgnd9a2vG0jr5a3Mn4CUUYv+5qIjP3hUusYenW1/EWtn1s/gk +zehNe5dFTjFpylg1o6b8Ow== +-----END CERTIFICATE-----` + rootCAKeyPEM = `-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA7FMy+FO4hFzZiHFmmY/B6A/zpyCep9PWZfGLUDtDuprHbg2p +t/TQjegMxC0AmWPZEHeG9FIJvT+WQXuLUG5v5MbG4zs21mqnYXwWAbTPaZ35bpp3 +BbbquXFtAqOZG8yfrob1g9OSgmY+bG3ZNxxv35qmbBbuWyUvmPwfjzEAclMxv48w +/2Rs4dXkBuvc9PiNdQ9Sv+ZYG8GIqIcbRd38cSaXI4Q94BOHEr4jm1vqb54H7twv +0Gy9CnLXfn7ZtEDFSmG8WLk2GvIna+UC+gnxSqKCw0rcTby63rQUCgdJZF8VQUVi +18+BMNLXI4pT/P9cxTaCJC/YeuV5a3SaniOoFQIDAQABAoIBAQCoGZJC84JnnIgb +ttZNWuWKBXbCJcDVDikOQJ9hBZbqsFg1X0CfGmQS3MHf9Ubc1Ro8zVjQh15oIEfn +8lIpdzTeXcpxLdiW8ix3ekVJF20F6pnXY8ZP6UnTeOwamXY6QPZAtb0D9UXcvY+f +nw+IVRD6082XS0Rmzu+peYWVXDy+FDN+HJRANBcdJZz8gOmNBIe0qDWx1b85d/s8 +2Kk1Wwdss1IwAGeSddTSwzBNaaHdItZaMZOqPW1gRyBfVSkcUQIE6zn2RKw2b70t +grkIvyRcTdfmiKbqkkJ+eR+ITOUt0cBZSH4cDjlQA+r7hulvoBpQBRj068Toxkcc 
+bTagHaPBAoGBAPWPGVkHqhTbJ/DjmqDIStxby2M1fhhHt4xUGHinhUYjQjGOtDQ9 +0mfaB7HObudRiSLydRAVGAHGyNJdQcTeFxeQbovwGiYKfZSA1IGpea7dTxPpGEdN +ksA0pzSp9MfKzX/MdLuAkEtO58aAg5YzsgX9hDNxo4MhH/gremZhEGZlAoGBAPZf +lqdYvAL0fjHGJ1FUEalhzGCGE9PH2iOqsxqLCXK7bDbzYSjvuiHkhYJHAOgVdiW1 +lB34UHHYAqZ1VVoFqJ05gax6DE2+r7K5VV3FUCaC0Zm3pavxchU9R/TKP82xRrBj +AFWwdgDTxUyvQEmgPR9sqorftO71Iz2tiwyTpIfxAoGBAIhEMLzHFAse0rtKkrRG +ccR27BbRyHeQ1Lp6sFnEHKEfT8xQdI/I/snCpCJ3e/PBu2g5Q9z416mktiyGs8ib +thTNgYsGYnxZtfaCx2pssanoBcn2wBJRae5fSapf5gY49HDG9MBYR7qCvvvYtSzU +4yWP2ZzyotpRt3vwJKxLkN5BAoGAORHpZvhiDNkvxj3da7Rqpu7VleJZA2y+9hYb +iOF+HcqWhaAY+I+XcTRrTMM/zYLzLEcEeXDEyao86uwxCjpXVZw1kotvAC9UqbTO +tnr3VwRkoxPsV4kFYTAh0+1pnC8dbcxxDmhi3Uww3tOVs7hfkEDuvF6XnebA9A+Y +LyCgMzECgYEA6cCU8QODOivIKWFRXucvWckgE6MYDBaAwe6qcLsd1Q/gpE2e3yQc +4RB3bcyiPROLzMLlXFxf1vSNJQdIaVfrRv+zJeGIiivLPU8+Eq4Lrb+tl1LepcOX +OzQeADTSCn5VidOfjDkIst9UXjMlrFfV9/oJEw5Eiqa6lkNPCGDhfA8= +-----END RSA PRIVATE KEY-----` +) + +func TestPKI_RequireCN(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected ca info") + } + + // Create a role which does require CN (default) + _, err = CBWrite(b, s, "roles/example", map[string]interface{}{ + "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com", + "allow_bare_domains": true, + "allow_subdomains": true, + "max_ttl": "2h", + }) + if err != nil { + t.Fatal(err) + } + + // Issue a cert with require_cn set to true and with common name supplied. + // It should succeed. 
+ resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ + "common_name": "foobar.com", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issue/example"), logical.UpdateOperation), resp, true) + if err != nil { + t.Fatal(err) + } + + // Issue a cert with require_cn set to true and with out supplying the + // common name. It should error out. + _, err = CBWrite(b, s, "issue/example", map[string]interface{}{}) + if err == nil { + t.Fatalf("expected an error due to missing common_name") + } + + // Modify the role to make the common name optional + _, err = CBWrite(b, s, "roles/example", map[string]interface{}{ + "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com", + "allow_bare_domains": true, + "allow_subdomains": true, + "max_ttl": "2h", + "require_cn": false, + }) + if err != nil { + t.Fatal(err) + } + + // Issue a cert with require_cn set to false and without supplying the + // common name. It should succeed. + resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{}) + if err != nil { + t.Fatal(err) + } + + if resp.Data["certificate"] == "" { + t.Fatalf("expected a cert to be generated") + } + + // Issue a cert with require_cn set to false and with a common name. It + // should succeed. 
+ resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{}) + if err != nil { + t.Fatal(err) + } + + if resp.Data["certificate"] == "" { + t.Fatalf("expected a cert to be generated") } } -func TestAuthTuneCommand_Run(t *testing.T) { +func TestPKI_DeviceCert(t *testing.T) { t.Parallel() + b, s := CreateBackendWithStorage(t) - cases := []struct { - name string - args []string - out string - code int - }{ - { - "not_enough_args", - []string{}, - "Not enough arguments", - 1, - }, - { - "too_many_args", - []string{"foo", "bar"}, - "Too many arguments", - 1, - }, + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + "not_after": "9999-12-31T23:59:59Z", + "not_before_duration": "2h", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected ca info") + } + var certBundle certutil.CertBundle + err = mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + t.Fatal(err) } - t.Run("validations", func(t *testing.T) { - t.Parallel() + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + t.Fatal(err) + } + cert := parsedCertBundle.Certificate + notAfter := cert.NotAfter.Format(time.RFC3339) + if notAfter != "9999-12-31T23:59:59Z" { + t.Fatalf("not after from certificate: %v is not matching with input parameter: %v", cert.NotAfter, "9999-12-31T23:59:59Z") + } + if math.Abs(float64(time.Now().Add(-2*time.Hour).Unix()-cert.NotBefore.Unix())) > 10 { + t.Fatalf("root/generate/internal did not properly set validity period (notBefore): was %v vs expected %v", cert.NotBefore, time.Now().Add(-2*time.Hour)) + } - for _, tc := range cases { - tc := tc + // Create a role which does require CN (default) + _, err = CBWrite(b, s, "roles/example", map[string]interface{}{ + "allowed_domains": "foobar.com,zipzap.com,abc.com,xyz.com", + "allow_bare_domains": true, + "allow_subdomains": true, + "not_after": "9999-12-31T23:59:59Z", + }) + if err != nil { + t.Fatal(err) + } - 
t.Run(tc.name, func(t *testing.T) { - t.Parallel() + // Issue a cert with require_cn set to true and with common name supplied. + // It should succeed. + resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ + "common_name": "foobar.com", + }) + if err != nil { + t.Fatal(err) + } + err = mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + t.Fatal(err) + } - client, closer := testVaultServer(t) - defer closer() + parsedCertBundle, err = certBundle.ToParsedCertBundle() + if err != nil { + t.Fatal(err) + } + cert = parsedCertBundle.Certificate + notAfter = cert.NotAfter.Format(time.RFC3339) + if notAfter != "9999-12-31T23:59:59Z" { + t.Fatal(fmt.Errorf("not after from certificate is not matching with input parameter")) + } +} - ui, cmd := testAuthTuneCommand(t) - cmd.client = client +func TestBackend_InvalidParameter(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) - code := cmd.Run(tc.args) - if code != tc.code { - t.Errorf("expected %d to be %d", code, tc.code) - } + _, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + "not_after": "9999-12-31T23:59:59Z", + "ttl": "25h", + }) + if err == nil { + t.Fatal(err) + } - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, tc.out) { - t.Errorf("expected %q to contain %q", combined, tc.out) - } - }) - } + _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + "not_after": "9999-12-31T23:59:59", }) + if err == nil { + t.Fatal(err) + } +} - t.Run("integration", func(t *testing.T) { - t.Run("flags_all", func(t *testing.T) { - t.Parallel() - pluginDir, cleanup := corehelpers.MakeTestPluginDir(t) - defer cleanup(t) +func TestBackend_CSRValues(t *testing.T) { + t.Parallel() + initTest.Do(setCerts) + b, _ := CreateBackendWithStorage(t) - client, _, closer := testVaultServerPluginDir(t, pluginDir) - defer closer() + testCase := 
logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{}, + } - ui, cmd := testAuthTuneCommand(t) - cmd.client = client + intdata := map[string]interface{}{} + reqdata := map[string]interface{}{} + testCase.Steps = append(testCase.Steps, generateCSRSteps(t, ecCACert, ecCAKey, intdata, reqdata)...) - // Mount - if err := client.Sys().EnableAuthWithOptions("my-auth", &api.EnableAuthOptions{ - Type: "userpass", - }); err != nil { - t.Fatal(err) - } + logicaltest.Test(t, testCase) +} - auths, err := client.Sys().ListAuth() - if err != nil { - t.Fatal(err) - } - mountInfo, ok := auths["my-auth/"] - if !ok { - t.Fatalf("expected mount to exist: %#v", auths) - } +func TestBackend_URLsCRUD(t *testing.T) { + t.Parallel() + initTest.Do(setCerts) + b, _ := CreateBackendWithStorage(t) - if exp := ""; mountInfo.PluginVersion != exp { - t.Errorf("expected %q to be %q", mountInfo.PluginVersion, exp) - } + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{}, + } - _, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, "userpass", api.PluginTypeCredential) + intdata := map[string]interface{}{} + reqdata := map[string]interface{}{} + testCase.Steps = append(testCase.Steps, generateURLSteps(t, ecCACert, ecCAKey, intdata, reqdata)...) - code := cmd.Run([]string{ - "-description", "new description", - "-default-lease-ttl", "30m", - "-max-lease-ttl", "1h", - "-audit-non-hmac-request-keys", "foo,bar", - "-audit-non-hmac-response-keys", "foo,bar", - "-passthrough-request-headers", "authorization", - "-passthrough-request-headers", "www-authentication", - "-allowed-response-headers", "authorization,www-authentication", - "-listing-visibility", "unauth", - "-plugin-version", version, - "my-auth/", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } + logicaltest.Test(t, testCase) +} - expected := "Success! 
Tuned the auth method at: my-auth/" - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } +// Generates and tests steps that walk through the various possibilities +// of role flags to ensure that they are properly restricted +func TestBackend_Roles(t *testing.T) { + t.Parallel() + cases := []struct { + name string + key, cert *string + useCSR bool + }{ + {"RSA", &rsaCAKey, &rsaCACert, false}, + {"RSACSR", &rsaCAKey, &rsaCACert, true}, + {"EC", &ecCAKey, &ecCACert, false}, + {"ECCSR", &ecCAKey, &ecCACert, true}, + {"ED", &edCAKey, &edCACert, false}, + {"EDCSR", &edCAKey, &edCACert, true}, + } - auths, err = client.Sys().ListAuth() - if err != nil { - t.Fatal(err) - } + for _, tc := range cases { + tc := tc - mountInfo, ok = auths["my-auth/"] - if !ok { - t.Fatalf("expected auth to exist") - } - if exp := "new description"; mountInfo.Description != exp { - t.Errorf("expected %q to be %q", mountInfo.Description, exp) - } - if exp := "userpass"; mountInfo.Type != exp { - t.Errorf("expected %q to be %q", mountInfo.Type, exp) - } - if exp := version; mountInfo.PluginVersion != exp { - t.Errorf("expected %q to be %q", mountInfo.PluginVersion, exp) - } - if exp := 1800; mountInfo.Config.DefaultLeaseTTL != exp { - t.Errorf("expected %d to be %d", mountInfo.Config.DefaultLeaseTTL, exp) - } - if exp := 3600; mountInfo.Config.MaxLeaseTTL != exp { - t.Errorf("expected %d to be %d", mountInfo.Config.MaxLeaseTTL, exp) - } - if diff := deep.Equal([]string{"authorization", "www-authentication"}, mountInfo.Config.PassthroughRequestHeaders); len(diff) > 0 { - t.Errorf("Failed to find expected values in PassthroughRequestHeaders. 
Difference is: %v", diff) + t.Run(tc.name, func(t *testing.T) { + initTest.Do(setCerts) + b, _ := CreateBackendWithStorage(t) + + testCase := logicaltest.TestCase{ + LogicalBackend: b, + Steps: []logicaltest.TestStep{ + { + Operation: logical.UpdateOperation, + Path: "config/ca", + Data: map[string]interface{}{ + "pem_bundle": *tc.key + "\n" + *tc.cert, + }, + }, + }, } - if diff := deep.Equal([]string{"authorization,www-authentication"}, mountInfo.Config.AllowedResponseHeaders); len(diff) > 0 { - t.Errorf("Failed to find expected values in AllowedResponseHeaders. Difference is: %v", diff) + + testCase.Steps = append(testCase.Steps, generateRoleSteps(t, tc.useCSR)...) + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + for i, v := range testCase.Steps { + data := map[string]interface{}{} + var keys []string + for k := range v.Data { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + interf := v.Data[k] + switch v := interf.(type) { + case bool: + if !v { + continue + } + case int: + if v == 0 { + continue + } + case []string: + if len(v) == 0 { + continue + } + case string: + if v == "" { + continue + } + lines := strings.Split(v, "\n") + if len(lines) > 1 { + data[k] = lines[0] + " ... (truncated)" + continue + } + } + data[k] = interf + + } + t.Logf("Step %d:\n%s %s err=%v %+v\n\n", i+1, v.Operation, v.Path, v.ErrorOk, data) + } } - if diff := deep.Equal([]string{"foo,bar"}, mountInfo.Config.AuditNonHMACRequestKeys); len(diff) > 0 { - t.Errorf("Failed to find expected values in AuditNonHMACRequestKeys. 
Difference is: %v", diff) + + logicaltest.Test(t, testCase) + }) + } +} + +// Performs some validity checking on the returned bundles +func checkCertsAndPrivateKey(keyType string, key crypto.Signer, usage x509.KeyUsage, extUsage x509.ExtKeyUsage, validity time.Duration, certBundle *certutil.CertBundle) (*certutil.ParsedCertBundle, error) { + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return nil, fmt.Errorf("error parsing cert bundle: %s", err) + } + + if key != nil { + switch keyType { + case "rsa": + parsedCertBundle.PrivateKeyType = certutil.RSAPrivateKey + parsedCertBundle.PrivateKey = key + parsedCertBundle.PrivateKeyBytes = x509.MarshalPKCS1PrivateKey(key.(*rsa.PrivateKey)) + case "ec": + parsedCertBundle.PrivateKeyType = certutil.ECPrivateKey + parsedCertBundle.PrivateKey = key + parsedCertBundle.PrivateKeyBytes, err = x509.MarshalECPrivateKey(key.(*ecdsa.PrivateKey)) + if err != nil { + return nil, fmt.Errorf("error parsing EC key: %s", err) } - if diff := deep.Equal([]string{"foo,bar"}, mountInfo.Config.AuditNonHMACResponseKeys); len(diff) > 0 { - t.Errorf("Failed to find expected values in AuditNonHMACResponseKeys. 
Difference is: %v", diff) + case "ed25519": + parsedCertBundle.PrivateKeyType = certutil.Ed25519PrivateKey + parsedCertBundle.PrivateKey = key + parsedCertBundle.PrivateKeyBytes, err = x509.MarshalPKCS8PrivateKey(key.(ed25519.PrivateKey)) + if err != nil { + return nil, fmt.Errorf("error parsing Ed25519 key: %s", err) } - }) + } + } - t.Run("flags_description", func(t *testing.T) { - t.Parallel() - t.Run("not_provided", func(t *testing.T) { - client, closer := testVaultServer(t) - defer closer() + switch { + case parsedCertBundle.Certificate == nil: + return nil, fmt.Errorf("did not find a certificate in the cert bundle") + case len(parsedCertBundle.CAChain) == 0 || parsedCertBundle.CAChain[0].Certificate == nil: + return nil, fmt.Errorf("did not find a CA in the cert bundle") + case parsedCertBundle.PrivateKey == nil: + return nil, fmt.Errorf("did not find a private key in the cert bundle") + case parsedCertBundle.PrivateKeyType == certutil.UnknownPrivateKey: + return nil, fmt.Errorf("could not figure out type of private key") + } - ui, cmd := testAuthTuneCommand(t) - cmd.client = client + switch { + case parsedCertBundle.PrivateKeyType == certutil.Ed25519PrivateKey && keyType != "ed25519": + fallthrough + case parsedCertBundle.PrivateKeyType == certutil.RSAPrivateKey && keyType != "rsa": + fallthrough + case parsedCertBundle.PrivateKeyType == certutil.ECPrivateKey && keyType != "ec": + return nil, fmt.Errorf("given key type does not match type found in bundle") + } - // Mount - if err := client.Sys().EnableAuthWithOptions("my-auth", &api.EnableAuthOptions{ - Type: "userpass", - Description: "initial description", - }); err != nil { - t.Fatal(err) - } + cert := parsedCertBundle.Certificate - code := cmd.Run([]string{ - "-default-lease-ttl", "30m", - "my-auth/", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } + if usage != cert.KeyUsage { + return nil, fmt.Errorf("expected usage of %#v, got %#v; ext usage is %#v", usage, 
cert.KeyUsage, cert.ExtKeyUsage) + } + + // There should only be one ext usage type, because only one is requested + // in the tests + if len(cert.ExtKeyUsage) != 1 { + return nil, fmt.Errorf("got wrong size key usage in generated cert; expected 1, values are %#v", cert.ExtKeyUsage) + } + switch extUsage { + case x509.ExtKeyUsageEmailProtection: + if cert.ExtKeyUsage[0] != x509.ExtKeyUsageEmailProtection { + return nil, fmt.Errorf("bad extended key usage") + } + case x509.ExtKeyUsageServerAuth: + if cert.ExtKeyUsage[0] != x509.ExtKeyUsageServerAuth { + return nil, fmt.Errorf("bad extended key usage") + } + case x509.ExtKeyUsageClientAuth: + if cert.ExtKeyUsage[0] != x509.ExtKeyUsageClientAuth { + return nil, fmt.Errorf("bad extended key usage") + } + case x509.ExtKeyUsageCodeSigning: + if cert.ExtKeyUsage[0] != x509.ExtKeyUsageCodeSigning { + return nil, fmt.Errorf("bad extended key usage") + } + } + + // TODO: We incremented 20->25 due to CircleCI execution + // being slow and pausing this test. We might consider recording the + // actual issuance time of the cert and calculating the expected + // validity period +/- fuzz, but that'd require recording and passing + // through more information. 
+ if math.Abs(float64(time.Now().Add(validity).Unix()-cert.NotAfter.Unix())) > 25 { + return nil, fmt.Errorf("certificate validity end: %s; expected within 25 seconds of %s", cert.NotAfter.Format(time.RFC3339), time.Now().Add(validity).Format(time.RFC3339)) + } + + return parsedCertBundle, nil +} + +func generateURLSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[string]interface{}) []logicaltest.TestStep { + expected := certutil.URLEntries{ + IssuingCertificates: []string{ + "http://example.com/ca1", + "http://example.com/ca2", + }, + CRLDistributionPoints: []string{ + "http://example.com/crl1", + "http://example.com/crl2", + }, + OCSPServers: []string{ + "http://example.com/ocsp1", + "http://example.com/ocsp2", + }, + } + csrTemplate := x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: "my@example.com", + }, + } + + priv1024, _ := rsa.GenerateKey(rand.Reader, 1024) + csr1024, _ := x509.CreateCertificateRequest(rand.Reader, &csrTemplate, priv1024) + csrPem1024 := strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csr1024, + }))) + + priv2048, _ := rsa.GenerateKey(rand.Reader, 2048) + csr2048, _ := x509.CreateCertificateRequest(rand.Reader, &csrTemplate, priv2048) + csrPem2048 := strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csr2048, + }))) - expected := "Success! 
Tuned the auth method at: my-auth/" - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) + ret := []logicaltest.TestStep{ + { + Operation: logical.UpdateOperation, + Path: "root/generate/exported", + Data: map[string]interface{}{ + "common_name": "Root Cert", + "ttl": "180h", + }, + Check: func(resp *logical.Response) error { + if resp.Secret != nil && resp.Secret.LeaseID != "" { + return fmt.Errorf("root returned with a lease") } + return nil + }, + }, + + { + Operation: logical.UpdateOperation, + Path: "config/urls", + Data: map[string]interface{}{ + "issuing_certificates": strings.Join(expected.IssuingCertificates, ","), + "crl_distribution_points": strings.Join(expected.CRLDistributionPoints, ","), + "ocsp_servers": strings.Join(expected.OCSPServers, ","), + }, + }, - auths, err := client.Sys().ListAuth() + { + Operation: logical.ReadOperation, + Path: "config/urls", + Check: func(resp *logical.Response) error { + if resp.Data == nil { + return fmt.Errorf("no data returned") + } + var entries certutil.URLEntries + err := mapstructure.Decode(resp.Data, &entries) if err != nil { - t.Fatal(err) + return err + } + if !reflect.DeepEqual(entries, expected) { + return fmt.Errorf("expected urls\n%#v\ndoes not match provided\n%#v\n", expected, entries) + } + + return nil + }, + }, + + { + Operation: logical.UpdateOperation, + Path: "root/sign-intermediate", + Data: map[string]interface{}{ + "common_name": "intermediate.cert.com", + "csr": csrPem1024, + "format": "der", + }, + ErrorOk: true, + Check: func(resp *logical.Response) error { + if !resp.IsError() { + return fmt.Errorf("expected an error response but did not get one") } + if !strings.Contains(resp.Data["error"].(string), "2048") { + return fmt.Errorf("received an error but not about a 1024-bit key, error was: %s", resp.Data["error"].(string)) + } + + return nil + }, + }, - mountInfo, ok := 
auths["my-auth/"] - if !ok { - t.Fatalf("expected auth to exist") + { + Operation: logical.UpdateOperation, + Path: "root/sign-intermediate", + Data: map[string]interface{}{ + "common_name": "intermediate.cert.com", + "csr": csrPem2048, + "signature_bits": 512, + "format": "der", + "not_before_duration": "2h", + // Let's Encrypt -- R3 SKID + "skid": "14:2E:B3:17:B7:58:56:CB:AE:50:09:40:E6:1F:AF:9D:8B:14:C2:C6", + }, + Check: func(resp *logical.Response) error { + certString := resp.Data["certificate"].(string) + if certString == "" { + return fmt.Errorf("no certificate returned") } - if exp := "initial description"; mountInfo.Description != exp { - t.Errorf("expected %q to be %q", mountInfo.Description, exp) + if resp.Secret != nil && resp.Secret.LeaseID != "" { + return fmt.Errorf("signed intermediate returned with a lease") } - }) + certBytes, _ := base64.StdEncoding.DecodeString(certString) + certs, err := x509.ParseCertificates(certBytes) + if err != nil { + return fmt.Errorf("returned cert cannot be parsed: %w", err) + } + if len(certs) != 1 { + return fmt.Errorf("unexpected returned length of certificates: %d", len(certs)) + } + cert := certs[0] - t.Run("provided_empty", func(t *testing.T) { - client, closer := testVaultServer(t) - defer closer() + skid, _ := hex.DecodeString("142EB317B75856CBAE500940E61FAF9D8B14C2C6") - ui, cmd := testAuthTuneCommand(t) - cmd.client = client + switch { + case !reflect.DeepEqual(expected.IssuingCertificates, cert.IssuingCertificateURL): + return fmt.Errorf("IssuingCertificateURL:\nexpected\n%#v\ngot\n%#v\n", expected.IssuingCertificates, cert.IssuingCertificateURL) + case !reflect.DeepEqual(expected.CRLDistributionPoints, cert.CRLDistributionPoints): + return fmt.Errorf("CRLDistributionPoints:\nexpected\n%#v\ngot\n%#v\n", expected.CRLDistributionPoints, cert.CRLDistributionPoints) + case !reflect.DeepEqual(expected.OCSPServers, cert.OCSPServer): + return fmt.Errorf("OCSPServer:\nexpected\n%#v\ngot\n%#v\n", 
expected.OCSPServers, cert.OCSPServer) + case !reflect.DeepEqual([]string{"intermediate.cert.com"}, cert.DNSNames): + return fmt.Errorf("DNSNames\nexpected\n%#v\ngot\n%#v\n", []string{"intermediate.cert.com"}, cert.DNSNames) + case !reflect.DeepEqual(x509.SHA512WithRSA, cert.SignatureAlgorithm): + return fmt.Errorf("Signature Algorithm:\nexpected\n%#v\ngot\n%#v\n", x509.SHA512WithRSA, cert.SignatureAlgorithm) + case !reflect.DeepEqual(skid, cert.SubjectKeyId): + return fmt.Errorf("SKID:\nexpected\n%#v\ngot\n%#v\n", skid, cert.SubjectKeyId) + } - // Mount - if err := client.Sys().EnableAuthWithOptions("my-auth", &api.EnableAuthOptions{ - Type: "userpass", - Description: "initial description", - }); err != nil { - t.Fatal(err) + if math.Abs(float64(time.Now().Add(-2*time.Hour).Unix()-cert.NotBefore.Unix())) > 10 { + t.Fatalf("root/sign-intermediate did not properly set validity period (notBefore): was %v vs expected %v", cert.NotBefore, time.Now().Add(-2*time.Hour)) } - code := cmd.Run([]string{ - "-description", "", - "my-auth/", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) + return nil + }, + }, + + // Same as above but exclude adding to sans + { + Operation: logical.UpdateOperation, + Path: "root/sign-intermediate", + Data: map[string]interface{}{ + "common_name": "intermediate.cert.com", + "csr": csrPem2048, + "format": "der", + "exclude_cn_from_sans": true, + }, + Check: func(resp *logical.Response) error { + certString := resp.Data["certificate"].(string) + if certString == "" { + return fmt.Errorf("no certificate returned") + } + if resp.Secret != nil && resp.Secret.LeaseID != "" { + return fmt.Errorf("signed intermediate returned with a lease") } + certBytes, _ := base64.StdEncoding.DecodeString(certString) + certs, err := x509.ParseCertificates(certBytes) + if err != nil { + return fmt.Errorf("returned cert cannot be parsed: %w", err) + } + if len(certs) != 1 { + return fmt.Errorf("unexpected returned length of 
certificates: %d", len(certs)) + } + cert := certs[0] - expected := "Success! Tuned the auth method at: my-auth/" - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) + switch { + case !reflect.DeepEqual(expected.IssuingCertificates, cert.IssuingCertificateURL): + return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.IssuingCertificates, cert.IssuingCertificateURL) + case !reflect.DeepEqual(expected.CRLDistributionPoints, cert.CRLDistributionPoints): + return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.CRLDistributionPoints, cert.CRLDistributionPoints) + case !reflect.DeepEqual(expected.OCSPServers, cert.OCSPServer): + return fmt.Errorf("expected\n%#v\ngot\n%#v\n", expected.OCSPServers, cert.OCSPServer) + case !reflect.DeepEqual([]string(nil), cert.DNSNames): + return fmt.Errorf("expected\n%#v\ngot\n%#v\n", []string(nil), cert.DNSNames) } - auths, err := client.Sys().ListAuth() + return nil + }, + }, + } + return ret +} + +func generateCSR(t *testing.T, csrTemplate *x509.CertificateRequest, keyType string, keyBits int) (interface{}, []byte, string) { + t.Helper() + + var priv interface{} + var err error + switch keyType { + case "rsa": + priv, err = rsa.GenerateKey(rand.Reader, keyBits) + case "ec": + switch keyBits { + case 224: + priv, err = ecdsa.GenerateKey(elliptic.P224(), rand.Reader) + case 256: + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case 384: + priv, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader) + case 521: + priv, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader) + default: + t.Fatalf("Got unknown ec< key bits: %v", keyBits) + } + case "ed25519": + _, priv, err = ed25519.GenerateKey(rand.Reader) + } + + if err != nil { + t.Fatalf("Got error generating private key for CSR: %v", err) + } + + csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, priv) + if err != nil { + t.Fatalf("Got 
error generating CSR: %v", err) + } + + csrPem := strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csr, + }))) + + return priv, csr, csrPem +} + +func generateCSRSteps(t *testing.T, caCert, caKey string, intdata, reqdata map[string]interface{}) []logicaltest.TestStep { + csrTemplate, csrPem := generateTestCsr(t, certutil.RSAPrivateKey, 2048) + + ret := []logicaltest.TestStep{ + { + Operation: logical.UpdateOperation, + Path: "root/generate/exported", + Data: map[string]interface{}{ + "common_name": "Root Cert", + "ttl": "180h", + "max_path_length": 0, + }, + }, + + { + Operation: logical.UpdateOperation, + Path: "root/sign-intermediate", + Data: map[string]interface{}{ + "use_csr_values": true, + "csr": csrPem, + "format": "der", + }, + ErrorOk: true, + }, + + { + Operation: logical.DeleteOperation, + Path: "root", + }, + + { + Operation: logical.UpdateOperation, + Path: "root/generate/exported", + Data: map[string]interface{}{ + "common_name": "Root Cert", + "ttl": "180h", + "max_path_length": 1, + }, + }, + + { + Operation: logical.UpdateOperation, + Path: "root/sign-intermediate", + Data: map[string]interface{}{ + "use_csr_values": true, + "csr": csrPem, + "format": "der", + }, + Check: func(resp *logical.Response) error { + certString := resp.Data["certificate"].(string) + if certString == "" { + return fmt.Errorf("no certificate returned") + } + certBytes, _ := base64.StdEncoding.DecodeString(certString) + certs, err := x509.ParseCertificates(certBytes) if err != nil { - t.Fatal(err) + return fmt.Errorf("returned cert cannot be parsed: %w", err) } + if len(certs) != 1 { + return fmt.Errorf("unexpected returned length of certificates: %d", len(certs)) + } + cert := certs[0] - mountInfo, ok := auths["my-auth/"] - if !ok { - t.Fatalf("expected auth to exist") + if cert.MaxPathLen != 0 { + return fmt.Errorf("max path length of %d does not match the requested of 3", cert.MaxPathLen) } - if exp := ""; 
mountInfo.Description != exp { - t.Errorf("expected %q to be %q", mountInfo.Description, exp) + if !cert.MaxPathLenZero { + return fmt.Errorf("max path length zero is not set") } - }) - }) - }) - t.Run("communication_failure", func(t *testing.T) { - t.Parallel() + // We need to set these as they are filled in with unparsed values in the final cert + csrTemplate.Subject.Names = cert.Subject.Names + csrTemplate.Subject.ExtraNames = cert.Subject.ExtraNames - client, closer := testVaultServerBad(t) - defer closer() + switch { + case !reflect.DeepEqual(cert.Subject, csrTemplate.Subject): + return fmt.Errorf("cert subject\n%#v\ndoes not match csr subject\n%#v\n", cert.Subject, csrTemplate.Subject) + case !reflect.DeepEqual(cert.DNSNames, csrTemplate.DNSNames): + return fmt.Errorf("cert dns names\n%#v\ndoes not match csr dns names\n%#v\n", cert.DNSNames, csrTemplate.DNSNames) + case !reflect.DeepEqual(cert.EmailAddresses, csrTemplate.EmailAddresses): + return fmt.Errorf("cert email addresses\n%#v\ndoes not match csr email addresses\n%#v\n", cert.EmailAddresses, csrTemplate.EmailAddresses) + case !reflect.DeepEqual(cert.IPAddresses, csrTemplate.IPAddresses): + return fmt.Errorf("cert ip addresses\n%#v\ndoes not match csr ip addresses\n%#v\n", cert.IPAddresses, csrTemplate.IPAddresses) + } + return nil + }, + }, + } + return ret +} - ui, cmd := testAuthTuneCommand(t) - cmd.client = client +func generateTestCsr(t *testing.T, keyType certutil.PrivateKeyType, keyBits int) (x509.CertificateRequest, string) { + t.Helper() - code := cmd.Run([]string{ - "userpass/", - }) - if exp := 2; code != exp { - t.Errorf("expected %d to be %d", code, exp) + csrTemplate := x509.CertificateRequest{ + Subject: pkix.Name{ + Country: []string{"MyCountry"}, + PostalCode: []string{"MyPostalCode"}, + SerialNumber: "MySerialNumber", + CommonName: "my@example.com", + }, + DNSNames: []string{ + "name1.example.com", + "name2.example.com", + "name3.example.com", + }, + EmailAddresses: []string{ + 
"name1@example.com", + "name2@example.com", + "name3@example.com", + }, + IPAddresses: []net.IP{ + net.ParseIP("::ff:1:2:3:4"), + net.ParseIP("::ff:5:6:7:8"), + }, + } + + _, _, csrPem := generateCSR(t, &csrTemplate, string(keyType), keyBits) + return csrTemplate, csrPem +} + +// Generates steps to test out various role permutations +func generateRoleSteps(t *testing.T, useCSRs bool) []logicaltest.TestStep { + roleVals := roleEntry{ + MaxTTL: 12 * time.Hour, + KeyType: "rsa", + KeyBits: 2048, + RequireCN: true, + AllowWildcardCertificates: new(bool), + } + *roleVals.AllowWildcardCertificates = true + + issueVals := certutil.IssueData{} + ret := []logicaltest.TestStep{} + + roleTestStep := logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "roles/test", + } + var issueTestStep logicaltest.TestStep + if useCSRs { + issueTestStep = logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "sign/test", + } + } else { + issueTestStep = logicaltest.TestStep{ + Operation: logical.UpdateOperation, + Path: "issue/test", } + } - expected := "Error tuning auth method userpass/: " - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) + generatedRSAKeys := map[int]crypto.Signer{} + generatedECKeys := map[int]crypto.Signer{} + generatedEdKeys := map[int]crypto.Signer{} + /* + // For the number of tests being run, a seed of 1 has been tested + // to hit all of the various values below. However, for normal + // testing we use a randomized time for maximum fuzziness. 
+ */ + var seed int64 = 1 + fixedSeed := os.Getenv("VAULT_PKITESTS_FIXED_SEED") + if len(fixedSeed) == 0 { + seed = time.Now().UnixNano() + } else { + var err error + seed, err = strconv.ParseInt(fixedSeed, 10, 64) + if err != nil { + t.Fatalf("error parsing fixed seed of %s: %v", fixedSeed, err) } - }) + } + mathRand := mathrand.New(mathrand.NewSource(seed)) + // t.Logf("seed under test: %v", seed) - t.Run("no_tabs", func(t *testing.T) { - t.Parallel() + // Used by tests not toggling common names to turn off the behavior of random key bit fuzziness + keybitSizeRandOff := false - _, cmd := testAuthTuneCommand(t) - assertNoTabs(t, cmd) - }) -} + genericErrorOkCheck := func(resp *logical.Response) error { + if resp.IsError() { + return nil + } + return fmt.Errorf("expected an error, but did not seem to get one") + } + + // Adds tests with the currently configured issue/role information + addTests := func(testCheck logicaltest.TestCheckFunc) { + stepCount++ + // t.Logf("test step %d\nrole vals: %#v\n", stepCount, roleVals) + stepCount++ + // t.Logf("test step %d\nissue vals: %#v\n", stepCount, issueTestStep) + roleTestStep.Data = roleVals.ToResponseData() + roleTestStep.Data["generate_lease"] = false + ret = append(ret, roleTestStep) + issueTestStep.Data = structs.New(issueVals).Map() + switch { + case issueTestStep.ErrorOk: + issueTestStep.Check = genericErrorOkCheck + case testCheck != nil: + issueTestStep.Check = testCheck + default: + issueTestStep.Check = nil + } + ret = append(ret, issueTestStep) + } + + getCountryCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + expected := 
strutil.RemoveDuplicates(role.Country, true) + if !reflect.DeepEqual(cert.Subject.Country, expected) { + return fmt.Errorf("error: returned certificate has Country of %s but %s was specified in the role", cert.Subject.Country, expected) + } + return nil + } + } + + getOuCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + expected := strutil.RemoveDuplicatesStable(role.OU, true) + if !reflect.DeepEqual(cert.Subject.OrganizationalUnit, expected) { + return fmt.Errorf("error: returned certificate has OU of %s but %s was specified in the role", cert.Subject.OrganizationalUnit, expected) + } + return nil + } + } + + getOrganizationCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + expected := strutil.RemoveDuplicates(role.Organization, true) + if !reflect.DeepEqual(cert.Subject.Organization, expected) { + return fmt.Errorf("error: returned certificate has Organization of %s but %s was specified in the role", cert.Subject.Organization, expected) + } + return nil + } + } + + getLocalityCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := 
certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + expected := strutil.RemoveDuplicates(role.Locality, true) + if !reflect.DeepEqual(cert.Subject.Locality, expected) { + return fmt.Errorf("error: returned certificate has Locality of %s but %s was specified in the role", cert.Subject.Locality, expected) + } + return nil + } + } + + getProvinceCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + expected := strutil.RemoveDuplicates(role.Province, true) + if !reflect.DeepEqual(cert.Subject.Province, expected) { + return fmt.Errorf("error: returned certificate has Province of %s but %s was specified in the role", cert.Subject.Province, expected) + } + return nil + } + } + + getStreetAddressCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + expected := strutil.RemoveDuplicates(role.StreetAddress, true) + if !reflect.DeepEqual(cert.Subject.StreetAddress, expected) { + return fmt.Errorf("error: returned certificate has StreetAddress of %s but %s was specified in the role", cert.Subject.StreetAddress, expected) + } + return nil + } + } + + getPostalCodeCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle 
certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + expected := strutil.RemoveDuplicates(role.PostalCode, true) + if !reflect.DeepEqual(cert.Subject.PostalCode, expected) { + return fmt.Errorf("error: returned certificate has PostalCode of %s but %s was specified in the role", cert.Subject.PostalCode, expected) + } + return nil + } + } + + getNotBeforeCheck := func(role roleEntry) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + + actualDiff := time.Since(cert.NotBefore) + certRoleDiff := (role.NotBeforeDuration - actualDiff).Truncate(time.Second) + // These times get truncated, so give a 1 second buffer on each side + if certRoleDiff >= -1*time.Second && certRoleDiff <= 1*time.Second { + return nil + } + return fmt.Errorf("validity period out of range diff: %v", certRoleDiff) + } + } + + // Returns a TestCheckFunc that performs various validity checks on the + // returned certificate information, mostly within checkCertsAndPrivateKey + getCnCheck := func(name string, role roleEntry, key crypto.Signer, usage x509.KeyUsage, extUsage x509.ExtKeyUsage, validity time.Duration) logicaltest.TestCheckFunc { + var certBundle certutil.CertBundle + return func(resp *logical.Response) error { + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := checkCertsAndPrivateKey(role.KeyType, key, 
usage, extUsage, validity, &certBundle) + if err != nil { + return fmt.Errorf("error checking generated certificate: %s", err) + } + cert := parsedCertBundle.Certificate + if cert.Subject.CommonName != name { + return fmt.Errorf("error: returned certificate has CN of %s but %s was requested", cert.Subject.CommonName, name) + } + if strings.Contains(cert.Subject.CommonName, "@") { + if len(cert.DNSNames) != 0 || len(cert.EmailAddresses) != 1 { + return fmt.Errorf("error: found more than one DNS SAN or not one Email SAN but only one was requested, cert.DNSNames = %#v, cert.EmailAddresses = %#v", cert.DNSNames, cert.EmailAddresses) + } + } else { + if len(cert.DNSNames) != 1 || len(cert.EmailAddresses) != 0 { + return fmt.Errorf("error: found more than one Email SAN or not one DNS SAN but only one was requested, cert.DNSNames = %#v, cert.EmailAddresses = %#v", cert.DNSNames, cert.EmailAddresses) + } + } + var retName string + if len(cert.DNSNames) > 0 { + retName = cert.DNSNames[0] + } + if len(cert.EmailAddresses) > 0 { + retName = cert.EmailAddresses[0] + } + if retName != name { + // Check IDNA + p := idna.New( + idna.StrictDomainName(true), + idna.VerifyDNSLength(true), + ) + converted, err := p.ToUnicode(retName) + if err != nil { + t.Fatal(err) + } + if converted != name { + return fmt.Errorf("error: returned certificate has a DNS SAN of %s (from idna: %s) but %s was requested", retName, converted, name) + } + } + return nil + } + } + + type csrPlan struct { + errorOk bool + roleKeyBits int + cert string + privKey crypto.Signer + } + + getCsr := func(keyType string, keyBits int, csrTemplate *x509.CertificateRequest) (*pem.Block, crypto.Signer) { + var privKey crypto.Signer + var ok bool + switch keyType { + case "rsa": + privKey, ok = generatedRSAKeys[keyBits] + if !ok { + privKey, _ = rsa.GenerateKey(rand.Reader, keyBits) + generatedRSAKeys[keyBits] = privKey + } + + case "ec": + var curve elliptic.Curve + + switch keyBits { + case 224: + curve = 
elliptic.P224() + case 256: + curve = elliptic.P256() + case 384: + curve = elliptic.P384() + case 521: + curve = elliptic.P521() + } + + privKey, ok = generatedECKeys[keyBits] + if !ok { + privKey, _ = ecdsa.GenerateKey(curve, rand.Reader) + generatedECKeys[keyBits] = privKey + } + + case "ed25519": + privKey, ok = generatedEdKeys[keyBits] + if !ok { + _, privKey, _ = ed25519.GenerateKey(rand.Reader) + generatedEdKeys[keyBits] = privKey + } + + default: + panic("invalid key type: " + keyType) + } + + csr, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, privKey) + if err != nil { + t.Fatalf("Error creating certificate request: %s", err) + } + block := pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csr, + } + return &block, privKey + } + + getRandCsr := func(keyType string, errorOk bool, csrTemplate *x509.CertificateRequest) csrPlan { + rsaKeyBits := []int{2048, 3072, 4096, 8192} + ecKeyBits := []int{224, 256, 384, 521} + plan := csrPlan{errorOk: errorOk} + + var testBitSize int + switch keyType { + case "rsa": + plan.roleKeyBits = rsaKeyBits[mathRand.Int()%len(rsaKeyBits)] + testBitSize = plan.roleKeyBits + + // If we don't expect an error already, randomly choose a + // key size and expect an error if it's less than the role + // setting + if !keybitSizeRandOff && !errorOk { + testBitSize = rsaKeyBits[mathRand.Int()%len(rsaKeyBits)] + } + + if testBitSize < plan.roleKeyBits { + plan.errorOk = true + } + + case "ec": + plan.roleKeyBits = ecKeyBits[mathRand.Int()%len(ecKeyBits)] + testBitSize = plan.roleKeyBits + + // If we don't expect an error already, randomly choose a + // key size and expect an error if it's less than the role + // setting + if !keybitSizeRandOff && !errorOk { + testBitSize = ecKeyBits[mathRand.Int()%len(ecKeyBits)] + } + + if testBitSize < plan.roleKeyBits { + plan.errorOk = true + } + + default: + panic("invalid key type: " + keyType) + } + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + t.Logf("roleKeyBits=%d 
testBitSize=%d errorOk=%v", plan.roleKeyBits, testBitSize, plan.errorOk) + } + + block, privKey := getCsr(keyType, testBitSize, csrTemplate) + plan.cert = strings.TrimSpace(string(pem.EncodeToMemory(block))) + plan.privKey = privKey + return plan + } + + // Common names to test with the various role flags toggled + var commonNames struct { + Localhost bool `structs:"localhost"` + BareDomain bool `structs:"example.com"` + SecondDomain bool `structs:"foobar.com"` + SubDomain bool `structs:"foo.example.com"` + Wildcard bool `structs:"*.example.com"` + SubSubdomain bool `structs:"foo.bar.example.com"` + SubSubdomainWildcard bool `structs:"*.bar.example.com"` + GlobDomain bool `structs:"fooexample.com"` + IDN bool `structs:"daɪˈɛrɨsɨs"` + AnyHost bool `structs:"porkslap.beer"` + } + + // Adds a series of tests based on the current selection of + // allowed common names; contains some (seeded) randomness + // + // This allows for a variety of common names to be tested in various + // combinations with allowed toggles of the role + addCnTests := func() { + cnMap := structs.New(commonNames).Map() + for name, allowedInt := range cnMap { + roleVals.KeyType = "rsa" + roleVals.KeyBits = 2048 + if mathRand.Int()%3 == 1 { + roleVals.KeyType = "ec" + roleVals.KeyBits = 224 + } + + roleVals.ServerFlag = false + roleVals.ClientFlag = false + roleVals.CodeSigningFlag = false + roleVals.EmailProtectionFlag = false + + var usage []string + if mathRand.Int()%2 == 1 { + usage = append(usage, "DigitalSignature") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "ContentCoMmitment") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "KeyEncipherment") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "DataEncipherment") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "KeyAgreemEnt") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "CertSign") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "CRLSign") + } + if mathRand.Int()%2 == 1 { + usage = 
append(usage, "EncipherOnly") + } + if mathRand.Int()%2 == 1 { + usage = append(usage, "DecipherOnly") + } + + roleVals.KeyUsage = usage + parsedKeyUsage := parseKeyUsages(roleVals.KeyUsage) + if parsedKeyUsage == 0 && len(usage) != 0 { + panic("parsed key usages was zero") + } + + var extUsage x509.ExtKeyUsage + i := mathRand.Int() % 4 + switch { + case i == 0: + // Punt on this for now since I'm not clear the actual proper + // way to format these + if name != "daɪˈɛrɨsɨs" { + extUsage = x509.ExtKeyUsageEmailProtection + roleVals.EmailProtectionFlag = true + break + } + fallthrough + case i == 1: + extUsage = x509.ExtKeyUsageServerAuth + roleVals.ServerFlag = true + case i == 2: + extUsage = x509.ExtKeyUsageClientAuth + roleVals.ClientFlag = true + default: + extUsage = x509.ExtKeyUsageCodeSigning + roleVals.CodeSigningFlag = true + } + + allowed := allowedInt.(bool) + issueVals.CommonName = name + if roleVals.EmailProtectionFlag { + if !strings.HasPrefix(name, "*") { + issueVals.CommonName = "user@" + issueVals.CommonName + } + } + + issueTestStep.ErrorOk = !allowed + + validity := roleVals.MaxTTL + + if useCSRs { + templ := &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: issueVals.CommonName, + }, + } + plan := getRandCsr(roleVals.KeyType, issueTestStep.ErrorOk, templ) + issueVals.CSR = plan.cert + roleVals.KeyBits = plan.roleKeyBits + issueTestStep.ErrorOk = plan.errorOk + + addTests(getCnCheck(issueVals.CommonName, roleVals, plan.privKey, x509.KeyUsage(parsedKeyUsage), extUsage, validity)) + } else { + addTests(getCnCheck(issueVals.CommonName, roleVals, nil, x509.KeyUsage(parsedKeyUsage), extUsage, validity)) + } + } + } + + funcs := []interface{}{ + addCnTests, getCnCheck, getCountryCheck, getLocalityCheck, getNotBeforeCheck, + getOrganizationCheck, getOuCheck, getPostalCodeCheck, getRandCsr, getStreetAddressCheck, + getProvinceCheck, + } + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + t.Logf("funcs=%d", len(funcs)) + } + + // Common Name 
tests + { + // common_name not provided + issueVals.CommonName = "" + issueTestStep.ErrorOk = true + addTests(nil) + + // Nothing is allowed + addCnTests() + + roleVals.AllowLocalhost = true + commonNames.Localhost = true + addCnTests() + + roleVals.AllowedDomains = []string{"foobar.com"} + addCnTests() + + roleVals.AllowedDomains = []string{"example.com"} + roleVals.AllowSubdomains = true + commonNames.SubDomain = true + commonNames.Wildcard = true + commonNames.SubSubdomain = true + commonNames.SubSubdomainWildcard = true + addCnTests() + + roleVals.AllowedDomains = []string{"foobar.com", "example.com"} + commonNames.SecondDomain = true + roleVals.AllowBareDomains = true + commonNames.BareDomain = true + addCnTests() + + roleVals.AllowedDomains = []string{"foobar.com", "*example.com"} + roleVals.AllowGlobDomains = true + commonNames.GlobDomain = true + addCnTests() + + roleVals.AllowAnyName = true + roleVals.EnforceHostnames = true + commonNames.AnyHost = true + commonNames.IDN = true + addCnTests() + + roleVals.EnforceHostnames = false + addCnTests() + + // Ensure that we end up with acceptable key sizes since they won't be + // toggled any longer + keybitSizeRandOff = true + addCnTests() + } + // Country tests + { + roleVals.Country = []string{"foo"} + addTests(getCountryCheck(roleVals)) + + roleVals.Country = []string{"foo", "bar"} + addTests(getCountryCheck(roleVals)) + } + // OU tests + { + roleVals.OU = []string{"foo"} + addTests(getOuCheck(roleVals)) + + roleVals.OU = []string{"bar", "foo"} + addTests(getOuCheck(roleVals)) + } + // Organization tests + { + roleVals.Organization = []string{"system:masters"} + addTests(getOrganizationCheck(roleVals)) + + roleVals.Organization = []string{"foo", "bar"} + addTests(getOrganizationCheck(roleVals)) + } + // Locality tests + { + roleVals.Locality = []string{"foo"} + addTests(getLocalityCheck(roleVals)) + + roleVals.Locality = []string{"foo", "bar"} + addTests(getLocalityCheck(roleVals)) + } + // Province tests + { 
+ roleVals.Province = []string{"foo"} + addTests(getProvinceCheck(roleVals)) + + roleVals.Province = []string{"foo", "bar"} + addTests(getProvinceCheck(roleVals)) + } + // StreetAddress tests + { + roleVals.StreetAddress = []string{"123 foo street"} + addTests(getStreetAddressCheck(roleVals)) + + roleVals.StreetAddress = []string{"123 foo street", "456 bar avenue"} + addTests(getStreetAddressCheck(roleVals)) + } + // PostalCode tests + { + roleVals.PostalCode = []string{"f00"} + addTests(getPostalCodeCheck(roleVals)) + + roleVals.PostalCode = []string{"f00", "b4r"} + addTests(getPostalCodeCheck(roleVals)) + } + // NotBefore tests + { + roleVals.NotBeforeDuration = 10 * time.Second + addTests(getNotBeforeCheck(roleVals)) + + roleVals.NotBeforeDuration = 30 * time.Second + addTests(getNotBeforeCheck(roleVals)) + + roleVals.NotBeforeDuration = 0 + } + + // IP SAN tests + { + getIpCheck := func(expectedIp ...net.IP) logicaltest.TestCheckFunc { + return func(resp *logical.Response) error { + var certBundle certutil.CertBundle + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error parsing cert bundle: %s", err) + } + cert := parsedCertBundle.Certificate + var expected []net.IP + expected = append(expected, expectedIp...) 
+ if diff := deep.Equal(cert.IPAddresses, expected); len(diff) > 0 { + return fmt.Errorf("wrong SAN IPs, diff: %v", diff) + } + return nil + } + } + addIPSANTests := func(useCSRs, useCSRSANs, allowIPSANs, errorOk bool, ipSANs string, csrIPSANs []net.IP, check logicaltest.TestCheckFunc) { + if useCSRs { + csrTemplate := &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: issueVals.CommonName, + }, + IPAddresses: csrIPSANs, + } + block, _ := getCsr(roleVals.KeyType, roleVals.KeyBits, csrTemplate) + issueVals.CSR = strings.TrimSpace(string(pem.EncodeToMemory(block))) + } + oldRoleVals, oldIssueVals, oldIssueTestStep := roleVals, issueVals, issueTestStep + roleVals.UseCSRSANs = useCSRSANs + roleVals.AllowIPSANs = allowIPSANs + issueVals.CommonName = "someone@example.com" + issueVals.IPSANs = ipSANs + issueTestStep.ErrorOk = errorOk + addTests(check) + roleVals, issueVals, issueTestStep = oldRoleVals, oldIssueVals, oldIssueTestStep + } + roleVals.AllowAnyName = true + roleVals.EnforceHostnames = true + roleVals.AllowLocalhost = true + roleVals.UseCSRCommonName = true + commonNames.Localhost = true + + netip1, netip2 := net.IP{127, 0, 0, 1}, net.IP{170, 171, 172, 173} + textip1, textip3 := "127.0.0.1", "::1" + + // IPSANs not allowed and not provided, should not be an error. + addIPSANTests(useCSRs, false, false, false, "", nil, getIpCheck()) + + // IPSANs not allowed, valid IPSANs provided, should be an error. + addIPSANTests(useCSRs, false, false, true, textip1+","+textip3, nil, nil) + + // IPSANs allowed, bogus IPSANs provided, should be an error. + addIPSANTests(useCSRs, false, true, true, "foobar", nil, nil) + + // Given IPSANs as API argument and useCSRSANs false, CSR arg ignored. + addIPSANTests(useCSRs, false, true, false, textip1, + []net.IP{netip2}, getIpCheck(netip1)) + + if useCSRs { + // IPSANs not allowed, valid IPSANs provided via CSR, should be an error. 
+ addIPSANTests(useCSRs, true, false, true, "", []net.IP{netip1}, nil) + + // Given IPSANs as both API and CSR arguments and useCSRSANs=true, API arg ignored. + addIPSANTests(useCSRs, true, true, false, textip3, + []net.IP{netip1, netip2}, getIpCheck(netip1, netip2)) + } + } + + { + getOtherCheck := func(expectedOthers ...otherNameUtf8) logicaltest.TestCheckFunc { + return func(resp *logical.Response) error { + var certBundle certutil.CertBundle + err := mapstructure.Decode(resp.Data, &certBundle) + if err != nil { + return err + } + parsedCertBundle, err := certBundle.ToParsedCertBundle() + if err != nil { + return fmt.Errorf("error parsing cert bundle: %s", err) + } + cert := parsedCertBundle.Certificate + foundOthers, err := getOtherSANsFromX509Extensions(cert.Extensions) + if err != nil { + return err + } + var expected []otherNameUtf8 + expected = append(expected, expectedOthers...) + if diff := deep.Equal(foundOthers, expected); len(diff) > 0 { + return fmt.Errorf("wrong SAN IPs, diff: %v", diff) + } + return nil + } + } + + addOtherSANTests := func(useCSRs, useCSRSANs bool, allowedOtherSANs []string, errorOk bool, otherSANs []string, csrOtherSANs []otherNameUtf8, check logicaltest.TestCheckFunc) { + otherSansMap := func(os []otherNameUtf8) map[string][]string { + ret := make(map[string][]string) + for _, o := range os { + ret[o.oid] = append(ret[o.oid], o.value) + } + return ret + } + if useCSRs { + csrTemplate := &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: issueVals.CommonName, + }, + } + if err := handleOtherCSRSANs(csrTemplate, otherSansMap(csrOtherSANs)); err != nil { + t.Fatal(err) + } + block, _ := getCsr(roleVals.KeyType, roleVals.KeyBits, csrTemplate) + issueVals.CSR = strings.TrimSpace(string(pem.EncodeToMemory(block))) + } + oldRoleVals, oldIssueVals, oldIssueTestStep := roleVals, issueVals, issueTestStep + roleVals.UseCSRSANs = useCSRSANs + roleVals.AllowedOtherSANs = allowedOtherSANs + issueVals.CommonName = 
"someone@example.com" + issueVals.OtherSANs = strings.Join(otherSANs, ",") + issueTestStep.ErrorOk = errorOk + addTests(check) + roleVals, issueVals, issueTestStep = oldRoleVals, oldIssueVals, oldIssueTestStep + } + roleVals.AllowAnyName = true + roleVals.EnforceHostnames = true + roleVals.AllowLocalhost = true + roleVals.UseCSRCommonName = true + commonNames.Localhost = true + + newOtherNameUtf8 := func(s string) (ret otherNameUtf8) { + pieces := strings.Split(s, ";") + if len(pieces) == 2 { + piecesRest := strings.Split(pieces[1], ":") + if len(piecesRest) == 2 { + switch strings.ToUpper(piecesRest[0]) { + case "UTF-8", "UTF8": + return otherNameUtf8{oid: pieces[0], value: piecesRest[1]} + } + } + } + t.Fatalf("error parsing otherName: %q", s) + return + } + oid1 := "1.3.6.1.4.1.311.20.2.3" + oth1str := oid1 + ";utf8:devops@nope.com" + oth1 := newOtherNameUtf8(oth1str) + oth2 := otherNameUtf8{oid1, "me@example.com"} + // allowNone, allowAll := []string{}, []string{oid1 + ";UTF-8:*"} + allowNone, allowAll := []string{}, []string{"*"} + + // OtherSANs not allowed and not provided, should not be an error. + addOtherSANTests(useCSRs, false, allowNone, false, nil, nil, getOtherCheck()) + + // OtherSANs not allowed, valid OtherSANs provided, should be an error. + addOtherSANTests(useCSRs, false, allowNone, true, []string{oth1str}, nil, nil) + + // OtherSANs allowed, bogus OtherSANs provided, should be an error. + addOtherSANTests(useCSRs, false, allowAll, true, []string{"foobar"}, nil, nil) + + // Given OtherSANs as API argument and useCSRSANs false, CSR arg ignored. + addOtherSANTests(useCSRs, false, allowAll, false, []string{oth1str}, + []otherNameUtf8{oth2}, getOtherCheck(oth1)) + + if useCSRs { + // OtherSANs not allowed, valid OtherSANs provided via CSR, should be an error. + addOtherSANTests(useCSRs, true, allowNone, true, nil, []otherNameUtf8{oth1}, nil) + + // Given OtherSANs as both API and CSR arguments and useCSRSANs=true, API arg ignored. 
			addOtherSANTests(useCSRs, false, allowAll, false, []string{oth2.String()},
				[]otherNameUtf8{oth1}, getOtherCheck(oth2))
		}
	}

	// Lease tests
	{
		// NOTE(review): role writes in this section run with ErrorOk=true,
		// i.e. a failure response is tolerated for these two steps.
		roleTestStep.ErrorOk = true
		roleVals.Lease = ""
		roleVals.MaxTTL = 0
		addTests(nil)

		roleVals.Lease = "12h"
		roleVals.MaxTTL = 6 * time.Hour
		addTests(nil)

		roleTestStep.ErrorOk = false
		roleVals.TTL = 0
		roleVals.MaxTTL = 12 * time.Hour
	}

	// Listing test: after all preceding steps exactly one role, named
	// "test", is expected to exist under roles/.
	ret = append(ret, logicaltest.TestStep{
		Operation: logical.ListOperation,
		Path:      "roles/",
		Check: func(resp *logical.Response) error {
			if resp.Data == nil {
				return fmt.Errorf("nil data")
			}

			keysRaw, ok := resp.Data["keys"]
			if !ok {
				return fmt.Errorf("no keys found")
			}

			keys, ok := keysRaw.([]string)
			if !ok {
				return fmt.Errorf("could not convert keys to a string list")
			}

			if len(keys) != 1 {
				return fmt.Errorf("unexpected keys length of %d", len(keys))
			}

			if keys[0] != "test" {
				return fmt.Errorf("unexpected key value of %s", keys[0])
			}

			return nil
		},
	})

	return ret
}

// TestRolesAltIssuer verifies that a role can pin a specific issuer via
// issuer_ref, and that roles without an explicit issuer_ref follow the
// mount's default issuer — including after the default is switched via
// config/issuers. Signature lineage is checked with CheckSignatureFrom.
func TestRolesAltIssuer(t *testing.T) {
	t.Parallel()
	b, s := CreateBackendWithStorage(t)

	// Create two issuers.
	resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{
		"common_name": "root a - example.com",
		"issuer_name": "root-a",
		"key_type":    "ec",
	})
	require.NoError(t, err)
	require.NotNil(t, resp)
	rootAPem := resp.Data["certificate"].(string)
	rootACert := parseCert(t, rootAPem)

	resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{
		"common_name": "root b - example.com",
		"issuer_name": "root-b",
		"key_type":    "ec",
	})
	require.NoError(t, err)
	require.NotNil(t, resp)
	rootBPem := resp.Data["certificate"].(string)
	rootBCert := parseCert(t, rootBPem)

	// Create three roles: one with no assignment, one with explicit root-a,
	// one with explicit root-b.
	_, err = CBWrite(b, s, "roles/use-default", map[string]interface{}{
		"allow_any_name":    true,
		"enforce_hostnames": false,
		"key_type":          "ec",
	})
	require.NoError(t, err)

	_, err = CBWrite(b, s, "roles/use-root-a", map[string]interface{}{
		"allow_any_name":    true,
		"enforce_hostnames": false,
		"key_type":          "ec",
		"issuer_ref":        "root-a",
	})
	require.NoError(t, err)

	_, err = CBWrite(b, s, "roles/use-root-b", map[string]interface{}{
		"allow_any_name":    true,
		"enforce_hostnames": false,
		"issuer_ref":        "root-b",
	})
	require.NoError(t, err)

	// Now issue certs against these roles.
	resp, err = CBWrite(b, s, "issue/use-default", map[string]interface{}{
		"common_name": "testing",
		"ttl":         "5s",
	})
	require.NoError(t, err)
	leafPem := resp.Data["certificate"].(string)
	leafCert := parseCert(t, leafPem)
	// root-a was created first, so it is the initial default issuer.
	err = leafCert.CheckSignatureFrom(rootACert)
	require.NoError(t, err, "should be signed by root-a but wasn't")

	resp, err = CBWrite(b, s, "issue/use-root-a", map[string]interface{}{
		"common_name": "testing",
		"ttl":         "5s",
	})
	require.NoError(t, err)
	leafPem = resp.Data["certificate"].(string)
	leafCert = parseCert(t, leafPem)
	err = leafCert.CheckSignatureFrom(rootACert)
	require.NoError(t, err, "should be signed by root-a but wasn't")

	resp, err = CBWrite(b, s, "issue/use-root-b", map[string]interface{}{
		"common_name": "testing",
		"ttl":         "5s",
	})
	require.NoError(t, err)
	leafPem = resp.Data["certificate"].(string)
	leafCert = parseCert(t, leafPem)
	err = leafCert.CheckSignatureFrom(rootBCert)
	require.NoError(t, err, "should be signed by root-b but wasn't")

	// Update the default issuer to be root B and make sure that the
	// use-default role updates.
	_, err = CBWrite(b, s, "config/issuers", map[string]interface{}{
		"default": "root-b",
	})
	require.NoError(t, err)

	resp, err = CBWrite(b, s, "issue/use-default", map[string]interface{}{
		"common_name": "testing",
		"ttl":         "5s",
	})
	require.NoError(t, err)
	leafPem = resp.Data["certificate"].(string)
	leafCert = parseCert(t, leafPem)
	err = leafCert.CheckSignatureFrom(rootBCert)
	require.NoError(t, err, "should be signed by root-b but wasn't")
}

// TestBackend_PathFetchValidRaw exercises the raw-fetch endpoints:
// ca_chain, ca/pem, cert/<serial>/raw (DER) and cert/<serial>/raw/pem,
// checking both response bodies and HTTP content types.
func TestBackend_PathFetchValidRaw(t *testing.T) {
	t.Parallel()
	b, storage := CreateBackendWithStorage(t)

	resp, err := b.HandleRequest(context.Background(), &logical.Request{
		Operation: logical.UpdateOperation,
		Path:      "root/generate/internal",
		Storage:   storage,
		Data: map[string]interface{}{
			"common_name": "test.com",
			"ttl":         "6h",
		},
		MountPoint: "pki/",
	})
	require.NoError(t, err)
	if resp != nil && resp.IsError() {
		t.Fatalf("failed to generate root, %#v", resp)
	}
	rootCaAsPem := resp.Data["certificate"].(string)

	// Chain should contain the root.
	resp, err = b.HandleRequest(context.Background(), &logical.Request{
		Operation:  logical.ReadOperation,
		Path:       "ca_chain",
		Storage:    storage,
		Data:       map[string]interface{}{},
		MountPoint: "pki/",
	})
	require.NoError(t, err)
	if resp != nil && resp.IsError() {
		t.Fatalf("failed read ca_chain, %#v", resp)
	}
	// The root PEM must appear exactly once in the raw chain body.
	if strings.Count(string(resp.Data[logical.HTTPRawBody].([]byte)), rootCaAsPem) != 1 {
		t.Fatalf("expected raw chain to contain the root cert")
	}

	// The ca/pem should return us the actual CA...
	resp, err = b.HandleRequest(context.Background(), &logical.Request{
		Operation:  logical.ReadOperation,
		Path:       "ca/pem",
		Storage:    storage,
		Data:       map[string]interface{}{},
		MountPoint: "pki/",
	})
	schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("ca/pem"), logical.ReadOperation), resp, true)
	require.NoError(t, err)
	if resp != nil && resp.IsError() {
		t.Fatalf("failed read ca/pem, %#v", resp)
	}
	// check the raw cert matches the response body
	if !bytes.Equal(resp.Data[logical.HTTPRawBody].([]byte), []byte(rootCaAsPem)) {
		t.Fatalf("failed to get raw cert")
	}

	_, err = b.HandleRequest(context.Background(), &logical.Request{
		Operation: logical.UpdateOperation,
		Path:      "roles/example",
		Storage:   storage,
		Data: map[string]interface{}{
			"allowed_domains":  "example.com",
			"allow_subdomains": "true",
			"max_ttl":          "1h",
			"no_store":         "false",
		},
		MountPoint: "pki/",
	})
	require.NoError(t, err, "error setting up pki role: %v", err)

	// Now issue a short-lived certificate from our pki-external.
	resp, err = b.HandleRequest(context.Background(), &logical.Request{
		Operation: logical.UpdateOperation,
		Path:      "issue/example",
		Storage:   storage,
		Data: map[string]interface{}{
			"common_name": "test.example.com",
			"ttl":         "5m",
		},
		MountPoint: "pki/",
	})
	require.NoError(t, err, "error issuing certificate: %v", err)
	require.NotNil(t, resp, "got nil response from issuing request")

	issueCrtAsPem := resp.Data["certificate"].(string)
	issuedCrt := parseCert(t, issueCrtAsPem)
	expectedSerial := serialFromCert(issuedCrt)
	expectedCert := []byte(issueCrtAsPem)

	// get der cert
	resp, err = b.HandleRequest(context.Background(), &logical.Request{
		Operation: logical.ReadOperation,
		Path:      fmt.Sprintf("cert/%s/raw", expectedSerial),
		Storage:   storage,
	})
	if resp != nil && resp.IsError() {
		t.Fatalf("failed to get raw cert, %#v", resp)
	}
	if err != nil {
		t.Fatal(err)
	}

	// check the raw cert matches the response body: re-encode the DER
	// body as PEM and compare against the PEM issued earlier.
	rawBody := resp.Data[logical.HTTPRawBody].([]byte)
	bodyAsPem := []byte(strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: rawBody}))))
	if !bytes.Equal(bodyAsPem, expectedCert) {
		t.Fatalf("failed to get raw cert for serial number: %s", expectedSerial)
	}
	if resp.Data[logical.HTTPContentType] != "application/pkix-cert" {
		t.Fatalf("failed to get raw cert content-type")
	}

	// get pem
	resp, err = b.HandleRequest(context.Background(), &logical.Request{
		Operation: logical.ReadOperation,
		Path:      fmt.Sprintf("cert/%s/raw/pem", expectedSerial),
		Storage:   storage,
	})
	if resp != nil && resp.IsError() {
		t.Fatalf("failed to get raw, %#v", resp)
	}
	if err != nil {
		t.Fatal(err)
	}

	// check the pem cert matches the response body
	if !bytes.Equal(resp.Data[logical.HTTPRawBody].([]byte), expectedCert) {
		t.Fatalf("failed to get pem cert")
	}
	if resp.Data[logical.HTTPContentType] != "application/pem-certificate-chain" {
		t.Fatalf("failed to get raw cert content-type")
	}
}

// TestBackend_PathFetchCertList checks that both "certs" and "certs/"
// list the root plus nine issued leaf certificates (10 keys total).
func TestBackend_PathFetchCertList(t *testing.T) {
	t.Parallel()
	// create the backend
	b, storage := CreateBackendWithStorage(t)

	// generate root
	rootData := map[string]interface{}{
		"common_name": "test.com",
		"ttl":         "6h",
	}

	resp, err := b.HandleRequest(context.Background(), &logical.Request{
		Operation:  logical.UpdateOperation,
		Path:       "root/generate/internal",
		Storage:    storage,
		Data:       rootData,
		MountPoint: "pki/",
	})

	if resp != nil && resp.IsError() {
		t.Fatalf("failed to generate root, %#v", resp)
	}
	if err != nil {
		t.Fatal(err)
	}

	// config urls
	urlsData := map[string]interface{}{
		"issuing_certificates":    "http://127.0.0.1:8200/v1/pki/ca",
		"crl_distribution_points": "http://127.0.0.1:8200/v1/pki/crl",
	}

	resp, err = b.HandleRequest(context.Background(), &logical.Request{
		Operation:  logical.UpdateOperation,
		Path:       "config/urls",
		Storage:    storage,
		Data:       urlsData,
		MountPoint: "pki/",
	})
	schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/urls"), logical.UpdateOperation), resp, true)

	resp, err = b.HandleRequest(context.Background(), &logical.Request{
		Operation:  logical.ReadOperation,
		Path:       "config/urls",
		Storage:    storage,
		MountPoint: "pki/",
	})
	schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/urls"), logical.ReadOperation), resp, true)

	if resp != nil && resp.IsError() {
		t.Fatalf("failed to config urls, %#v", resp)
	}
	if err != nil {
		t.Fatal(err)
	}

	// create a role entry
	roleData := map[string]interface{}{
		"allowed_domains":  "test.com",
		"allow_subdomains": "true",
		"max_ttl":          "4h",
	}

	resp, err = b.HandleRequest(context.Background(), &logical.Request{
		Operation:  logical.UpdateOperation,
		Path:       "roles/test-example",
		Storage:    storage,
		Data:       roleData,
		MountPoint: "pki/",
	})
	if resp != nil && resp.IsError() {
		t.Fatalf("failed to create a role, %#v", resp)
	}
	if err != nil {
		t.Fatal(err)
	}

	// issue some certs
	i := 1
	for i < 10 {
		certData := map[string]interface{}{
			"common_name": "example.test.com",
		}
		resp, err = b.HandleRequest(context.Background(), &logical.Request{
			Operation:  logical.UpdateOperation,
			Path:       "issue/test-example",
			Storage:    storage,
			Data:       certData,
			MountPoint: "pki/",
		})
		if resp != nil && resp.IsError() {
			t.Fatalf("failed to issue a cert, %#v", resp)
		}
		if err != nil {
			t.Fatal(err)
		}

		i = i + 1
	}

	// list certs
	resp, err = b.HandleRequest(context.Background(), &logical.Request{
		Operation:  logical.ListOperation,
		Path:       "certs",
		Storage:    storage,
		MountPoint: "pki/",
	})
	if resp != nil && resp.IsError() {
		t.Fatalf("failed to list certs, %#v", resp)
	}
	if err != nil {
		t.Fatal(err)
	}
	// check that the root and 9 additional certs are all listed
	if len(resp.Data["keys"].([]string)) != 10 {
		t.Fatalf("failed to list all 10 certs")
	}

	// list certs/
	resp, err = b.HandleRequest(context.Background(), &logical.Request{
		Operation:  logical.ListOperation,
		Path:       "certs/",
		Storage:    storage,
		MountPoint: "pki/",
	})
	if resp != nil && resp.IsError() {
		t.Fatalf("failed to list certs, %#v", resp)
	}
	if err != nil {
		t.Fatal(err)
	}
	// check that the root and 9 additional certs are all listed
	if len(resp.Data["keys"].([]string)) != 10 {
		t.Fatalf("failed to list all 10 certs")
	}
}

// TestBackend_SignVerbatim runs the sign-verbatim test once per supported
// key type (rsa, ed25519, ec, any) as parallel-safe subtests.
func TestBackend_SignVerbatim(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		testName string
		keyType  string
	}{
		{testName: "RSA", keyType: "rsa"},
		{testName: "ED25519", keyType: "ed25519"},
		{testName: "EC", keyType: "ec"},
		{testName: "Any", keyType: "any"},
	}
	for _, tc := range testCases {
		tc := tc // capture range variable for the closure (pre-Go-1.22 semantics)
		t.Run(tc.testName, func(t *testing.T) {
			runTestSignVerbatim(t, tc.keyType)
		})
	}
}

func runTestSignVerbatim(t *testing.T, keyType string) {
	// create the backend
	b, storage := CreateBackendWithStorage(t)

	// generate
root + rootData := map[string]interface{}{ + "common_name": "test.com", + "not_after": "9999-12-31T23:59:59Z", + } + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/generate/internal", + Storage: storage, + Data: rootData, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to generate root, %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + + // create a CSR and key + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + csrReq := &x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: "foo.bar.com", + }, + // Check that otherName extensions are not duplicated (see hashicorp/vault#16700). + // If these extensions are duplicated, sign-verbatim will fail when parsing the signed certificate on Go 1.19+ (see golang/go#50988). + // On older versions of Go this test will fail due to an explicit check for duplicate otherNames later in this test. + ExtraExtensions: []pkix.Extension{ + { + Id: oidExtensionSubjectAltName, + Critical: false, + Value: []byte{0x30, 0x26, 0xA0, 0x24, 0x06, 0x0A, 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0x37, 0x14, 0x02, 0x03, 0xA0, 0x16, 0x0C, 0x14, 0x75, 0x73, 0x65, 0x72, 0x6E, 0x61, 0x6D, 0x65, 0x40, 0x65, 0x78, 0x61, 0x6D, 0x70, 0x6C, 0x65, 0x2E, 0x63, 0x6F, 0x6D}, + }, + }, + } + csr, err := x509.CreateCertificateRequest(rand.Reader, csrReq, key) + if err != nil { + t.Fatal(err) + } + if len(csr) == 0 { + t.Fatal("generated csr is empty") + } + pemCSR := strings.TrimSpace(string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csr, + }))) + if len(pemCSR) == 0 { + t.Fatal("pem csr is empty") + } + + signVerbatimData := map[string]interface{}{ + "csr": pemCSR, + } + if keyType == "rsa" { + signVerbatimData["signature_bits"] = 512 + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "sign-verbatim", + Storage: 
storage, + Data: signVerbatimData, + MountPoint: "pki/", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("sign-verbatim"), logical.UpdateOperation), resp, true) + + if resp != nil && resp.IsError() { + t.Fatalf("failed to sign-verbatim basic CSR: %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + if resp.Secret != nil { + t.Fatal("secret is not nil") + } + + // create a role entry; we use this to check that sign-verbatim when used with a role is still honoring TTLs + roleData := map[string]interface{}{ + "ttl": "4h", + "max_ttl": "8h", + "key_type": keyType, + "not_before_duration": "2h", + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/test", + Storage: storage, + Data: roleData, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to create a role, %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "sign-verbatim/test", + Storage: storage, + Data: map[string]interface{}{ + "csr": pemCSR, + "ttl": "5h", + }, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to sign-verbatim ttl'd CSR: %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + if resp.Secret != nil { + t.Fatal("got a lease when we should not have") + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "sign-verbatim/test", + Storage: storage, + Data: map[string]interface{}{ + "csr": pemCSR, + "ttl": "12h", + }, + MountPoint: "pki/", + }) + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatalf(resp.Error().Error()) + } + if resp.Data == nil || resp.Data["certificate"] == nil { + t.Fatal("did not get expected data") + } + certString := resp.Data["certificate"].(string) + block, _ := pem.Decode([]byte(certString)) + if block == 
nil { + t.Fatal("nil pem block") + } + certs, err := x509.ParseCertificates(block.Bytes) + if err != nil { + t.Fatal(err) + } + if len(certs) != 1 { + t.Fatalf("expected a single cert, got %d", len(certs)) + } + cert := certs[0] + if math.Abs(float64(time.Now().Add(12*time.Hour).Unix()-cert.NotAfter.Unix())) < 10 { + t.Fatalf("sign-verbatim did not properly cap validity period (notAfter) on signed CSR: was %v vs requested %v but should've been %v", cert.NotAfter, time.Now().Add(12*time.Hour), time.Now().Add(8*time.Hour)) + } + if math.Abs(float64(time.Now().Add(-2*time.Hour).Unix()-cert.NotBefore.Unix())) > 10 { + t.Fatalf("sign-verbatim did not properly cap validity period (notBefore) on signed CSR: was %v vs expected %v", cert.NotBefore, time.Now().Add(-2*time.Hour)) + } + + // Now check signing a certificate using the not_after input using the Y10K value + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "sign-verbatim/test", + Storage: storage, + Data: map[string]interface{}{ + "csr": pemCSR, + "not_after": "9999-12-31T23:59:59Z", + }, + MountPoint: "pki/", + }) + if err != nil { + t.Fatal(err) + } + if resp != nil && resp.IsError() { + t.Fatalf(resp.Error().Error()) + } + if resp.Data == nil || resp.Data["certificate"] == nil { + t.Fatal("did not get expected data") + } + certString = resp.Data["certificate"].(string) + block, _ = pem.Decode([]byte(certString)) + if block == nil { + t.Fatal("nil pem block") + } + certs, err = x509.ParseCertificates(block.Bytes) + if err != nil { + t.Fatal(err) + } + if len(certs) != 1 { + t.Fatalf("expected a single cert, got %d", len(certs)) + } + cert = certs[0] + + // Fallback check for duplicate otherName, necessary on Go versions before 1.19. + // We assume that there is only one SAN in the original CSR and that it is an otherName. 
+ san_count := 0 + for _, ext := range cert.Extensions { + if ext.Id.Equal(oidExtensionSubjectAltName) { + san_count += 1 + } + } + if san_count != 1 { + t.Fatalf("expected one SAN extension, got %d", san_count) + } + + notAfter := cert.NotAfter.Format(time.RFC3339) + if notAfter != "9999-12-31T23:59:59Z" { + t.Fatal(fmt.Errorf("not after from certificate is not matching with input parameter")) + } + + // now check that if we set generate-lease it takes it from the role and the TTLs match + roleData = map[string]interface{}{ + "ttl": "4h", + "max_ttl": "8h", + "generate_lease": true, + "key_type": keyType, + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "roles/test", + Storage: storage, + Data: roleData, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to create a role, %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "sign-verbatim/test", + Storage: storage, + Data: map[string]interface{}{ + "csr": pemCSR, + "ttl": "5h", + }, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to sign-verbatim role-leased CSR: %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + if resp.Secret == nil { + t.Fatalf("secret is nil, response is %#v", *resp) + } + if math.Abs(float64(resp.Secret.TTL-(5*time.Hour))) > float64(5*time.Hour) { + t.Fatalf("ttl not default; wanted %v, got %v", b.System().DefaultLeaseTTL(), resp.Secret.TTL) + } +} + +func TestBackend_Root_Idempotency(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // This is a change within 1.11, we are no longer idempotent across generate/internal calls. 
+ resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + }) + require.NoError(t, err) + require.NotNil(t, resp, "expected ca info") + keyId1 := resp.Data["key_id"] + issuerId1 := resp.Data["issuer_id"] + cert := parseCert(t, resp.Data["certificate"].(string)) + certSkid := certutil.GetHexFormatted(cert.SubjectKeyId, ":") + + // -> Validate the SKID matches between the root cert and the key + resp, err = CBRead(b, s, "key/"+keyId1.(keyID).String()) + require.NoError(t, err) + require.NotNil(t, resp, "expected a response") + require.Equal(t, resp.Data["subject_key_id"], certSkid) + + resp, err = CBRead(b, s, "cert/ca_chain") + require.NoError(t, err, "error reading ca_chain: %v", err) + + r1Data := resp.Data + + // Calling generate/internal should generate a new CA as well. + resp, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + }) + require.NoError(t, err) + require.NotNil(t, resp, "expected ca info") + keyId2 := resp.Data["key_id"] + issuerId2 := resp.Data["issuer_id"] + cert = parseCert(t, resp.Data["certificate"].(string)) + certSkid = certutil.GetHexFormatted(cert.SubjectKeyId, ":") + + // -> Validate the SKID matches between the root cert and the key + resp, err = CBRead(b, s, "key/"+keyId2.(keyID).String()) + require.NoError(t, err) + require.NotNil(t, resp, "expected a response") + require.Equal(t, resp.Data["subject_key_id"], certSkid) + + // Make sure that we actually generated different issuer and key values + require.NotEqual(t, keyId1, keyId2) + require.NotEqual(t, issuerId1, issuerId2) + + // Now because the issued CA's have no links, the call to ca_chain should return the same data (ca chain from default) + resp, err = CBRead(b, s, "cert/ca_chain") + require.NoError(t, err, "error reading ca_chain: %v", err) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("cert/ca_chain"), logical.ReadOperation), resp, true) + + r2Data := 
resp.Data + if !reflect.DeepEqual(r1Data, r2Data) { + t.Fatal("got different ca certs") + } + + // Now let's validate that the import bundle is idempotent. + pemBundleRootCA := rootCACertPEM + "\n" + rootCAKeyPEM + resp, err = CBWrite(b, s, "config/ca", map[string]interface{}{ + "pem_bundle": pemBundleRootCA, + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/ca"), logical.UpdateOperation), resp, true) + + require.NoError(t, err) + require.NotNil(t, resp, "expected ca info") + firstMapping := resp.Data["mapping"].(map[string]string) + firstImportedKeys := resp.Data["imported_keys"].([]string) + firstImportedIssuers := resp.Data["imported_issuers"].([]string) + firstExistingKeys := resp.Data["existing_keys"].([]string) + firstExistingIssuers := resp.Data["existing_issuers"].([]string) + + require.NotContains(t, firstImportedKeys, keyId1) + require.NotContains(t, firstImportedKeys, keyId2) + require.NotContains(t, firstImportedIssuers, issuerId1) + require.NotContains(t, firstImportedIssuers, issuerId2) + require.Empty(t, firstExistingKeys) + require.Empty(t, firstExistingIssuers) + require.NotEmpty(t, firstMapping) + require.Equal(t, 1, len(firstMapping)) + + var issuerId3 string + var keyId3 string + for i, k := range firstMapping { + issuerId3 = i + keyId3 = k + } + + // Performing this again should result in no key/issuer ids being imported/generated. 
+ resp, err = CBWrite(b, s, "config/ca", map[string]interface{}{ + "pem_bundle": pemBundleRootCA, + }) + require.NoError(t, err) + require.NotNil(t, resp, "expected ca info") + secondMapping := resp.Data["mapping"].(map[string]string) + secondImportedKeys := resp.Data["imported_keys"] + secondImportedIssuers := resp.Data["imported_issuers"] + secondExistingKeys := resp.Data["existing_keys"] + secondExistingIssuers := resp.Data["existing_issuers"] + + require.Empty(t, secondImportedKeys) + require.Empty(t, secondImportedIssuers) + require.Contains(t, secondExistingKeys, keyId3) + require.Contains(t, secondExistingIssuers, issuerId3) + require.Equal(t, 1, len(secondMapping)) + + resp, err = CBDelete(b, s, "root") + require.NoError(t, err) + require.NotNil(t, resp) + require.Equal(t, 1, len(resp.Warnings)) + + // Make sure we can delete twice... + resp, err = CBDelete(b, s, "root") + require.NoError(t, err) + require.NotNil(t, resp) + require.Equal(t, 1, len(resp.Warnings)) + + _, err = CBRead(b, s, "cert/ca_chain") + require.Error(t, err, "expected an error fetching deleted ca_chain") + + // We should be able to import the same ca bundle as before and get a different key/issuer ids + resp, err = CBWrite(b, s, "config/ca", map[string]interface{}{ + "pem_bundle": pemBundleRootCA, + }) + require.NoError(t, err) + require.NotNil(t, resp, "expected ca info") + postDeleteImportedKeys := resp.Data["imported_keys"] + postDeleteImportedIssuers := resp.Data["imported_issuers"] + + // Make sure that we actually generated different issuer and key values, then the previous import + require.NotNil(t, postDeleteImportedKeys) + require.NotNil(t, postDeleteImportedIssuers) + require.NotEqual(t, postDeleteImportedKeys, firstImportedKeys) + require.NotEqual(t, postDeleteImportedIssuers, firstImportedIssuers) + + resp, err = CBRead(b, s, "cert/ca_chain") + require.NoError(t, err) + + caChainPostDelete := resp.Data + if reflect.DeepEqual(r1Data, caChainPostDelete) { + t.Fatal("ca certs 
from ca_chain were the same post delete, should have changed.") + } +} + +func TestBackend_SignIntermediate_AllowedPastCAValidity(t *testing.T) { + t.Parallel() + b_root, s_root := CreateBackendWithStorage(t) + b_int, s_int := CreateBackendWithStorage(t) + var err error + + // Direct issuing from root + _, err = CBWrite(b_root, s_root, "root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + _, err = CBWrite(b_root, s_root, "roles/test", map[string]interface{}{ + "allow_bare_domains": true, + "allow_subdomains": true, + "allow_any_name": true, + }) + if err != nil { + t.Fatal(err) + } + + resp, err := CBWrite(b_int, s_int, "intermediate/generate/internal", map[string]interface{}{ + "common_name": "myint.com", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b_root.Route("intermediate/generate/internal"), logical.UpdateOperation), resp, true) + require.Contains(t, resp.Data, "key_id") + intKeyId := resp.Data["key_id"].(keyID) + csr := resp.Data["csr"] + + resp, err = CBRead(b_int, s_int, "key/"+intKeyId.String()) + require.NoError(t, err) + require.NotNil(t, resp, "expected a response") + intSkid := resp.Data["subject_key_id"].(string) + + if err != nil { + t.Fatal(err) + } + + _, err = CBWrite(b_root, s_root, "sign/test", map[string]interface{}{ + "common_name": "myint.com", + "csr": csr, + "ttl": "60h", + }) + require.ErrorContains(t, err, "that is beyond the expiration of the CA certificate") + + _, err = CBWrite(b_root, s_root, "sign-verbatim/test", map[string]interface{}{ + "common_name": "myint.com", + "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", + "csr": csr, + "ttl": "60h", + }) + require.ErrorContains(t, err, "that is beyond the expiration of the CA certificate") + + resp, err = CBWrite(b_root, s_root, "root/sign-intermediate", map[string]interface{}{ + "common_name": "myint.com", + "other_sans": 
"1.3.6.1.4.1.311.20.2.3;utf8:caadmin@example.com", + "csr": csr, + "ttl": "60h", + }) + if err != nil { + t.Fatalf("got error: %v", err) + } + if resp == nil { + t.Fatal("got nil response") + } + if len(resp.Warnings) == 0 { + t.Fatalf("expected warnings, got %#v", *resp) + } + + cert := parseCert(t, resp.Data["certificate"].(string)) + certSkid := certutil.GetHexFormatted(cert.SubjectKeyId, ":") + require.Equal(t, intSkid, certSkid) +} + +func TestBackend_ConsulSignLeafWithLegacyRole(t *testing.T) { + t.Parallel() + // create the backend + b, s := CreateBackendWithStorage(t) + + // generate root + data, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + require.NoError(t, err, "failed generating internal root cert") + rootCaPem := data.Data["certificate"].(string) + + // Create a signing role like Consul did with the default args prior to Vault 1.10 + _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ + "allow_any_name": true, + "allowed_serial_numbers": []string{"MySerialNumber"}, + "key_type": "any", + "key_bits": "2048", + "signature_bits": "256", + }) + require.NoError(t, err, "failed creating legacy role") + + _, csrPem := generateTestCsr(t, certutil.ECPrivateKey, 256) + data, err = CBWrite(b, s, "sign/test", map[string]interface{}{ + "csr": csrPem, + }) + require.NoError(t, err, "failed signing csr") + certAsPem := data.Data["certificate"].(string) + + signedCert := parseCert(t, certAsPem) + rootCert := parseCert(t, rootCaPem) + requireSignedBy(t, signedCert, rootCert) +} + +func TestBackend_SignSelfIssued(t *testing.T) { + t.Parallel() + // create the backend + b, storage := CreateBackendWithStorage(t) + + // generate root + rootData := map[string]interface{}{ + "common_name": "test.com", + "ttl": "172800", + } + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/generate/internal", + Storage: storage, 
+ Data: rootData, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to generate root, %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "foo.bar.com", + }, + SerialNumber: big.NewInt(1234), + IsCA: false, + BasicConstraintsValid: true, + } + + ss, _ := getSelfSigned(t, template, template, key) + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + }, + MountPoint: "pki/", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response") + } + if !resp.IsError() { + t.Fatalf("expected error due to non-CA; got: %#v", *resp) + } + + // Set CA to true, but leave issuer alone + template.IsCA = true + + issuer := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "bar.foo.com", + }, + SerialNumber: big.NewInt(2345), + IsCA: true, + BasicConstraintsValid: true, + } + ss, ssCert := getSelfSigned(t, template, issuer, key) + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + }, + MountPoint: "pki/", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response") + } + if !resp.IsError() { + t.Fatalf("expected error due to different issuer; cert info is\nIssuer\n%#v\nSubject\n%#v\n", ssCert.Issuer, ssCert.Subject) + } + + ss, _ = getSelfSigned(t, template, template, key) + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + }, + MountPoint: "pki/", + 
}) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("root/sign-self-issued"), logical.UpdateOperation), resp, true) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response") + } + if resp.IsError() { + t.Fatalf("error in response: %s", resp.Error().Error()) + } + + newCertString := resp.Data["certificate"].(string) + block, _ := pem.Decode([]byte(newCertString)) + newCert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + + sc := b.makeStorageContext(context.Background(), storage) + signingBundle, err := sc.fetchCAInfo(defaultRef, ReadOnlyUsage) + if err != nil { + t.Fatal(err) + } + if reflect.DeepEqual(newCert.Subject, newCert.Issuer) { + t.Fatal("expected different subject/issuer") + } + if !reflect.DeepEqual(newCert.Issuer, signingBundle.Certificate.Subject) { + t.Fatalf("expected matching issuer/CA subject\n\nIssuer:\n%#v\nSubject:\n%#v\n", newCert.Issuer, signingBundle.Certificate.Subject) + } + if bytes.Equal(newCert.AuthorityKeyId, newCert.SubjectKeyId) { + t.Fatal("expected different authority/subject") + } + if !bytes.Equal(newCert.AuthorityKeyId, signingBundle.Certificate.SubjectKeyId) { + t.Fatal("expected authority on new cert to be same as signing subject") + } + if newCert.Subject.CommonName != "foo.bar.com" { + t.Fatalf("unexpected common name on new cert: %s", newCert.Subject.CommonName) + } +} + +// TestBackend_SignSelfIssued_DifferentTypes tests the functionality of the +// require_matching_certificate_algorithms flag. 
+func TestBackend_SignSelfIssued_DifferentTypes(t *testing.T) { + t.Parallel() + // create the backend + b, storage := CreateBackendWithStorage(t) + + // generate root + rootData := map[string]interface{}{ + "common_name": "test.com", + "ttl": "172800", + "key_type": "ec", + "key_bits": "521", + } + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/generate/internal", + Storage: storage, + Data: rootData, + MountPoint: "pki/", + }) + if resp != nil && resp.IsError() { + t.Fatalf("failed to generate root, %#v", *resp) + } + if err != nil { + t.Fatal(err) + } + + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + + template := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "foo.bar.com", + }, + SerialNumber: big.NewInt(1234), + IsCA: true, + BasicConstraintsValid: true, + } + + // Tests absent the flag + ss, _ := getSelfSigned(t, template, template, key) + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + }, + MountPoint: "pki/", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response") + } + + // Set CA to true, but leave issuer alone + template.IsCA = true + + // Tests with flag present but false + ss, _ = getSelfSigned(t, template, template, key) + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + "require_matching_certificate_algorithms": false, + }, + MountPoint: "pki/", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("got nil response") + } + + // Test with flag present and true + ss, _ = getSelfSigned(t, template, template, key) + _, err = 
b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Path: "root/sign-self-issued", + Storage: storage, + Data: map[string]interface{}{ + "certificate": ss, + "require_matching_certificate_algorithms": true, + }, + MountPoint: "pki/", + }) + if err == nil { + t.Fatal("expected error due to mismatched algorithms") + } +} + +// This is a really tricky test because the Go stdlib asn1 package is incapable +// of doing the right thing with custom OID SANs (see comments in the package, +// it's readily admitted that it's too magic) but that means that any +// validation logic written for this test isn't being independently verified, +// as in, if cryptobytes is used to decode it to make the test work, that +// doesn't mean we're encoding and decoding correctly, only that we made the +// test pass. Instead, when run verbosely it will first perform a bunch of +// checks to verify that the OID SAN logic doesn't screw up other SANs, then +// will spit out the PEM. This can be validated independently. +// +// You want the hex dump of the octet string corresponding to the X509v3 +// Subject Alternative Name. There's a nice online utility at +// https://lapo.it/asn1js that can be used to view the structure of an +// openssl-generated other SAN at +// https://lapo.it/asn1js/#3022A020060A2B060104018237140203A0120C106465766F7073406C6F63616C686F7374 +// (openssl asn1parse can also be used with -strparse using an offset of the +// hex blob for the subject alternative names extension). +// +// The structure output from here should match that precisely (even if the OID +// itself doesn't) in the second test. +// +// The test that encodes two should have them be in separate elements in the +// top-level sequence; see +// https://lapo.it/asn1js/#3046A020060A2B060104018237140203A0120C106465766F7073406C6F63616C686F7374A022060A2B060104018237140204A0140C12322D6465766F7073406C6F63616C686F7374 for an openssl-generated example. 
+// +// The good news is that it's valid to simply copy and paste the PEM output from +// here into the form at that site as it will do the right thing so it's pretty +// easy to validate. +func TestBackend_OID_SANs(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + var err error + var resp *logical.Response + var certStr string + var block *pem.Block + var cert *x509.Certificate + + _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ + "allowed_domains": []string{"foobar.com", "zipzap.com"}, + "allow_bare_domains": true, + "allow_subdomains": true, + "allow_ip_sans": true, + "allowed_other_sans": "1.3.6.1.4.1.311.20.2.3;UTF8:devops@*,1.3.6.1.4.1.311.20.2.4;utf8:d*e@foobar.com", + }) + if err != nil { + t.Fatal(err) + } + + // Get a baseline before adding OID SANs. In the next sections we'll verify + // that the SANs are all added even as the OID SAN inclusion forces other + // adding logic (custom rather than built-in Golang logic) + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foobar.com,foo.foobar.com,bar.foobar.com", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + certStr = resp.Data["certificate"].(string) + block, _ = pem.Decode([]byte(certStr)) + cert, err = x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + if cert.IPAddresses[0].String() != "1.2.3.4" { + t.Fatalf("unexpected IP SAN %q", cert.IPAddresses[0].String()) + } + if len(cert.DNSNames) != 3 || + cert.DNSNames[0] != "bar.foobar.com" || + cert.DNSNames[1] != "foo.foobar.com" || + cert.DNSNames[2] != "foobar.com" { + t.Fatalf("unexpected DNS SANs %v", cert.DNSNames) + } + + // First test some bad stuff that shouldn't work + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + 
"common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + // Not a valid value for the first possibility + "other_sans": "1.3.6.1.4.1.311.20.2.3;UTF8:devop@nope.com", + }) + if err == nil { + t.Fatal("expected error") + } + + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + // Not a valid OID for the first possibility + "other_sans": "1.3.6.1.4.1.311.20.2.5;UTF8:devops@nope.com", + }) + if err == nil { + t.Fatal("expected error") + } + + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + // Not a valid name for the second possibility + "other_sans": "1.3.6.1.4.1.311.20.2.4;UTF8:d34g@foobar.com", + }) + if err == nil { + t.Fatal("expected error") + } + + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + // Not a valid OID for the second possibility + "other_sans": "1.3.6.1.4.1.311.20.2.5;UTF8:d34e@foobar.com", + }) + if err == nil { + t.Fatal("expected error") + } + + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + // Not a valid type + "other_sans": "1.3.6.1.4.1.311.20.2.5;UTF2:d34e@foobar.com", + }) + if err == nil { + t.Fatal("expected error") + } + + // Valid for first possibility + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + "other_sans": "1.3.6.1.4.1.311.20.2.3;utf8:devops@nope.com", + }) + if err != nil { + t.Fatal(err) + } + certStr = 
resp.Data["certificate"].(string) + block, _ = pem.Decode([]byte(certStr)) + cert, err = x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + if cert.IPAddresses[0].String() != "1.2.3.4" { + t.Fatalf("unexpected IP SAN %q", cert.IPAddresses[0].String()) + } + if len(cert.DNSNames) != 3 || + cert.DNSNames[0] != "bar.foobar.com" || + cert.DNSNames[1] != "foo.foobar.com" || + cert.DNSNames[2] != "foobar.com" { + t.Fatalf("unexpected DNS SANs %v", cert.DNSNames) + } + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + t.Logf("certificate 1 to check:\n%s", certStr) + } + + // Valid for second possibility + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + "other_sans": "1.3.6.1.4.1.311.20.2.4;UTF8:d234e@foobar.com", + }) + if err != nil { + t.Fatal(err) + } + certStr = resp.Data["certificate"].(string) + block, _ = pem.Decode([]byte(certStr)) + cert, err = x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + if cert.IPAddresses[0].String() != "1.2.3.4" { + t.Fatalf("unexpected IP SAN %q", cert.IPAddresses[0].String()) + } + if len(cert.DNSNames) != 3 || + cert.DNSNames[0] != "bar.foobar.com" || + cert.DNSNames[1] != "foo.foobar.com" || + cert.DNSNames[2] != "foobar.com" { + t.Fatalf("unexpected DNS SANs %v", cert.DNSNames) + } + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + t.Logf("certificate 2 to check:\n%s", certStr) + } + + // Valid for both + oid1, type1, val1 := "1.3.6.1.4.1.311.20.2.3", "utf8", "devops@nope.com" + oid2, type2, val2 := "1.3.6.1.4.1.311.20.2.4", "utf-8", "d234e@foobar.com" + otherNames := []string{ + fmt.Sprintf("%s;%s:%s", oid1, type1, val1), + fmt.Sprintf("%s;%s:%s", oid2, type2, val2), + } + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": 
"1h", + "other_sans": strings.Join(otherNames, ","), + }) + if err != nil { + t.Fatal(err) + } + certStr = resp.Data["certificate"].(string) + block, _ = pem.Decode([]byte(certStr)) + cert, err = x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + if cert.IPAddresses[0].String() != "1.2.3.4" { + t.Fatalf("unexpected IP SAN %q", cert.IPAddresses[0].String()) + } + if len(cert.DNSNames) != 3 || + cert.DNSNames[0] != "bar.foobar.com" || + cert.DNSNames[1] != "foo.foobar.com" || + cert.DNSNames[2] != "foobar.com" { + t.Fatalf("unexpected DNS SANs %v", cert.DNSNames) + } + expectedOtherNames := []otherNameUtf8{{oid1, val1}, {oid2, val2}} + foundOtherNames, err := getOtherSANsFromX509Extensions(cert.Extensions) + if err != nil { + t.Fatal(err) + } + if diff := deep.Equal(expectedOtherNames, foundOtherNames); len(diff) != 0 { + t.Errorf("unexpected otherNames: %v", diff) + } + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + t.Logf("certificate 3 to check:\n%s", certStr) + } +} + +func TestBackend_AllowedSerialNumbers(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + var err error + var resp *logical.Response + var certStr string + var block *pem.Block + var cert *x509.Certificate + + _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + // First test that Serial Numbers are not allowed + _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + }) + if err != nil { + t.Fatal(err) + } + + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar", + "ttl": "1h", + }) + if err != nil { + t.Fatal(err) + } + + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar", + "ttl": "1h", + "serial_number": "foobar", + }) + if err == nil { + t.Fatal("expected error") + } + + // Update the role to allow 
serial numbers + _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + "allowed_serial_numbers": "f00*,b4r*", + }) + if err != nil { + t.Fatal(err) + } + + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar", + "ttl": "1h", + // Not a valid serial number + "serial_number": "foobar", + }) + if err == nil { + t.Fatal("expected error") + } + + // Valid for first possibility + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar", + "serial_number": "f00bar", + }) + if err != nil { + t.Fatal(err) + } + certStr = resp.Data["certificate"].(string) + block, _ = pem.Decode([]byte(certStr)) + cert, err = x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + if cert.Subject.SerialNumber != "f00bar" { + t.Fatalf("unexpected Subject SerialNumber %s", cert.Subject.SerialNumber) + } + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + t.Logf("certificate 1 to check:\n%s", certStr) + } + + // Valid for second possibility + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar", + "serial_number": "b4rf00", + }) + if err != nil { + t.Fatal(err) + } + certStr = resp.Data["certificate"].(string) + block, _ = pem.Decode([]byte(certStr)) + cert, err = x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + if cert.Subject.SerialNumber != "b4rf00" { + t.Fatalf("unexpected Subject SerialNumber %s", cert.Subject.SerialNumber) + } + if len(os.Getenv("VAULT_VERBOSE_PKITESTS")) > 0 { + t.Logf("certificate 2 to check:\n%s", certStr) + } +} + +func TestBackend_URI_SANs(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + var err error + + _, err = CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + _, err = CBWrite(b, s, "roles/test", map[string]interface{}{ + 
"allowed_domains": []string{"foobar.com", "zipzap.com"}, + "allow_bare_domains": true, + "allow_subdomains": true, + "allow_ip_sans": true, + "allowed_uri_sans": []string{"http://someuri/abc", "spiffe://host.com/*"}, + }) + if err != nil { + t.Fatal(err) + } + + // First test some bad stuff that shouldn't work + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + "uri_sans": "http://www.mydomain.com/zxf", + }) + if err == nil { + t.Fatal("expected error") + } + + // Test valid single entry + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + "uri_sans": "http://someuri/abc", + }) + if err != nil { + t.Fatal(err) + } + + // Test globed entry + _, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + "uri_sans": "spiffe://host.com/something", + }) + if err != nil { + t.Fatal(err) + } + + // Test multiple entries + resp, err := CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "foobar.com", + "ip_sans": "1.2.3.4", + "alt_names": "foo.foobar.com,bar.foobar.com", + "ttl": "1h", + "uri_sans": "spiffe://host.com/something,http://someuri/abc", + }) + if err != nil { + t.Fatal(err) + } + + certStr := resp.Data["certificate"].(string) + block, _ := pem.Decode([]byte(certStr)) + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + t.Fatal(err) + } + + URI0, _ := url.Parse("spiffe://host.com/something") + URI1, _ := url.Parse("http://someuri/abc") + + if len(cert.URIs) != 2 { + t.Fatalf("expected 2 valid URIs SANs %v", cert.URIs) + } + + if cert.URIs[0].String() != URI0.String() || cert.URIs[1].String() != URI1.String() { + t.Fatalf( + "expected URIs SANs %v to equal 
provided values spiffe://host.com/something, http://someuri/abc", + cert.URIs) + } +} + +func TestBackend_AllowedURISANsTemplate(t *testing.T) { + t.Parallel() + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "userpass": userpass.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Write test policy for userpass auth method. + err := client.Sys().PutPolicy("test", ` + path "pki/*" { + capabilities = ["update"] + }`) + if err != nil { + t.Fatal(err) + } + + // Enable userpass auth method. + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + + // Configure test role for userpass. + if _, err := client.Logical().Write("auth/userpass/users/userpassname", map[string]interface{}{ + "password": "test", + "policies": "test", + }); err != nil { + t.Fatal(err) + } + + // Login userpass for test role and keep client token. + secret, err := client.Logical().Write("auth/userpass/login/userpassname", map[string]interface{}{ + "password": "test", + }) + if err != nil || secret == nil { + t.Fatal(err) + } + userpassToken := secret.Auth.ClientToken + + // Get auth accessor for identity template. + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + userpassAccessor := auths["userpass/"].Accessor + + // Mount PKI. + err = client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Generate internal CA. + _, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + // Write role PKI. 
+ _, err = client.Logical().Write("pki/roles/test", map[string]interface{}{ + "allowed_uri_sans": []string{ + "spiffe://domain/{{identity.entity.aliases." + userpassAccessor + ".name}}", + "spiffe://domain/{{identity.entity.aliases." + userpassAccessor + ".name}}/*", "spiffe://domain/foo", + }, + "allowed_uri_sans_template": true, + "require_cn": false, + }) + if err != nil { + t.Fatal(err) + } + + // Issue certificate with identity templating + client.SetToken(userpassToken) + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"uri_sans": "spiffe://domain/userpassname, spiffe://domain/foo"}) + if err != nil { + t.Fatal(err) + } + + // Issue certificate with identity templating and glob + client.SetToken(userpassToken) + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"uri_sans": "spiffe://domain/userpassname/bar"}) + if err != nil { + t.Fatal(err) + } + + // Issue certificate with non-matching identity template parameter + client.SetToken(userpassToken) + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"uri_sans": "spiffe://domain/unknownuser"}) + if err == nil { + t.Fatal(err) + } + + // Set allowed_uri_sans_template to false. + _, err = client.Logical().Write("pki/roles/test", map[string]interface{}{ + "allowed_uri_sans_template": false, + }) + if err != nil { + t.Fatal(err) + } + + // Issue certificate with userpassToken. 
+ _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"uri_sans": "spiffe://domain/users/userpassname"}) + if err == nil { + t.Fatal("expected error") + } +} + +func TestBackend_AllowedDomainsTemplate(t *testing.T) { + t.Parallel() + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "userpass": userpass.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Write test policy for userpass auth method. + err := client.Sys().PutPolicy("test", ` + path "pki/*" { + capabilities = ["update"] + }`) + if err != nil { + t.Fatal(err) + } + + // Enable userpass auth method. + if err := client.Sys().EnableAuth("userpass", "userpass", ""); err != nil { + t.Fatal(err) + } + + // Configure test role for userpass. + if _, err := client.Logical().Write("auth/userpass/users/userpassname", map[string]interface{}{ + "password": "test", + "policies": "test", + }); err != nil { + t.Fatal(err) + } + + // Login userpass for test role and set client token + userpassAuth, err := auth.NewUserpassAuth("userpassname", &auth.Password{FromString: "test"}) + if err != nil { + t.Fatal(err) + } + + // Get auth accessor for identity template. + auths, err := client.Sys().ListAuth() + if err != nil { + t.Fatal(err) + } + userpassAccessor := auths["userpass/"].Accessor + + // Mount PKI. + err = client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Generate internal CA. + _, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + // Write role PKI. 
+ _, err = client.Logical().Write("pki/roles/test", map[string]interface{}{ + "allowed_domains": []string{ + "foobar.com", "zipzap.com", "{{identity.entity.aliases." + userpassAccessor + ".name}}", + "foo.{{identity.entity.aliases." + userpassAccessor + ".name}}.example.com", + }, + "allowed_domains_template": true, + "allow_bare_domains": true, + }) + if err != nil { + t.Fatal(err) + } + + // Issue certificate with userpassToken. + secret, err := client.Auth().Login(context.TODO(), userpassAuth) + if err != nil { + t.Fatal(err) + } + if err != nil || secret == nil { + t.Fatal(err) + } + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "userpassname"}) + if err != nil { + t.Fatal(err) + } + + // Issue certificate for foobar.com to verify allowed_domain_template doesn't break plain domains. + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "foobar.com"}) + if err != nil { + t.Fatal(err) + } + + // Issue certificate for unknown userpassname. + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "unknownuserpassname"}) + if err == nil { + t.Fatal("expected error") + } + + // Issue certificate for foo.userpassname.domain. + _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "foo.userpassname.example.com"}) + if err != nil { + t.Fatal("expected error") + } + + // Set allowed_domains_template to false. + _, err = client.Logical().Write("pki/roles/test", map[string]interface{}{ + "allowed_domains_template": false, + }) + if err != nil { + t.Fatal(err) + } + + // Issue certificate with userpassToken. 
+ _, err = client.Logical().Write("pki/issue/test", map[string]interface{}{"common_name": "userpassname"}) + if err == nil { + t.Fatal("expected error") + } +} + +func TestReadWriteDeleteRoles(t *testing.T) { + t.Parallel() + ctx := context.Background() + coreConfig := &vault.CoreConfig{ + CredentialBackends: map[string]logical.Factory{ + "userpass": userpass.Factory, + }, + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Mount PKI. + err := client.Sys().MountWithContext(ctx, "pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + resp, err := client.Logical().ReadWithContext(ctx, "pki/roles/test") + if err != nil { + t.Fatal(err) + } + + if resp != nil { + t.Fatalf("response should have been emtpy but was:\n%#v", resp) + } + + // Write role PKI. + _, err = client.Logical().WriteWithContext(ctx, "pki/roles/test", map[string]interface{}{}) + if err != nil { + t.Fatal(err) + } + + // Read the role. 
+ resp, err = client.Logical().ReadWithContext(ctx, "pki/roles/test") + if err != nil { + t.Fatal(err) + } + + if resp.Data == nil { + t.Fatal("default data within response was nil when it should have contained data") + } + + // Validate that we have not changed any defaults unknowingly + expectedData := map[string]interface{}{ + "key_type": "rsa", + "use_csr_sans": true, + "client_flag": true, + "allowed_serial_numbers": []interface{}{}, + "generate_lease": false, + "signature_bits": json.Number("256"), + "use_pss": false, + "allowed_domains": []interface{}{}, + "allowed_uri_sans_template": false, + "enforce_hostnames": true, + "policy_identifiers": []interface{}{}, + "require_cn": true, + "allowed_domains_template": false, + "allow_token_displayname": false, + "country": []interface{}{}, + "not_after": "", + "postal_code": []interface{}{}, + "use_csr_common_name": true, + "allow_localhost": true, + "allow_subdomains": false, + "allow_wildcard_certificates": true, + "allowed_other_sans": []interface{}{}, + "allowed_uri_sans": []interface{}{}, + "basic_constraints_valid_for_non_ca": false, + "key_usage": []interface{}{"DigitalSignature", "KeyAgreement", "KeyEncipherment"}, + "not_before_duration": json.Number("30"), + "allow_glob_domains": false, + "ttl": json.Number("0"), + "ou": []interface{}{}, + "email_protection_flag": false, + "locality": []interface{}{}, + "server_flag": true, + "allow_bare_domains": false, + "allow_ip_sans": true, + "ext_key_usage_oids": []interface{}{}, + "allow_any_name": false, + "ext_key_usage": []interface{}{}, + "key_bits": json.Number("2048"), + "max_ttl": json.Number("0"), + "no_store": false, + "organization": []interface{}{}, + "province": []interface{}{}, + "street_address": []interface{}{}, + "code_signing_flag": false, + "issuer_ref": "default", + "cn_validations": []interface{}{"email", "hostname"}, + "allowed_user_ids": []interface{}{}, + } + + if diff := deep.Equal(expectedData, resp.Data); len(diff) > 0 { + t.Fatalf("pki 
role default values have changed, diff: %v", diff) + } + + _, err = client.Logical().DeleteWithContext(ctx, "pki/roles/test") + if err != nil { + t.Fatal(err) + } + + resp, err = client.Logical().ReadWithContext(ctx, "pki/roles/test") + if err != nil { + t.Fatal(err) + } + + if resp != nil { + t.Fatalf("response should have been empty but was:\n%#v", resp) + } +} + +func setCerts() { + cak, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + panic(err) + } + marshaledKey, err := x509.MarshalECPrivateKey(cak) + if err != nil { + panic(err) + } + keyPEMBlock := &pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: marshaledKey, + } + ecCAKey = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) + if err != nil { + panic(err) + } + subjKeyID, err := certutil.GetSubjKeyID(cak) + if err != nil { + panic(err) + } + caCertTemplate := &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "root.localhost", + }, + SubjectKeyId: subjKeyID, + DNSNames: []string{"root.localhost"}, + KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign), + SerialNumber: big.NewInt(mathrand.Int63()), + NotAfter: time.Now().Add(262980 * time.Hour), + BasicConstraintsValid: true, + IsCA: true, + } + caBytes, err := x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, cak.Public(), cak) + if err != nil { + panic(err) + } + caCertPEMBlock := &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + } + ecCACert = strings.TrimSpace(string(pem.EncodeToMemory(caCertPEMBlock))) + + rak, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + panic(err) + } + marshaledKey = x509.MarshalPKCS1PrivateKey(rak) + keyPEMBlock = &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: marshaledKey, + } + rsaCAKey = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) + if err != nil { + panic(err) + } + _, err = certutil.GetSubjKeyID(rak) + if err != nil { + panic(err) + } + caBytes, err = x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, 
rak.Public(), rak) + if err != nil { + panic(err) + } + caCertPEMBlock = &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + } + rsaCACert = strings.TrimSpace(string(pem.EncodeToMemory(caCertPEMBlock))) + + _, edk, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + panic(err) + } + marshaledKey, err = x509.MarshalPKCS8PrivateKey(edk) + if err != nil { + panic(err) + } + keyPEMBlock = &pem.Block{ + Type: "PRIVATE KEY", + Bytes: marshaledKey, + } + edCAKey = strings.TrimSpace(string(pem.EncodeToMemory(keyPEMBlock))) + if err != nil { + panic(err) + } + _, err = certutil.GetSubjKeyID(edk) + if err != nil { + panic(err) + } + caBytes, err = x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, edk.Public(), edk) + if err != nil { + panic(err) + } + caCertPEMBlock = &pem.Block{ + Type: "CERTIFICATE", + Bytes: caBytes, + } + edCACert = strings.TrimSpace(string(pem.EncodeToMemory(caCertPEMBlock))) +} + +func TestBackend_RevokePlusTidy_Intermediate(t *testing.T) { + // Use a ridiculously long time to minimize the chance + // that we have to deal with more than one interval. + // InMemSink rounds down to an interval boundary rather than + // starting one at the time of initialization. + // + // This test is not parallelizable. 
+ inmemSink := metrics.NewInmemSink( + 1000000*time.Hour, + 2000000*time.Hour) + + metricsConf := metrics.DefaultConfig("") + metricsConf.EnableHostname = false + metricsConf.EnableHostnameLabel = false + metricsConf.EnableServiceLabel = false + metricsConf.EnableTypePrefix = false + + _, err := metrics.NewGlobal(metricsConf, inmemSink) + if err != nil { + t.Fatal(err) + } + + // Enable PKI secret engine + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + // Mount /pki as a root CA + err = client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Set up Metric Configuration, then restart to enable it + _, err = client.Logical().Write("pki/config/auto-tidy", map[string]interface{}{ + "maintain_stored_certificate_counts": true, + "publish_stored_certificate_count_metrics": true, + }) + _, err = client.Logical().Write("/sys/plugins/reload/backend", map[string]interface{}{ + "mounts": "pki/", + }) + + // Check the metrics initialized in order to calculate backendUUID for /pki + // BackendUUID not consistent during tests with UUID from /sys/mounts/pki + metricsSuffix := "total_certificates_stored" + backendUUID := "" + mostRecentInterval := inmemSink.Data()[len(inmemSink.Data())-1] + for _, existingGauge := range mostRecentInterval.Gauges { + if strings.HasSuffix(existingGauge.Name, metricsSuffix) { + expandedGaugeName := existingGauge.Name + backendUUID = strings.Split(expandedGaugeName, ".")[2] + break + } + } + if backendUUID == "" { + t.Fatalf("No Gauge Found ending with %s", metricsSuffix) + } + + // Set the cluster's 
certificate as the root CA in /pki + pemBundleRootCA := string(cluster.CACertPEM) + string(cluster.CAKeyPEM) + _, err = client.Logical().Write("pki/config/ca", map[string]interface{}{ + "pem_bundle": pemBundleRootCA, + }) + if err != nil { + t.Fatal(err) + } + + // Mount /pki2 to operate as an intermediate CA + err = client.Sys().Mount("pki2", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + if err != nil { + t.Fatal(err) + } + // Set up Metric Configuration, then restart to enable it + _, err = client.Logical().Write("pki2/config/auto-tidy", map[string]interface{}{ + "maintain_stored_certificate_counts": true, + "publish_stored_certificate_count_metrics": true, + }) + _, err = client.Logical().Write("/sys/plugins/reload/backend", map[string]interface{}{ + "mounts": "pki2/", + }) + + // Create a CSR for the intermediate CA + secret, err := client.Logical().Write("pki2/intermediate/generate/internal", nil) + if err != nil { + t.Fatal(err) + } + intermediateCSR := secret.Data["csr"].(string) + + // Sign the intermediate CSR using /pki + secret, err = client.Logical().Write("pki/root/sign-intermediate", map[string]interface{}{ + "permitted_dns_domains": ".myvault.com", + "csr": intermediateCSR, + "ttl": "10s", + }) + if err != nil { + t.Fatal(err) + } + intermediateCertSerial := secret.Data["serial_number"].(string) + intermediateCASerialColon := strings.ReplaceAll(strings.ToLower(intermediateCertSerial), ":", "-") + + // Get the intermediate cert after signing + secret, err = client.Logical().Read("pki/cert/" + intermediateCASerialColon) + if err != nil { + t.Fatal(err) + } + + if secret == nil || len(secret.Data) == 0 || len(secret.Data["certificate"].(string)) == 0 { + t.Fatal("expected certificate information from read operation") + } + + // Issue a revoke on on /pki + _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": intermediateCertSerial, + }) + if 
err != nil { + t.Fatal(err) + } + + // Check the cert-count metrics + expectedCertCountGaugeMetrics := map[string]float32{ + "secrets.pki." + backendUUID + ".total_revoked_certificates_stored": 1, + "secrets.pki." + backendUUID + ".total_certificates_stored": 1, + } + mostRecentInterval = inmemSink.Data()[len(inmemSink.Data())-1] + for gauge, value := range expectedCertCountGaugeMetrics { + if _, ok := mostRecentInterval.Gauges[gauge]; !ok { + t.Fatalf("Expected metrics to include a value for gauge %s", gauge) + } + if value != mostRecentInterval.Gauges[gauge].Value { + t.Fatalf("Expected value metric %s to be %f but got %f", gauge, value, mostRecentInterval.Gauges[gauge].Value) + } + } + + // Revoke adds a fixed 2s buffer, so we sleep for a bit longer to ensure + // the revocation time is past the current time. + time.Sleep(3 * time.Second) + + // Issue a tidy on /pki + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_cert_store": true, + "tidy_revoked_certs": true, + "safety_buffer": "1s", + }) + if err != nil { + t.Fatal(err) + } + + // Sleep a bit to make sure we're past the safety buffer + time.Sleep(2 * time.Second) + + // Get CRL and ensure the tidied cert is still in the list after the tidy + // operation since it's not past the NotAfter (ttl) value yet. 
+ crl := getParsedCrl(t, client, "pki") + + revokedCerts := crl.TBSCertList.RevokedCertificates + if len(revokedCerts) == 0 { + t.Fatal("expected CRL to be non-empty") + } + + sn := certutil.GetHexFormatted(revokedCerts[0].SerialNumber.Bytes(), ":") + if sn != intermediateCertSerial { + t.Fatalf("expected: %v, got: %v", intermediateCertSerial, sn) + } + + // Wait for cert to expire + time.Sleep(10 * time.Second) + + // Issue a tidy on /pki + _, err = client.Logical().Write("pki/tidy", map[string]interface{}{ + "tidy_cert_store": true, + "tidy_revoked_certs": true, + "safety_buffer": "1s", + }) + if err != nil { + t.Fatal(err) + } + + // Sleep a bit to make sure we're past the safety buffer + time.Sleep(2 * time.Second) + + // Issue a tidy-status on /pki + { + tidyStatus, err := client.Logical().Read("pki/tidy-status") + if err != nil { + t.Fatal(err) + } + expectedData := map[string]interface{}{ + "safety_buffer": json.Number("1"), + "issuer_safety_buffer": json.Number("31536000"), + "revocation_queue_safety_buffer": json.Number("172800"), + "tidy_cert_store": true, + "tidy_revoked_certs": true, + "tidy_revoked_cert_issuer_associations": false, + "tidy_expired_issuers": false, + "tidy_move_legacy_ca_bundle": false, + "tidy_revocation_queue": false, + "tidy_cross_cluster_revoked_certs": false, + "pause_duration": "0s", + "state": "Finished", + "error": nil, + "time_started": nil, + "time_finished": nil, + "last_auto_tidy_finished": nil, + "message": nil, + "cert_store_deleted_count": json.Number("1"), + "revoked_cert_deleted_count": json.Number("1"), + "missing_issuer_cert_count": json.Number("0"), + "current_cert_store_count": json.Number("0"), + "current_revoked_cert_count": json.Number("0"), + "revocation_queue_deleted_count": json.Number("0"), + "cross_revoked_cert_deleted_count": json.Number("0"), + "internal_backend_uuid": backendUUID, + "tidy_acme": false, + "acme_account_safety_buffer": json.Number("2592000"), + "acme_orders_deleted_count": json.Number("0"), 
+ "acme_account_revoked_count": json.Number("0"), + "acme_account_deleted_count": json.Number("0"), + "total_acme_account_count": json.Number("0"), + } + // Let's copy the times from the response so that we can use deep.Equal() + timeStarted, ok := tidyStatus.Data["time_started"] + if !ok || timeStarted == "" { + t.Fatal("Expected tidy status response to include a value for time_started") + } + expectedData["time_started"] = timeStarted + timeFinished, ok := tidyStatus.Data["time_finished"] + if !ok || timeFinished == "" { + t.Fatal("Expected tidy status response to include a value for time_finished") + } + expectedData["time_finished"] = timeFinished + expectedData["last_auto_tidy_finished"] = tidyStatus.Data["last_auto_tidy_finished"] + + if diff := deep.Equal(expectedData, tidyStatus.Data); diff != nil { + t.Fatal(diff) + } + } + // Check the tidy metrics + { + // Map of gauges to expected value + expectedGauges := map[string]float32{ + "secrets.pki.tidy.cert_store_current_entry": 0, + "secrets.pki.tidy.cert_store_total_entries": 1, + "secrets.pki.tidy.revoked_cert_current_entry": 0, + "secrets.pki.tidy.revoked_cert_total_entries": 1, + "secrets.pki.tidy.start_time_epoch": 0, + "secrets.pki." + backendUUID + ".total_certificates_stored": 0, + "secrets.pki." 
+ backendUUID + ".total_revoked_certificates_stored": 0, + "secrets.pki.tidy.cert_store_total_entries_remaining": 0, + "secrets.pki.tidy.revoked_cert_total_entries_remaining": 0, + } + // Map of counters to the sum of the metrics for that counter + expectedCounters := map[string]float64{ + "secrets.pki.tidy.cert_store_deleted_count": 1, + "secrets.pki.tidy.revoked_cert_deleted_count": 1, + "secrets.pki.tidy.success": 2, + // Note that "secrets.pki.tidy.failure" won't be in the captured metrics + } + + // If the metrics span more than one interval, skip the checks + intervals := inmemSink.Data() + if len(intervals) == 1 { + interval := inmemSink.Data()[0] + + for gauge, value := range expectedGauges { + if _, ok := interval.Gauges[gauge]; !ok { + t.Fatalf("Expected metrics to include a value for gauge %s", gauge) + } + if value != interval.Gauges[gauge].Value { + t.Fatalf("Expected value metric %s to be %f but got %f", gauge, value, interval.Gauges[gauge].Value) + } + + } + for counter, value := range expectedCounters { + if _, ok := interval.Counters[counter]; !ok { + t.Fatalf("Expected metrics to include a value for couter %s", counter) + } + if value != interval.Counters[counter].Sum { + t.Fatalf("Expected the sum of metric %s to be %f but got %f", counter, value, interval.Counters[counter].Sum) + } + } + + tidyDuration, ok := interval.Samples["secrets.pki.tidy.duration"] + if !ok { + t.Fatal("Expected metrics to include a value for sample secrets.pki.tidy.duration") + } + if tidyDuration.Count <= 0 { + t.Fatalf("Expected metrics to have count > 0 for sample secrets.pki.tidy.duration, but got %d", tidyDuration.Count) + } + } + } + + crl = getParsedCrl(t, client, "pki") + + revokedCerts = crl.TBSCertList.RevokedCertificates + if len(revokedCerts) != 0 { + t.Fatal("expected CRL to be empty") + } +} + +func TestBackend_Root_FullCAChain(t *testing.T) { + t.Parallel() + testCases := []struct { + testName string + keyType string + }{ + {testName: "RSA", keyType: 
"rsa"}, + {testName: "ED25519", keyType: "ed25519"}, + {testName: "EC", keyType: "ec"}, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.testName, func(t *testing.T) { + runFullCAChainTest(t, tc.keyType) + }) + } +} + +func runFullCAChainTest(t *testing.T, keyType string) { + // Generate a root CA at /pki-root + b_root, s_root := CreateBackendWithStorage(t) + + var err error + + resp, err := CBWrite(b_root, s_root, "root/generate/exported", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": keyType, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected ca info") + } + rootData := resp.Data + rootCert := rootData["certificate"].(string) + + // Validate that root's /cert/ca-chain now contains the certificate. + resp, err = CBRead(b_root, s_root, "cert/ca_chain") + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected intermediate chain information") + } + + fullChain := resp.Data["ca_chain"].(string) + requireCertInCaChainString(t, fullChain, rootCert, "expected root cert within root cert/ca_chain") + + // Make sure when we issue a leaf certificate we get the full chain back. + _, err = CBWrite(b_root, s_root, "roles/example", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + }) + require.NoError(t, err, "error setting up pki root role: %v", err) + + resp, err = CBWrite(b_root, s_root, "issue/example", map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + }) + require.NoError(t, err, "error issuing certificate from pki root: %v", err) + fullChainArray := resp.Data["ca_chain"].([]string) + requireCertInCaChainArray(t, fullChainArray, rootCert, "expected root cert within root issuance pki-root/issue/example") + + // Now generate an intermediate at /pki-intermediate, signed by the root. 
+ b_int, s_int := CreateBackendWithStorage(t) + + resp, err = CBWrite(b_int, s_int, "intermediate/generate/exported", map[string]interface{}{ + "common_name": "intermediate myvault.com", + "key_type": keyType, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected intermediate CSR info") + } + intermediateData := resp.Data + intermediateKey := intermediateData["private_key"].(string) + + resp, err = CBWrite(b_root, s_root, "root/sign-intermediate", map[string]interface{}{ + "csr": intermediateData["csr"], + "format": "pem", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected signed intermediate info") + } + intermediateSignedData := resp.Data + intermediateCert := intermediateSignedData["certificate"].(string) + + rootCaCert := parseCert(t, rootCert) + intermediaryCaCert := parseCert(t, intermediateCert) + requireSignedBy(t, intermediaryCaCert, rootCaCert) + intermediateCaChain := intermediateSignedData["ca_chain"].([]string) + + require.Equal(t, parseCert(t, intermediateCaChain[0]), intermediaryCaCert, "intermediate signed cert should have been part of ca_chain") + require.Equal(t, parseCert(t, intermediateCaChain[1]), rootCaCert, "root cert should have been part of ca_chain") + + _, err = CBWrite(b_int, s_int, "intermediate/set-signed", map[string]interface{}{ + "certificate": intermediateCert + "\n" + rootCert + "\n", + }) + if err != nil { + t.Fatal(err) + } + + // Validate that intermediate's ca_chain field now includes the full + // chain. 
+ resp, err = CBRead(b_int, s_int, "cert/ca_chain") + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected intermediate chain information") + } + + // Verify we have a proper CRL now + crl := getParsedCrlFromBackend(t, b_int, s_int, "crl") + require.Equal(t, 0, len(crl.TBSCertList.RevokedCertificates)) + + fullChain = resp.Data["ca_chain"].(string) + requireCertInCaChainString(t, fullChain, intermediateCert, "expected full chain to contain intermediate certificate from pki-intermediate/cert/ca_chain") + requireCertInCaChainString(t, fullChain, rootCert, "expected full chain to contain root certificate from pki-intermediate/cert/ca_chain") + + // Make sure when we issue a leaf certificate we get the full chain back. + _, err = CBWrite(b_int, s_int, "roles/example", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + }) + require.NoError(t, err, "error setting up pki intermediate role: %v", err) + + resp, err = CBWrite(b_int, s_int, "issue/example", map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + }) + require.NoError(t, err, "error issuing certificate from pki intermediate: %v", err) + fullChainArray = resp.Data["ca_chain"].([]string) + requireCertInCaChainArray(t, fullChainArray, intermediateCert, "expected full chain to contain intermediate certificate from pki-intermediate/issue/example") + requireCertInCaChainArray(t, fullChainArray, rootCert, "expected full chain to contain root certificate from pki-intermediate/issue/example") + + // Finally, import this signing cert chain into a new mount to ensure + // "external" CAs behave as expected. + b_ext, s_ext := CreateBackendWithStorage(t) + + _, err = CBWrite(b_ext, s_ext, "config/ca", map[string]interface{}{ + "pem_bundle": intermediateKey + "\n" + intermediateCert + "\n" + rootCert + "\n", + }) + if err != nil { + t.Fatal(err) + } + + // Validate the external chain information was loaded correctly. 
+ resp, err = CBRead(b_ext, s_ext, "cert/ca_chain") + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected intermediate chain information") + } + + fullChain = resp.Data["ca_chain"].(string) + if strings.Count(fullChain, intermediateCert) != 1 { + t.Fatalf("expected full chain to contain intermediate certificate; got %v occurrences", strings.Count(fullChain, intermediateCert)) + } + if strings.Count(fullChain, rootCert) != 1 { + t.Fatalf("expected full chain to contain root certificate; got %v occurrences", strings.Count(fullChain, rootCert)) + } + + // Now issue a short-lived certificate from our pki-external. + _, err = CBWrite(b_ext, s_ext, "roles/example", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + }) + require.NoError(t, err, "error setting up pki role: %v", err) + + resp, err = CBWrite(b_ext, s_ext, "issue/example", map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + }) + require.NoError(t, err, "error issuing certificate: %v", err) + require.NotNil(t, resp, "got nil response from issuing request") + issueCrtAsPem := resp.Data["certificate"].(string) + issuedCrt := parseCert(t, issueCrtAsPem) + + // Verify that the certificates are signed by the intermediary CA key... 
+ requireSignedBy(t, issuedCrt, intermediaryCaCert) + + // Test that we can request that the root ca certificate not appear in the ca_chain field + resp, err = CBWrite(b_ext, s_ext, "issue/example", map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + "remove_roots_from_chain": "true", + }) + requireSuccessNonNilResponse(t, resp, err, "error issuing certificate when removing self signed") + fullChain = strings.Join(resp.Data["ca_chain"].([]string), "\n") + if strings.Count(fullChain, intermediateCert) != 1 { + t.Fatalf("expected full chain to contain intermediate certificate; got %v occurrences", strings.Count(fullChain, intermediateCert)) + } + if strings.Count(fullChain, rootCert) != 0 { + t.Fatalf("expected full chain to NOT contain root certificate; got %v occurrences", strings.Count(fullChain, rootCert)) + } +} + +func requireCertInCaChainArray(t *testing.T, chain []string, cert string, msgAndArgs ...interface{}) { + var fullChain string + for _, caCert := range chain { + fullChain = fullChain + "\n" + caCert + } + + requireCertInCaChainString(t, fullChain, cert, msgAndArgs) +} + +func requireCertInCaChainString(t *testing.T, chain string, cert string, msgAndArgs ...interface{}) { + count := strings.Count(chain, cert) + if count != 1 { + failMsg := fmt.Sprintf("Found %d occurrances of the cert in the provided chain", count) + require.FailNow(t, failMsg, msgAndArgs...) 
+ } +} + +type MultiBool int + +const ( + MFalse MultiBool = iota + MTrue MultiBool = iota + MAny MultiBool = iota +) + +func (o MultiBool) ToValues() []bool { + if o == MTrue { + return []bool{true} + } + + if o == MFalse { + return []bool{false} + } + + if o == MAny { + return []bool{true, false} + } + + return []bool{} +} + +type IssuanceRegression struct { + AllowedDomains []string + AllowBareDomains MultiBool + AllowGlobDomains MultiBool + AllowSubdomains MultiBool + AllowLocalhost MultiBool + AllowWildcardCertificates MultiBool + CNValidations []string + CommonName string + Issued bool +} + +func RoleIssuanceRegressionHelper(t *testing.T, b *backend, s logical.Storage, index int, test IssuanceRegression) int { + tested := 0 + for _, AllowBareDomains := range test.AllowBareDomains.ToValues() { + for _, AllowGlobDomains := range test.AllowGlobDomains.ToValues() { + for _, AllowSubdomains := range test.AllowSubdomains.ToValues() { + for _, AllowLocalhost := range test.AllowLocalhost.ToValues() { + for _, AllowWildcardCertificates := range test.AllowWildcardCertificates.ToValues() { + role := fmt.Sprintf("issuance-regression-%d-bare-%v-glob-%v-subdomains-%v-localhost-%v-wildcard-%v", index, AllowBareDomains, AllowGlobDomains, AllowSubdomains, AllowLocalhost, AllowWildcardCertificates) + _, err := CBWrite(b, s, "roles/"+role, map[string]interface{}{ + "allowed_domains": test.AllowedDomains, + "allow_bare_domains": AllowBareDomains, + "allow_glob_domains": AllowGlobDomains, + "allow_subdomains": AllowSubdomains, + "allow_localhost": AllowLocalhost, + "allow_wildcard_certificates": AllowWildcardCertificates, + "cn_validations": test.CNValidations, + // TODO: test across this vector as well. Currently certain wildcard + // matching is broken with it enabled (such as x*x.foo). + "enforce_hostnames": false, + "key_type": "ec", + "key_bits": 256, + "no_store": true, + // With the CN Validations field, ensure we prevent CN from appearing + // in SANs. 
+ }) + if err != nil { + t.Fatal(err) + } + + resp, err := CBWrite(b, s, "issue/"+role, map[string]interface{}{ + "common_name": test.CommonName, + "exclude_cn_from_sans": true, + }) + + haveErr := err != nil || resp == nil + expectErr := !test.Issued + + if haveErr != expectErr { + t.Fatalf("issuance regression test [%d] failed: haveErr: %v, expectErr: %v, err: %v, resp: %v, test case: %v, role: %v", index, haveErr, expectErr, err, resp, test, role) + } + + tested += 1 + } + } + } + } + } + + return tested +} + +func TestBackend_Roles_IssuanceRegression(t *testing.T) { + t.Parallel() + // Regression testing of role's issuance policy. + testCases := []IssuanceRegression{ + // allowed, bare, glob, subdomains, localhost, wildcards, cn, issued + + // === Globs not allowed but used === // + // Allowed contains globs, but globbing not allowed, resulting in all + // issuances failing. Note that tests against issuing a wildcard with + // a bare domain will be covered later. + /* 0 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "baz.fud.bar.foo", false}, + /* 1 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.fud.bar.foo", false}, + /* 2 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "fud.bar.foo", false}, + /* 3 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.bar.foo", false}, + /* 4 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "bar.foo", false}, + /* 5 */ {[]string{"*.*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.foo", false}, + /* 6 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "foo", false}, + /* 7 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "baz.fud.bar.foo", false}, + /* 8 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.fud.bar.foo", false}, + /* 9 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "fud.bar.foo", false}, + /* 10 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "*.bar.foo", false}, + /* 11 */ 
{[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "bar.foo", false}, + /* 12 */ {[]string{"*.foo"}, MAny, MFalse, MAny, MAny, MAny, nil, "foo", false}, + + // === Localhost sanity === // + // Localhost forbidden, not matching allowed domains -> not issued + /* 13 */ {[]string{"*.*.foo"}, MAny, MAny, MAny, MFalse, MAny, nil, "localhost", false}, + // Localhost allowed, not matching allowed domains -> issued + /* 14 */ {[]string{"*.*.foo"}, MAny, MAny, MAny, MTrue, MAny, nil, "localhost", true}, + // Localhost allowed via allowed domains (and bare allowed), not by AllowLocalhost -> issued + /* 15 */ {[]string{"localhost"}, MTrue, MAny, MAny, MFalse, MAny, nil, "localhost", true}, + // Localhost allowed via allowed domains (and bare not allowed), not by AllowLocalhost -> not issued + /* 16 */ {[]string{"localhost"}, MFalse, MAny, MAny, MFalse, MAny, nil, "localhost", false}, + // Localhost allowed via allowed domains (but bare not allowed), and by AllowLocalhost -> issued + /* 17 */ {[]string{"localhost"}, MFalse, MAny, MAny, MTrue, MAny, nil, "localhost", true}, + + // === Bare wildcard issuance == // + // allowed_domains contains one or more wildcards and bare domains allowed, + // resulting in the cert being issued. + /* 18 */ {[]string{"*.foo"}, MTrue, MAny, MAny, MAny, MTrue, nil, "*.foo", true}, + /* 19 */ {[]string{"*.*.foo"}, MTrue, MAny, MAny, MAny, MAny, nil, "*.*.foo", false}, // Does not conform to RFC 6125 + + // === Double Leading Glob Testing === // + // Allowed contains globs, but glob allowed so certain matches work. + // The value of bare and localhost does not impact these results. 
+ /* 20 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "baz.fud.bar.foo", true}, // glob domains allow infinite subdomains + /* 21 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.fud.bar.foo", true}, // glob domain allows wildcard of subdomains + /* 22 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "fud.bar.foo", true}, + /* 23 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.bar.foo", true}, // Regression fix: Vault#13530 + /* 24 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "bar.foo", false}, + /* 25 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "*.foo", false}, + /* 26 */ {[]string{"*.*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "foo", false}, + + // Allowed contains globs, but glob and subdomain both work, so we expect + // wildcard issuance to work as well. The value of bare and localhost does + // not impact these results. + /* 27 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "baz.fud.bar.foo", true}, + /* 28 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.fud.bar.foo", true}, + /* 29 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "fud.bar.foo", true}, + /* 30 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.bar.foo", true}, // Regression fix: Vault#13530 + /* 31 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "bar.foo", false}, + /* 32 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "*.foo", false}, + /* 33 */ {[]string{"*.*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "foo", false}, + + // === Single Leading Glob Testing === // + // Allowed contains globs, but glob allowed so certain matches work. + // The value of bare and localhost does not impact these results. 
+ /* 34 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "baz.fud.bar.foo", true}, // glob domains allow infinite subdomains + /* 35 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.fud.bar.foo", true}, // glob domain allows wildcard of subdomains + /* 36 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "fud.bar.foo", true}, // glob domains allow infinite subdomains + /* 37 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MTrue, nil, "*.bar.foo", true}, // glob domain allows wildcards of subdomains + /* 38 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "bar.foo", true}, + /* 39 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MAny, nil, "foo", false}, + + // Allowed contains globs, but glob and subdomain both work, so we expect + // wildcard issuance to work as well. The value of bare and localhost does + // not impact these results. + /* 40 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "baz.fud.bar.foo", true}, + /* 41 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.fud.bar.foo", true}, + /* 42 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "fud.bar.foo", true}, + /* 43 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MTrue, nil, "*.bar.foo", true}, + /* 44 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "bar.foo", true}, + /* 45 */ {[]string{"*.foo"}, MAny, MTrue, MTrue, MAny, MAny, nil, "foo", false}, + + // === Only base domain name === // + // Allowed contains only domain components, but subdomains not allowed. This + // results in most issuances failing unless we allow bare domains, in which + // case only the final issuance for "foo" will succeed. 
+ /* 46 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "baz.fud.bar.foo", false}, + /* 47 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "*.fud.bar.foo", false}, + /* 48 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "fud.bar.foo", false}, + /* 49 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "*.bar.foo", false}, + /* 50 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "bar.foo", false}, + /* 51 */ {[]string{"foo"}, MAny, MAny, MFalse, MAny, MAny, nil, "*.foo", false}, + /* 52 */ {[]string{"foo"}, MFalse, MAny, MFalse, MAny, MAny, nil, "foo", false}, + /* 53 */ {[]string{"foo"}, MTrue, MAny, MFalse, MAny, MAny, nil, "foo", true}, + + // Allowed contains only domain components, and subdomains are now allowed. + // This results in most issuances succeeding, with the exception of the + // base foo, which is still governed by base's value. + /* 54 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, nil, "baz.fud.bar.foo", true}, + /* 55 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*.fud.bar.foo", true}, + /* 56 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, nil, "fud.bar.foo", true}, + /* 57 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*.bar.foo", true}, + /* 58 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MAny, nil, "bar.foo", true}, + /* 59 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*.foo", true}, + /* 60 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "x*x.foo", true}, // internal wildcards should be allowed per RFC 6125/6.4.3 + /* 61 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "*x.foo", true}, // prefix wildcards should be allowed per RFC 6125/6.4.3 + /* 62 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MTrue, nil, "x*.foo", true}, // suffix wildcards should be allowed per RFC 6125/6.4.3 + /* 63 */ {[]string{"foo"}, MFalse, MAny, MTrue, MAny, MAny, nil, "foo", false}, + /* 64 */ {[]string{"foo"}, MTrue, MAny, MTrue, MAny, MAny, nil, "foo", 
true}, + + // === Internal Glob Matching === // + // Basic glob matching requirements + /* 65 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xerox.foo", true}, + /* 66 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xylophone.files.pyrex.foo", true}, // globs can match across subdomains + /* 67 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xercex.bar.foo", false}, // x.foo isn't matched + /* 68 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "bar.foo", false}, // x*x isn't matched. + /* 69 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.foo", false}, // unrelated wildcard + /* 70 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.x*x.foo", false}, // Does not conform to RFC 6125 + /* 71 */ {[]string{"x*x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.xyx.foo", false}, // Globs and Subdomains do not layer per docs. + + // Various requirements around x*x.foo wildcard matching. + /* 72 */ {[]string{"x*x.foo"}, MFalse, MFalse, MAny, MAny, MAny, nil, "x*x.foo", false}, // base disabled, shouldn't match wildcard + /* 73 */ {[]string{"x*x.foo"}, MFalse, MTrue, MAny, MAny, MTrue, nil, "x*x.foo", true}, // base disallowed, but globbing allowed and should match + /* 74 */ {[]string{"x*x.foo"}, MTrue, MAny, MAny, MAny, MTrue, nil, "x*x.foo", true}, // base allowed, should match wildcard + + // Basic glob matching requirements with internal dots. 
+ /* 75 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xerox.foo", false}, // missing dots + /* 76 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "x.ero.x.foo", true}, + /* 77 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xylophone.files.pyrex.foo", false}, // missing dots + /* 78 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "x.ylophone.files.pyre.x.foo", true}, // globs can match across subdomains + /* 79 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "xercex.bar.foo", false}, // x.foo isn't matched + /* 80 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "bar.foo", false}, // x.*.x isn't matched. + /* 81 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.foo", false}, // unrelated wildcard + /* 82 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.x.*.x.foo", false}, // Does not conform to RFC 6125 + /* 83 */ {[]string{"x.*.x.foo"}, MAny, MTrue, MAny, MAny, MAny, nil, "*.x.y.x.foo", false}, // Globs and Subdomains do not layer per docs. 
+ + // === Wildcard restriction testing === // + /* 84 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MFalse, nil, "*.fud.bar.foo", false}, // glob domain allows wildcard of subdomains + /* 85 */ {[]string{"*.foo"}, MAny, MTrue, MFalse, MAny, MFalse, nil, "*.bar.foo", false}, // glob domain allows wildcards of subdomains + /* 86 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*.fud.bar.foo", false}, + /* 87 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*.bar.foo", false}, + /* 88 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*.foo", false}, + /* 89 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "x*x.foo", false}, + /* 90 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "*x.foo", false}, + /* 91 */ {[]string{"foo"}, MAny, MAny, MTrue, MAny, MFalse, nil, "x*.foo", false}, + /* 92 */ {[]string{"x*x.foo"}, MTrue, MAny, MAny, MAny, MFalse, nil, "x*x.foo", false}, + /* 93 */ {[]string{"*.foo"}, MFalse, MFalse, MAny, MAny, MAny, nil, "*.foo", false}, // Bare and globs forbidden despite (potentially) allowing wildcards. 
+ /* 94 */ {[]string{"x.*.x.foo"}, MAny, MAny, MAny, MAny, MAny, nil, "x.*.x.foo", false}, // Does not conform to RFC 6125 + + // === CN validation allowances === // + /* 95 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "*.fud.bar.foo", true}, + /* 96 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "*.fud.*.foo", true}, + /* 97 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "*.bar.*.bar", true}, + /* 98 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "foo@foo", true}, + /* 99 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "foo@foo@foo", true}, + /* 100 */ {[]string{"foo"}, MAny, MAny, MAny, MAny, MAny, []string{"disabled"}, "bar@bar@bar", true}, + /* 101 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar@bar@bar", false}, + /* 102 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar@bar", false}, + /* 103 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar@foo", true}, + /* 104 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar@foo", false}, + /* 105 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar@bar", false}, + /* 106 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar.foo", true}, + /* 107 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"hostname"}, "bar.bar", false}, + /* 108 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar.foo", false}, + /* 109 */ {[]string{"foo"}, MTrue, MTrue, MTrue, MTrue, MTrue, []string{"email"}, "bar.bar", false}, + } + + if len(testCases) != 110 { + t.Fatalf("misnumbered test case entries will make it hard to find bugs: %v", len(testCases)) + } + + b, s := CreateBackendWithStorage(t) + + // We need a RSA key so all signature sizes are valid with it. 
+ resp, err := CBWrite(b, s, "root/generate/exported", map[string]interface{}{ + "common_name": "myvault.com", + "ttl": "128h", + "key_type": "rsa", + "key_bits": 2048, + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected ca info") + } + + tested := 0 + for index, test := range testCases { + tested += RoleIssuanceRegressionHelper(t, b, s, index, test) + } + + t.Logf("Issuance regression expanded matrix test scenarios: %d", tested) +} + +type KeySizeRegression struct { + // Values reused for both Role and CA configuration. + RoleKeyType string + RoleKeyBits []int + + // Signature Bits presently is only specified on the role. + RoleSignatureBits []int + RoleUsePSS bool + + // These are tuples; must be of the same length. + TestKeyTypes []string + TestKeyBits []int + + // All of the above key types/sizes must pass or fail together. + ExpectError bool +} + +func (k KeySizeRegression) KeyTypeValues() []string { + if k.RoleKeyType == "any" { + return []string{"rsa", "ec", "ed25519"} + } + + return []string{k.RoleKeyType} +} + +func RoleKeySizeRegressionHelper(t *testing.T, b *backend, s logical.Storage, index int, test KeySizeRegression) int { + tested := 0 + + for _, caKeyType := range test.KeyTypeValues() { + for _, caKeyBits := range test.RoleKeyBits { + // Generate a new CA key. 
+			resp, err := CBWrite(b, s, "root/generate/exported", map[string]interface{}{
+				"common_name": "myvault.com",
+				"ttl":         "128h",
+				"key_type":    caKeyType,
+				"key_bits":    caKeyBits,
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+			if resp == nil {
+				t.Fatal("expected ca info")
+			}
+
+			for _, roleKeyBits := range test.RoleKeyBits {
+				for _, roleSignatureBits := range test.RoleSignatureBits {
+					role := fmt.Sprintf("key-size-regression-%d-keytype-%v-keybits-%d-signature-bits-%d", index, test.RoleKeyType, roleKeyBits, roleSignatureBits)
+					_, err := CBWrite(b, s, "roles/"+role, map[string]interface{}{
+						"key_type":       test.RoleKeyType,
+						"key_bits":       roleKeyBits,
+						"signature_bits": roleSignatureBits,
+						"use_pss":        test.RoleUsePSS,
+					})
+					if err != nil {
+						t.Fatal(err)
+					}
+
+					// Use a distinct loop variable (i) here: the previous
+					// version named it "index", shadowing the test-case index
+					// parameter, so the failure messages below reported the
+					// key-type position instead of the test-case number.
+					for i, keyType := range test.TestKeyTypes {
+						keyBits := test.TestKeyBits[i]
+
+						_, _, csrPem := generateCSR(t, &x509.CertificateRequest{
+							Subject: pkix.Name{
+								CommonName: "localhost",
+							},
+						}, keyType, keyBits)
+
+						resp, err = CBWrite(b, s, "sign/"+role, map[string]interface{}{
+							"common_name": "localhost",
+							"csr":         csrPem,
+						})
+
+						haveErr := err != nil || resp == nil
+
+						if haveErr != test.ExpectError {
+							t.Fatalf("key size regression test [%d] failed: haveErr: %v, expectErr: %v, err: %v, resp: %v, test case: %v, caKeyType: %v, caKeyBits: %v, role: %v, keyType: %v, keyBits: %v", index, haveErr, test.ExpectError, err, resp, test, caKeyType, caKeyBits, role, keyType, keyBits)
+						}
+
+						if resp != nil && test.RoleUsePSS && caKeyType == "rsa" {
+							leafCert := parseCert(t, resp.Data["certificate"].(string))
+							// Ensure an RSA CA configured for PSS actually signed
+							// the leaf with a PSS algorithm.
+							switch leafCert.SignatureAlgorithm {
+							case x509.SHA256WithRSAPSS, x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS:
+							default:
+								t.Fatalf("key size regression test [%d] failed on role %v: unexpected signature algorithm; expected RSA-type CA to sign a leaf cert with PSS algorithm; got %v", index, role, leafCert.SignatureAlgorithm.String())
+							}
+						}
+
+						tested += 1
+					}
+				}
+			}
+
+			_, err = CBDelete(b, s,
"root") + if err != nil { + t.Fatal(err) + } + } + } + + return tested +} + +func TestBackend_Roles_KeySizeRegression(t *testing.T) { + t.Parallel() + // Regression testing of role's issuance policy. + testCases := []KeySizeRegression{ + // RSA with default parameters should fail to issue smaller RSA keys + // and any size ECDSA/Ed25519 keys. + /* 0 */ {"rsa", []int{0, 2048}, []int{0, 256, 384, 512}, false, []string{"rsa", "ec", "ec", "ec", "ec", "ed25519"}, []int{1024, 224, 256, 384, 521, 0}, true}, + // But it should work to issue larger RSA keys. + /* 1 */ {"rsa", []int{0, 2048}, []int{0, 256, 384, 512}, false, []string{"rsa", "rsa"}, []int{2048, 3072}, false}, + + // EC with default parameters should fail to issue smaller EC keys + // and any size RSA/Ed25519 keys. + /* 2 */ {"ec", []int{0}, []int{0}, false, []string{"rsa", "ec", "ed25519"}, []int{2048, 224, 0}, true}, + // But it should work to issue larger EC keys. Note that we should be + // independent of signature bits as that's computed from the issuer + // type (for EC based issuers). + /* 3 */ {"ec", []int{224}, []int{0, 256, 384, 521}, false, []string{"ec", "ec", "ec", "ec"}, []int{224, 256, 384, 521}, false}, + /* 4 */ {"ec", []int{0, 256}, []int{0, 256, 384, 521}, false, []string{"ec", "ec", "ec"}, []int{256, 384, 521}, false}, + /* 5 */ {"ec", []int{384}, []int{0, 256, 384, 521}, false, []string{"ec", "ec"}, []int{384, 521}, false}, + /* 6 */ {"ec", []int{521}, []int{0, 256, 384, 512}, false, []string{"ec"}, []int{521}, false}, + + // Ed25519 should reject RSA and EC keys. + /* 7 */ {"ed25519", []int{0}, []int{0}, false, []string{"rsa", "ec", "ec"}, []int{2048, 256, 521}, true}, + // But it should work to issue Ed25519 keys. + /* 8 */ {"ed25519", []int{0}, []int{0}, false, []string{"ed25519"}, []int{0}, false}, + + // Any key type should reject insecure RSA key sizes. 
+ /* 9 */ {"any", []int{0}, []int{0, 256, 384, 512}, false, []string{"rsa", "rsa"}, []int{512, 1024}, true}, + // But work for everything else. + /* 10 */ {"any", []int{0}, []int{0, 256, 384, 512}, false, []string{"rsa", "rsa", "ec", "ec", "ec", "ec", "ed25519"}, []int{2048, 3072, 224, 256, 384, 521, 0}, false}, + + // RSA with larger than default key size should reject smaller ones. + /* 11 */ {"rsa", []int{3072}, []int{0, 256, 384, 512}, false, []string{"rsa"}, []int{2048}, true}, + + // We should be able to sign with PSS with any CA key type. + /* 12 */ {"rsa", []int{0}, []int{0, 256, 384, 512}, true, []string{"rsa"}, []int{2048}, false}, + /* 13 */ {"ec", []int{0}, []int{0}, true, []string{"ec"}, []int{256}, false}, + /* 14 */ {"ed25519", []int{0}, []int{0}, true, []string{"ed25519"}, []int{0}, false}, + } + + if len(testCases) != 15 { + t.Fatalf("misnumbered test case entries will make it hard to find bugs: %v", len(testCases)) + } + + b, s := CreateBackendWithStorage(t) + + tested := 0 + for index, test := range testCases { + tested += RoleKeySizeRegressionHelper(t, b, s, index, test) + } + + t.Logf("Key size regression expanded matrix test scenarios: %d", tested) +} + +func TestRootWithExistingKey(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + var err error + + // Fail requests if type is existing, and we specify the key_type param + _, err = CBWrite(b, s, "root/generate/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": "rsa", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "key_type nor key_bits arguments can be set in this mode") + + // Fail requests if type is existing, and we specify the key_bits param + _, err = CBWrite(b, s, "root/generate/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "key_bits": "2048", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "key_type nor key_bits arguments can be set in this mode") + + // Fail if the 
specified key does not exist. + _, err = CBWrite(b, s, "issuers/generate/root/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "issuer_name": "my-issuer1", + "key_ref": "my-key1", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "unable to find PKI key for reference: my-key1") + + // Fail if the specified key name is default. + _, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{ + "common_name": "root myvault.com", + "issuer_name": "my-issuer1", + "key_name": "Default", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "reserved keyword 'default' can not be used as key name") + + // Fail if the specified issuer name is default. + _, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{ + "common_name": "root myvault.com", + "issuer_name": "DEFAULT", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "reserved keyword 'default' can not be used as issuer name") + + // Create the first CA + resp, err := CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": "rsa", + "issuer_name": "my-issuer1", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuers/generate/root/internal"), logical.UpdateOperation), resp, true) + require.NoError(t, err) + require.NotNil(t, resp.Data["certificate"]) + myIssuerId1 := resp.Data["issuer_id"] + myKeyId1 := resp.Data["key_id"] + require.NotEmpty(t, myIssuerId1) + require.NotEmpty(t, myKeyId1) + + // Fetch the parsed CRL; it should be empty as we've not revoked anything + parsedCrl := getParsedCrlFromBackend(t, b, s, "issuer/my-issuer1/crl/der") + require.Equal(t, len(parsedCrl.TBSCertList.RevokedCertificates), 0, "should have no revoked certificates") + + // Fail if the specified issuer name is re-used. 
+ _, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{ + "common_name": "root myvault.com", + "issuer_name": "my-issuer1", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "issuer name already in use") + + // Create the second CA + resp, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": "rsa", + "issuer_name": "my-issuer2", + "key_name": "root-key2", + }) + require.NoError(t, err) + require.NotNil(t, resp.Data["certificate"]) + myIssuerId2 := resp.Data["issuer_id"] + myKeyId2 := resp.Data["key_id"] + require.NotEmpty(t, myIssuerId2) + require.NotEmpty(t, myKeyId2) + + // Fetch the parsed CRL; it should be empty as we've not revoked anything + parsedCrl = getParsedCrlFromBackend(t, b, s, "issuer/my-issuer2/crl/der") + require.Equal(t, len(parsedCrl.TBSCertList.RevokedCertificates), 0, "should have no revoked certificates") + + // Fail if the specified key name is re-used. + _, err = CBWrite(b, s, "issuers/generate/root/internal", map[string]interface{}{ + "common_name": "root myvault.com", + "issuer_name": "my-issuer3", + "key_name": "root-key2", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "key name already in use") + + // Create a third CA re-using key from CA 1 + resp, err = CBWrite(b, s, "issuers/generate/root/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "issuer_name": "my-issuer3", + "key_ref": myKeyId1, + }) + require.NoError(t, err) + require.NotNil(t, resp.Data["certificate"]) + myIssuerId3 := resp.Data["issuer_id"] + myKeyId3 := resp.Data["key_id"] + require.NotEmpty(t, myIssuerId3) + require.NotEmpty(t, myKeyId3) + + // Fetch the parsed CRL; it should be empty as we've not revoking anything. 
+ parsedCrl = getParsedCrlFromBackend(t, b, s, "issuer/my-issuer3/crl/der") + require.Equal(t, len(parsedCrl.TBSCertList.RevokedCertificates), 0, "should have no revoked certificates") + // Signatures should be the same since this is just a reissued cert. We + // use signature as a proxy for "these two CRLs are equal". + firstCrl := getParsedCrlFromBackend(t, b, s, "issuer/my-issuer1/crl/der") + require.Equal(t, parsedCrl.SignatureValue, firstCrl.SignatureValue) + + require.NotEqual(t, myIssuerId1, myIssuerId2) + require.NotEqual(t, myIssuerId1, myIssuerId3) + require.NotEqual(t, myKeyId1, myKeyId2) + require.Equal(t, myKeyId1, myKeyId3) + + resp, err = CBList(b, s, "issuers") + require.NoError(t, err) + require.Equal(t, 3, len(resp.Data["keys"].([]string))) + require.Contains(t, resp.Data["keys"], string(myIssuerId1.(issuerID))) + require.Contains(t, resp.Data["keys"], string(myIssuerId2.(issuerID))) + require.Contains(t, resp.Data["keys"], string(myIssuerId3.(issuerID))) +} + +func TestIntermediateWithExistingKey(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + var err error + + // Fail requests if type is existing, and we specify the key_type param + _, err = CBWrite(b, s, "intermediate/generate/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": "rsa", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "key_type nor key_bits arguments can be set in this mode") + + // Fail requests if type is existing, and we specify the key_bits param + _, err = CBWrite(b, s, "intermediate/generate/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "key_bits": "2048", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "key_type nor key_bits arguments can be set in this mode") + + // Fail if the specified key does not exist. 
+ _, err = CBWrite(b, s, "issuers/generate/intermediate/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "key_ref": "my-key1", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "unable to find PKI key for reference: my-key1") + + // Create the first intermediate CA + resp, err := CBWrite(b, s, "issuers/generate/intermediate/internal", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": "rsa", + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuers/generate/intermediate/internal"), logical.UpdateOperation), resp, true) + require.NoError(t, err) + // csr1 := resp.Data["csr"] + myKeyId1 := resp.Data["key_id"] + require.NotEmpty(t, myKeyId1) + + // Create the second intermediate CA + resp, err = CBWrite(b, s, "issuers/generate/intermediate/internal", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": "rsa", + "key_name": "interkey1", + }) + require.NoError(t, err) + // csr2 := resp.Data["csr"] + myKeyId2 := resp.Data["key_id"] + require.NotEmpty(t, myKeyId2) + + // Create a third intermediate CA re-using key from intermediate CA 1 + resp, err = CBWrite(b, s, "issuers/generate/intermediate/existing", map[string]interface{}{ + "common_name": "root myvault.com", + "key_ref": myKeyId1, + }) + require.NoError(t, err) + // csr3 := resp.Data["csr"] + myKeyId3 := resp.Data["key_id"] + require.NotEmpty(t, myKeyId3) + + require.NotEqual(t, myKeyId1, myKeyId2) + require.Equal(t, myKeyId1, myKeyId3, "our new ca did not seem to reuse the key as we expected.") +} + +func TestIssuanceTTLs(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "root example.com", + "issuer_name": "root", + "ttl": "10s", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + rootCert := parseCert(t, resp.Data["certificate"].(string)) + + _, err = CBWrite(b, s, 
"roles/local-testing", map[string]interface{}{ + "allow_any_name": true, + "enforce_hostnames": false, + "key_type": "ec", + }) + require.NoError(t, err) + + _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing", + "ttl": "1s", + }) + require.NoError(t, err, "expected issuance to succeed due to shorter ttl than cert ttl") + + _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing", + }) + require.Error(t, err, "expected issuance to fail due to longer default ttl than cert ttl") + + resp, err = CBPatch(b, s, "issuer/root", map[string]interface{}{ + "leaf_not_after_behavior": "permit", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["leaf_not_after_behavior"], "permit") + + _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing", + }) + require.NoError(t, err, "expected issuance to succeed due to permitted longer TTL") + + resp, err = CBWrite(b, s, "issuer/root", map[string]interface{}{ + "issuer_name": "root", + "leaf_not_after_behavior": "truncate", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["leaf_not_after_behavior"], "truncate") + + _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing", + }) + require.NoError(t, err, "expected issuance to succeed due to truncated ttl") + + // Sleep until the parent cert expires and the clock rolls over + // to the next second. + time.Sleep(time.Until(rootCert.NotAfter) + (1500 * time.Millisecond)) + + resp, err = CBWrite(b, s, "issuer/root", map[string]interface{}{ + "issuer_name": "root", + "leaf_not_after_behavior": "err", + }) + require.NoError(t, err) + require.NotNil(t, resp) + + // Even 1s ttl should now fail. 
+ _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ + "common_name": "testing", + "ttl": "1s", + }) + require.Error(t, err, "expected issuance to fail due to longer default ttl than cert ttl") +} + +func TestSealWrappedStorageConfigured(t *testing.T) { + t.Parallel() + b, _ := CreateBackendWithStorage(t) + wrappedEntries := b.Backend.PathsSpecial.SealWrapStorage + + // Make sure our legacy bundle is within the list + // NOTE: do not convert these test values to constants, we should always have these paths within seal wrap config + require.Contains(t, wrappedEntries, "config/ca_bundle", "Legacy bundle missing from seal wrap") + // The trailing / is important as it treats the entire folder requiring seal wrapping, not just config/key + require.Contains(t, wrappedEntries, "config/key/", "key prefix with trailing / missing from seal wrap.") +} + +func TestBackend_ConfigCA_WithECParams(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // Generated key with OpenSSL: + // $ openssl ecparam -out p256.key -name prime256v1 -genkey + // + // Regression test for https://github.com/hashicorp/vault/issues/16667 + resp, err := CBWrite(b, s, "config/ca", map[string]interface{}{ + "pem_bundle": ` +-----BEGIN EC PARAMETERS----- +BggqhkjOPQMBBw== +-----END EC PARAMETERS----- +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEINzXthCZdhyV7+wIEBl/ty+ctNsUS99ykTeax6EbYZtvoAoGCCqGSM49 +AwEHoUQDQgAE57NX8bR/nDoW8yRgLswoXBQcjHrdyfuHS0gPwki6BNnfunUzryVb +8f22/JWj6fsEF6AOADZlrswKIbR2Es9e/w== +-----END EC PRIVATE KEY----- + `, + }) + require.NoError(t, err) + require.NotNil(t, resp, "expected ca info") + importedKeys := resp.Data["imported_keys"].([]string) + importedIssuers := resp.Data["imported_issuers"].([]string) + + require.Equal(t, len(importedKeys), 1) + require.Equal(t, len(importedIssuers), 0) +} + +func TestPerIssuerAIA(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // Generating a root without anything should not have AIAs. 
+ resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "root example.com", + "issuer_name": "root", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + rootCert := parseCert(t, resp.Data["certificate"].(string)) + require.Empty(t, rootCert.OCSPServer) + require.Empty(t, rootCert.IssuingCertificateURL) + require.Empty(t, rootCert.CRLDistributionPoints) + + // Set some local URLs on the issuer. + resp, err = CBWrite(b, s, "issuer/default", map[string]interface{}{ + "issuing_certificates": []string{"https://google.com"}, + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuer/default"), logical.UpdateOperation), resp, true) + + require.NoError(t, err) + + _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allow_any_name": true, + "ttl": "85s", + "key_type": "ec", + }) + require.NoError(t, err) + + // Issue something with this re-configured issuer. + resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{ + "common_name": "localhost.com", + }) + require.NoError(t, err) + require.NotNil(t, resp) + leafCert := parseCert(t, resp.Data["certificate"].(string)) + require.Empty(t, leafCert.OCSPServer) + require.Equal(t, leafCert.IssuingCertificateURL, []string{"https://google.com"}) + require.Empty(t, leafCert.CRLDistributionPoints) + + // Set global URLs and ensure they don't appear on this issuer's leaf. 
+ _, err = CBWrite(b, s, "config/urls", map[string]interface{}{ + "issuing_certificates": []string{"https://example.com/ca", "https://backup.example.com/ca"}, + "crl_distribution_points": []string{"https://example.com/crl", "https://backup.example.com/crl"}, + "ocsp_servers": []string{"https://example.com/ocsp", "https://backup.example.com/ocsp"}, + }) + require.NoError(t, err) + resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{ + "common_name": "localhost.com", + }) + require.NoError(t, err) + require.NotNil(t, resp) + leafCert = parseCert(t, resp.Data["certificate"].(string)) + require.Empty(t, leafCert.OCSPServer) + require.Equal(t, leafCert.IssuingCertificateURL, []string{"https://google.com"}) + require.Empty(t, leafCert.CRLDistributionPoints) + + // Now come back and remove the local modifications and ensure we get + // the defaults again. + _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "issuing_certificates": []string{}, + }) + require.NoError(t, err) + resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{ + "common_name": "localhost.com", + }) + require.NoError(t, err) + require.NotNil(t, resp) + leafCert = parseCert(t, resp.Data["certificate"].(string)) + require.Equal(t, leafCert.IssuingCertificateURL, []string{"https://example.com/ca", "https://backup.example.com/ca"}) + require.Equal(t, leafCert.OCSPServer, []string{"https://example.com/ocsp", "https://backup.example.com/ocsp"}) + require.Equal(t, leafCert.CRLDistributionPoints, []string{"https://example.com/crl", "https://backup.example.com/crl"}) + + // Validate that we can set an issuer name and remove it. 
+ _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "issuer_name": "my-issuer", + }) + require.NoError(t, err) + _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "issuer_name": "", + }) + require.NoError(t, err) +} + +func TestIssuersWithoutCRLBits(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // Importing a root without CRL signing bits should work fine. + customBundleWithoutCRLBits := ` +-----BEGIN CERTIFICATE----- +MIIDGTCCAgGgAwIBAgIBATANBgkqhkiG9w0BAQsFADATMREwDwYDVQQDDAhyb290 +LW5ldzAeFw0yMjA4MjQxMjEzNTVaFw0yMzA5MDMxMjEzNTVaMBMxETAPBgNVBAMM +CHJvb3QtbmV3MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAojTA/Mx7 +LVW/Zgn/N4BqZbaF82MrTIBFug3ob7mqycNRlWp4/PH8v37+jYn8e691HUsKjden +rDTrO06kiQKiJinAzmlLJvgcazE3aXoh7wSzVG9lFHYvljEmVj+yDbkeaqaCktup +skuNjxCoN9BLmKzZIwVCHn92ZHlhN6LI7CNaU3SDJdu7VftWF9Ugzt9FIvI+6Gcn +/WNE9FWvZ9o7035rZ+1vvTn7/tgxrj2k3XvD51Kq4tsSbqjnSf3QieXT6E6uvtUE +TbPp3xjBElgBCKmeogR1l28rs1aujqqwzZ0B/zOeF8ptaH0aZOIBsVDJR8yTwHzq +s34hNdNfKLHzOwIDAQABo3gwdjAdBgNVHQ4EFgQUF4djNmx+1+uJINhZ82pN+7jz +H8EwHwYDVR0jBBgwFoAUF4djNmx+1+uJINhZ82pN+7jzH8EwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAoQwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZI +hvcNAQELBQADggEBAICQovBz4KLWlLmXeZ2Vf6WfQYyGNgGyJa10XNXtWQ5dM2NU +OLAit4x1c2dz+aFocc8ZsX/ikYi/bruT2rsGWqMAGC4at3U4GuaYGO5a6XzMKIDC +nxIlbiO+Pn6Xum7fAqUri7+ZNf/Cygmc5sByi3MAAIkszeObUDZFTJL7gEOuXIMT +rKIXCINq/U+qc7m9AQ8vKhF1Ddj+dLGLzNQ5j3cKfilPs/wRaYqbMQvnmarX+5Cs +k1UL6kWSQsiP3+UWaBlcWkmD6oZ3fIG7c0aMxf7RISq1eTAM9XjH3vMxWQJlS5q3 +2weJ2LYoPe/DwX5CijR0IezapBCrin1BscJMLFQ= +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCiNMD8zHstVb9m +Cf83gGpltoXzYytMgEW6DehvuarJw1GVanj88fy/fv6Nifx7r3UdSwqN16esNOs7 +TqSJAqImKcDOaUsm+BxrMTdpeiHvBLNUb2UUdi+WMSZWP7INuR5qpoKS26myS42P +EKg30EuYrNkjBUIef3ZkeWE3osjsI1pTdIMl27tV+1YX1SDO30Ui8j7oZyf9Y0T0 +Va9n2jvTfmtn7W+9Ofv+2DGuPaTde8PnUqri2xJuqOdJ/dCJ5dPoTq6+1QRNs+nf 
+GMESWAEIqZ6iBHWXbyuzVq6OqrDNnQH/M54Xym1ofRpk4gGxUMlHzJPAfOqzfiE1 +018osfM7AgMBAAECggEAAVd6kZZaN69IZITIc1vHRYa2rlZpKS2JP7c8Vd3Z/4Fz +ZZvnJ7LgVAmUYg5WPZ2sOqBNLfKVN/oke5Q0dALgdxYl7dWQIhPjHeRFbZFtjqEV +OXZGBniamMO/HSKGWGrqFf7BM/H7AhClUwQgjnzVSz+B+LJJidM+SVys3n1xuDmC +EP+iOda+bAHqHv/7oCELQKhLmCvPc9v2fDy+180ttdo8EHuxwVnKiyR/ryKFhSyx +K1wgAPQ9jO+V+GESL90rqpX/r501REsIOOpm4orueelHTD4+dnHxvUPqJ++9aYGX +79qBNPPUhxrQI1yoHxwW0cTxW5EqkZ9bT2lSd5rjcQKBgQDNyPBpidkHPrYemQDT +RldtS6FiW/jc1It/CRbjU4A6Gi7s3Cda43pEUObKNLeXMyLQaMf4GbDPDX+eh7B8 +RkUq0Q/N0H4bn1hbxYSUdgv0j/6czpMo6rLcJHGwOTSpHGsNsxSLL7xlpgzuzqrG +FzEgjMA1aD3w8B9+/77AoSLoMQKBgQDJyYMw82+euLYRbR5Wc/SbrWfh2n1Mr2BG +pp1ZNYorXE5CL4ScdLcgH1q/b8r5XGwmhMcpeA+geAAaKmk1CGG+gPLoq20c9Q1Y +Ykq9tUVJasIkelvbb/SPxyjkJdBwylzcPP14IJBsqQM0be+yVqLJJVHSaoKhXZcl +IW2xgCpjKwKBgFpeX5U5P+F6nKebMU2WmlYY3GpBUWxIummzKCX0SV86mFjT5UR4 +mPzfOjqaI/V2M1eqbAZ74bVLjDumAs7QXReMb5BGetrOgxLqDmrT3DQt9/YMkXtq +ddlO984XkRSisjB18BOfhvBsl0lX4I7VKHHO3amWeX0RNgOjc7VMDfRBAoGAWAQH +r1BfvZHACLXZ58fISCdJCqCsysgsbGS8eW77B5LJp+DmLQBT6DUE9j+i/0Wq/ton +rRTrbAkrsj4RicpQKDJCwe4UN+9DlOu6wijRQgbJC/Q7IOoieJxcX7eGxcve2UnZ +HY7GsD7AYRwa02UquCYJHIjM1enmxZFhMW1AD+UCgYEAm4jdNz5e4QjA4AkNF+cB +ZenrAZ0q3NbTyiSsJEAtRe/c5fNFpmXo3mqgCannarREQYYDF0+jpSoTUY8XAc4q +wL7EZNzwxITLqBnnHQbdLdAvYxB43kvWTy+JRK8qY9LAMCCFeDoYwXkWV4Wkx/b0 +TgM7RZnmEjNdeaa4M52o7VY= +-----END PRIVATE KEY----- + ` + resp, err := CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": customBundleWithoutCRLBits, + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("issuers/import/bundle"), logical.UpdateOperation), resp, true) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data) + require.NotEmpty(t, resp.Data["imported_issuers"]) + require.NotEmpty(t, resp.Data["imported_keys"]) + require.NotEmpty(t, resp.Data["mapping"]) + + // Shouldn't have crl-signing on the newly imported issuer's usage. 
+ resp, err = CBRead(b, s, "issuer/default") + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data) + require.NotEmpty(t, resp.Data["usage"]) + require.NotContains(t, resp.Data["usage"], "crl-signing") + + // Modifying to set CRL should fail. + resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "usage": "issuing-certificates,crl-signing", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // Modifying to set issuing-certificates and ocsp-signing should succeed. + resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "usage": "issuing-certificates,ocsp-signing", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotEmpty(t, resp.Data) + require.NotEmpty(t, resp.Data["usage"]) + require.NotContains(t, resp.Data["usage"], "crl-signing") +} + +func TestBackend_IfModifiedSinceHeaders(t *testing.T) { + t.Parallel() + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + RequestResponseCallback: schema.ResponseValidatingCallback(t), + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + + // Mount PKI. + err := client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + // Required to allow the header to be passed through. + PassthroughRequestHeaders: []string{"if-modified-since"}, + AllowedResponseHeaders: []string{"Last-Modified"}, + }, + }) + require.NoError(t, err) + + // Get a time before CA generation. Subtract two seconds to ensure + // the value in the seconds field is different than the time the CA + // is actually generated at. + beforeOldCAGeneration := time.Now().Add(-2 * time.Second) + + // Generate an internal CA. This one is the default. 
+ resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "Root X1", + "key_type": "ec", + "issuer_name": "old-root", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + + // CA is generated, but give a grace window. + afterOldCAGeneration := time.Now().Add(2 * time.Second) + + // When you _save_ headers, client returns a copy. But when you go to + // reset them, it doesn't create a new copy (and instead directly + // assigns). This means we have to continually refresh our view of the + // last headers, otherwise the headers added after the last set operation + // leak into this copy... Yuck! + lastHeaders := client.Headers() + for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/old-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta"} { + t.Logf("path: %v", path) + field := "certificate" + if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { + field = "crl" + } + + // Reading the CA should work, without a header. + resp, err := client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + + // Ensure that the CA is returned correctly if we give it the old time. + client.AddHeader("If-Modified-Since", beforeOldCAGeneration.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + + // Ensure that the CA is elided if we give it the present time (plus a + // grace window). 
+ client.AddHeader("If-Modified-Since", afterOldCAGeneration.Format(time.RFC1123)) + t.Logf("headers: %v", client.Headers()) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + + // Wait three seconds. This ensures we have adequate grace period + // to distinguish the two cases, even with grace periods. + time.Sleep(3 * time.Second) + + // Generating a second root. This one isn't the default. + beforeNewCAGeneration := time.Now().Add(-2 * time.Second) + + // Generate an internal CA. This one is the default. + _, err = client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "Root X1", + "key_type": "ec", + "issuer_name": "new-root", + }) + require.NoError(t, err) + + // As above. + afterNewCAGeneration := time.Now().Add(2 * time.Second) + + // New root isn't the default, so it has fewer paths. + for _, path := range []string{"pki/issuer/new-root/json", "pki/issuer/new-root/crl", "pki/issuer/new-root/crl/delta"} { + t.Logf("path: %v", path) + field := "certificate" + if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { + field = "crl" + } + + // Reading the CA should work, without a header. + resp, err := client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + + // Ensure that the CA is returned correctly if we give it the old time. + client.AddHeader("If-Modified-Since", beforeNewCAGeneration.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + + // Ensure that the CA is elided if we give it the present time (plus a + // grace window). 
+ client.AddHeader("If-Modified-Since", afterNewCAGeneration.Format(time.RFC1123)) + t.Logf("headers: %v", client.Headers()) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + + // Wait three seconds. This ensures we have adequate grace period + // to distinguish the two cases, even with grace periods. + time.Sleep(3 * time.Second) + + // Now swap the default issuers around. + _, err = client.Logical().Write("pki/config/issuers", map[string]interface{}{ + "default": "new-root", + }) + require.NoError(t, err) + + // Reading both with the last modified date should return new values. + for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { + t.Logf("path: %v", path) + field := "certificate" + if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { + field = "crl" + } + + // Ensure that the CA is returned correctly if we give it the old time. + client.AddHeader("If-Modified-Since", afterOldCAGeneration.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + + // Ensure that the CA is returned correctly if we give it the old time. 
+ client.AddHeader("If-Modified-Since", afterNewCAGeneration.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + + // Wait for things to settle, record the present time, and wait for the + // clock to definitely tick over again. + time.Sleep(2 * time.Second) + preRevocationTimestamp := time.Now() + time.Sleep(2 * time.Second) + + // The above tests should say everything is cached. + for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { + t.Logf("path: %v", path) + + // Ensure that the CA is returned correctly if we give it the new time. + client.AddHeader("If-Modified-Since", preRevocationTimestamp.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + + // We could generate some leaves and verify the revocation updates the + // CRL. But, revoking the issuer behaves the same, so let's do that + // instead. + _, err = client.Logical().Write("pki/issuer/old-root/revoke", map[string]interface{}{}) + require.NoError(t, err) + + // CA should still be valid. + for _, path := range []string{"pki/cert/ca", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json"} { + t.Logf("path: %v", path) + + // Ensure that the CA is returned correctly if we give it the old time. 
+ client.AddHeader("If-Modified-Since", preRevocationTimestamp.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + + // CRL should be invalidated + for _, path := range []string{"pki/cert/crl", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { + t.Logf("path: %v", path) + field := "certificate" + if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { + field = "crl" + } + + client.AddHeader("If-Modified-Since", preRevocationTimestamp.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + + // If we send some time in the future, everything should be cached again! + futureTime := time.Now().Add(30 * time.Second) + for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { + t.Logf("path: %v", path) + + // Ensure that the CA is returned correctly if we give it the new time. + client.AddHeader("If-Modified-Since", futureTime.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + + beforeThreeWaySwap := time.Now().Add(-2 * time.Second) + + // Now, do a three-way swap of names (old->tmp; new->old; tmp->new). This + // should result in all names/CRLs being invalidated. 
+ _, err = client.Logical().JSONMergePatch(ctx, "pki/issuer/old-root", map[string]interface{}{ + "issuer_name": "tmp-root", + }) + require.NoError(t, err) + _, err = client.Logical().JSONMergePatch(ctx, "pki/issuer/new-root", map[string]interface{}{ + "issuer_name": "old-root", + }) + require.NoError(t, err) + _, err = client.Logical().JSONMergePatch(ctx, "pki/issuer/tmp-root", map[string]interface{}{ + "issuer_name": "new-root", + }) + require.NoError(t, err) + + afterThreeWaySwap := time.Now().Add(2 * time.Second) + + for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl", "pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { + t.Logf("path: %v", path) + field := "certificate" + if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { + field = "crl" + } + + // Ensure that the CA is returned if we give it the pre-update time. + client.AddHeader("If-Modified-Since", beforeThreeWaySwap.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + + // Ensure that the CA is elided correctly if we give it the after time. + client.AddHeader("If-Modified-Since", afterThreeWaySwap.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + + // Finally, rebuild the delta CRL and ensure that only that is + // invalidated. We first need to enable it though, and wait for + // all CRLs to rebuild. 
+ _, err = client.Logical().Write("pki/config/crl", map[string]interface{}{ + "auto_rebuild": true, + "enable_delta": true, + }) + require.NoError(t, err) + time.Sleep(4 * time.Second) + beforeDeltaRotation := time.Now().Add(-2 * time.Second) + + resp, err = client.Logical().Read("pki/crl/rotate-delta") + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["success"], true) + + afterDeltaRotation := time.Now().Add(2 * time.Second) + + for _, path := range []string{"pki/cert/ca", "pki/cert/crl", "pki/issuer/default/json", "pki/issuer/old-root/json", "pki/issuer/new-root/json", "pki/issuer/old-root/crl", "pki/issuer/new-root/crl"} { + t.Logf("path: %v", path) + + for _, when := range []time.Time{beforeDeltaRotation, afterDeltaRotation} { + client.AddHeader("If-Modified-Since", when.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } + } + + for _, path := range []string{"pki/cert/delta-crl", "pki/issuer/old-root/crl/delta", "pki/issuer/new-root/crl/delta"} { + t.Logf("path: %v", path) + field := "certificate" + if strings.HasPrefix(path, "pki/issuer") && strings.Contains(path, "/crl") { + field = "crl" + } + + // Ensure that the CRL is present if we give it the pre-update time. 
+ client.AddHeader("If-Modified-Since", beforeDeltaRotation.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data[field]) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + + client.AddHeader("If-Modified-Since", afterDeltaRotation.Format(time.RFC1123)) + resp, err = client.Logical().Read(path) + require.NoError(t, err) + require.Nil(t, resp) + client.SetHeaders(lastHeaders) + lastHeaders = client.Headers() + } +} + +func TestBackend_InitializeCertificateCounts(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + ctx := context.Background() + + // Set up an Issuer and Role + // We need a root certificate to write/revoke certificates with + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + if resp == nil { + t.Fatal("expected ca info") + } + + // Create a role + _, err = CBWrite(b, s, "roles/example", map[string]interface{}{ + "allowed_domains": "myvault.com", + "allow_bare_domains": true, + "allow_subdomains": true, + "max_ttl": "2h", + }) + if err != nil { + t.Fatal(err) + } + + // Put certificates A, B, C, D, E in backend + var certificates []string = []string{"a", "b", "c", "d", "e"} + serials := make([]string, 5) + for i, cn := range certificates { + resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ + "common_name": cn + ".myvault.com", + }) + if err != nil { + t.Fatal(err) + } + serials[i] = resp.Data["serial_number"].(string) + } + + // Turn on certificate counting: + CBWrite(b, s, "config/auto-tidy", map[string]interface{}{ + "maintain_stored_certificate_counts": true, + "publish_stored_certificate_count_metrics": false, + }) + // Assert initialize from clean is correct: + b.initializeStoredCertificateCounts(ctx) + + // Revoke certificates A + B + revocations := serials[0:2] + for _, 
key := range revocations { + resp, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": key, + }) + if err != nil { + t.Fatal(err) + } + } + + if b.certCount.Load() != 6 { + t.Fatalf("Failed to count six certificates root,A,B,C,D,E, instead counted %d certs", b.certCount.Load()) + } + if b.revokedCertCount.Load() != 2 { + t.Fatalf("Failed to count two revoked certificates A+B, instead counted %d certs", b.revokedCertCount.Load()) + } + + // Simulates listing while initialize in progress, by "restarting it" + b.certCount.Store(0) + b.revokedCertCount.Store(0) + b.certsCounted.Store(false) + + // Revoke certificates C, D + dirtyRevocations := serials[2:4] + for _, key := range dirtyRevocations { + resp, err = CBWrite(b, s, "revoke", map[string]interface{}{ + "serial_number": key, + }) + if err != nil { + t.Fatal(err) + } + } + + // Put certificates F, G in the backend + dirtyCertificates := []string{"f", "g"} + for _, cn := range dirtyCertificates { + resp, err = CBWrite(b, s, "issue/example", map[string]interface{}{ + "common_name": cn + ".myvault.com", + }) + if err != nil { + t.Fatal(err) + } + } + + // Run initialize + b.initializeStoredCertificateCounts(ctx) + + // Test certificate count + if b.certCount.Load() != 8 { + t.Fatalf("Failed to initialize count of certificates root, A,B,C,D,E,F,G counted %d certs", b.certCount.Load()) + } + + if b.revokedCertCount.Load() != 4 { + t.Fatalf("Failed to count revoked certificates A,B,C,D counted %d certs", b.revokedCertCount.Load()) + } + + return +} + +// Verify that our default values are consistent when creating an issuer and when we do an +// empty POST update to it. This will hopefully identify if we have different default values +// for fields across the two APIs. 
+func TestBackend_VerifyIssuerUpdateDefaultsMatchCreation(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "myvault.com", + }) + requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer") + + resp, err = CBRead(b, s, "issuer/default") + requireSuccessNonNilResponse(t, resp, err, "failed reading default issuer") + preUpdateValues := resp.Data + + // This field gets reset during issuer update to the empty string + // (meaning Go will auto-detect the rev-sig-algo). + preUpdateValues["revocation_signature_algorithm"] = "" + + resp, err = CBWrite(b, s, "issuer/default", map[string]interface{}{}) + requireSuccessNonNilResponse(t, resp, err, "failed updating default issuer with no values") + + resp, err = CBRead(b, s, "issuer/default") + requireSuccessNonNilResponse(t, resp, err, "failed reading default issuer") + postUpdateValues := resp.Data + + require.Equal(t, preUpdateValues, postUpdateValues, + "A value was updated based on the empty update of an issuer, "+ + "most likely we have a different set of field parameters across create and update of issuers.") +} + +func TestBackend_VerifyPSSKeysIssuersFailImport(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // PKCS8 parsing fails on this key due to rsaPSS OID + rsaOIDKey := ` +-----BEGIN PRIVATE KEY----- +MIIEugIBADALBgkqhkiG9w0BAQoEggSmMIIEogIBAAKCAQEAtN0/NPuJHLuyEdBr +tUikXoXOV741XZcNvLAIVBIqDA0ege2gXt9A15FGUI4X3u6kT16Fl6MRdtUZ/qNS +Vs15nK9A1PI/AVekMgTVFTnoCzs550CKN8iRk9Om+lwHimpyXxKkFW69v8fsXwKE +Bsz69jjT7HV9VZQ7fQhmE79brAMuwKP1fUQKdHq5OBKtQ7Cl3Gmipp0izCsVuQIE +kBHvT3UUgyaSp2n+FONpOiyuBoYUH5tVEv9sZzBqSsrYBJYF+GvfnFy9AcTdqRe2 +VX2SjjWjDF84T30OBA798gIFIPwu9R4OjWOlPeh2bo2kGeo3AITjwFZ28m7kS7kc +OtvHpwIDAQABAoIBAFQxmjbj0RQbG+3HBBzD0CBgUYnu9ZC3vKFVoMriGci6YrVB +FSKU8u5mpkDhpKMWnE6GRdItCvgyg4NSLAZUaIRT4O5ARqwtTDYsobTb2/U+gNnx 
+5WXKbFpQcK6jIK+ClfNEDjYb8yDPxG0GEsfHrBvqoFy25L1t37N4sWwH7HjJyZIe +Hbqx4NVDur9qgqaUwkfSeufn4ycHqFtkzKNzCUarDkST9cxE6/1AKfhl09PPuMEa +lAY2JLiEplQL5sh9cxG5FObJbutJo5EIhR2OdM0VcPf0MTD9LXKRoGR3SNlG7IlS +llJzBjlh4J1ByMX32btKMHzEvlhyrMI90E1SEGECgYEAx1yDQWe4/b1MBqCxA3d0 +20dDmUHSRQFhkd/Mzkl5dPzRkG42W3ryNbMKdeuL0ZgK9AhfaLCjcj1i+44O7dHb +qBTVwfRrer2uoQVCqqJ6z8PGxPJJxTaqh9QuJxkoQ0i43ZNPcjc2M2sWLn+lkkdE +MaGMiyrmjIQEC6tmgCtZ1VUCgYEA6D9xoT9VuAnQjDvW2tO5N2U2H/8ZyRd1pC3z +H1CzjwShhxsP4YOUaVdw59K95JL4SMxSmpRrhthlW3cRaiT/exBcXLEvz0Qu0OhW +a6155ZFjK3UaLDKlwvmtuoAsuAFqX084LO0B1oxvUJESgyPncQ36fv2lZGV7A66z +Uo+BKQsCgYB2yGBMMAjA5nDN4iCV+C7gF+3m+pjWFKSVzcqxfoWndptGeuRYTUDT +TgIFkHqWPwkHrZVrQxOflYPMbi/m8wr1crSKA5+mWi4aMpAuKvERqYxc/B+IKbIh +jAKTuSGMNWAwZP0JCGx65mso+VUleuDe0Wpz4PPM9TuT2GQSKcI0oQKBgHAHcouC +npmo+lU65DgoWzaydrpWdpy+2Tt6AsW/Su4ZIMWoMy/oJaXuzQK2cG0ay/NpxArW +v0uLhNDrDZZzBF3blYIM4nALhr205UMJqjwntnuXACoDwFvdzoShIXEdFa+l6gYZ +yYIxudxWLmTd491wDb5GIgrcvMsY8V1I5dfjAoGAM9g2LtdqgPgK33dCDtZpBm8m +y4ri9PqHxnpps9WJ1dO6MW/YbW+a7vbsmNczdJ6XNLEfy2NWho1dw3xe7ztFVDjF +cWNUzs1+/6aFsi41UX7EFn3zAFhQUPxT59hXspuWuKbRAWc5fMnxbCfI/Cr8wTLJ +E/0kiZ4swUMyI4tYSbM= +-----END PRIVATE KEY----- +` + _, err := CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": rsaOIDKey, + }) + require.Error(t, err, "expected error importing PKCS8 rsaPSS OID key") + + _, err = CBWrite(b, s, "keys/import", map[string]interface{}{ + "key": rsaOIDKey, + }) + require.Error(t, err, "expected error importing PKCS8 rsaPSS OID key") + + // Importing a cert with rsaPSS OID should also fail + rsaOIDCert := ` +-----BEGIN CERTIFICATE----- +MIIDfjCCAjGgAwIBAgIBATBCBgkqhkiG9w0BAQowNaAPMA0GCWCGSAFlAwQCAQUA +oRwwGgYJKoZIhvcNAQEIMA0GCWCGSAFlAwQCAQUAogQCAgDeMBMxETAPBgNVBAMM +CHJvb3Qtb2xkMB4XDTIyMDkxNjE0MDEwM1oXDTIzMDkyNjE0MDEwM1owEzERMA8G +A1UEAwwIcm9vdC1vbGQwggEgMAsGCSqGSIb3DQEBCgOCAQ8AMIIBCgKCAQEAtN0/ +NPuJHLuyEdBrtUikXoXOV741XZcNvLAIVBIqDA0ege2gXt9A15FGUI4X3u6kT16F 
+l6MRdtUZ/qNSVs15nK9A1PI/AVekMgTVFTnoCzs550CKN8iRk9Om+lwHimpyXxKk +FW69v8fsXwKEBsz69jjT7HV9VZQ7fQhmE79brAMuwKP1fUQKdHq5OBKtQ7Cl3Gmi +pp0izCsVuQIEkBHvT3UUgyaSp2n+FONpOiyuBoYUH5tVEv9sZzBqSsrYBJYF+Gvf +nFy9AcTdqRe2VX2SjjWjDF84T30OBA798gIFIPwu9R4OjWOlPeh2bo2kGeo3AITj +wFZ28m7kS7kcOtvHpwIDAQABo3UwczAdBgNVHQ4EFgQUVGkTAUJ8inxIVGBlfxf4 +cDhRSnowHwYDVR0jBBgwFoAUVGkTAUJ8inxIVGBlfxf4cDhRSnowDAYDVR0TBAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAYYwEwYDVR0lBAwwCgYIKwYBBQUHAwEwQgYJKoZI +hvcNAQEKMDWgDzANBglghkgBZQMEAgEFAKEcMBoGCSqGSIb3DQEBCDANBglghkgB +ZQMEAgEFAKIEAgIA3gOCAQEAQZ3iQ3NjvS4FYJ5WG41huZI0dkvNFNan+ZYWlYHJ +MIQhbFogb/UQB0rlsuldG0+HF1RDXoYNuThfzt5hiBWYEtMBNurezvnOn4DF0hrl +Uk3sBVnvTalVXg+UVjqh9hBGB75JYJl6a5Oa2Zrq++4qGNwjd0FqgnoXzqS5UGuB +TJL8nlnXPuOIK3VHoXEy7l9GtvEzKcys0xa7g1PYpaJ5D2kpbBJmuQGmU6CDcbP+ +m0hI4QDfVfHtnBp2VMCvhj0yzowtwF4BFIhv4EXZBU10mzxVj0zyKKft9++X8auH +nebuK22ZwzbPe4NhOvAdfNDElkrrtGvTnzkDB7ezPYjelA== +-----END CERTIFICATE----- +` + _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": rsaOIDCert, + }) + require.Error(t, err, "expected error importing PKCS8 rsaPSS OID cert") + + _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": rsaOIDKey + "\n" + rsaOIDCert, + }) + require.Error(t, err, "expected error importing PKCS8 rsaPSS OID key+cert") + + _, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": rsaOIDCert + "\n" + rsaOIDKey, + }) + require.Error(t, err, "expected error importing PKCS8 rsaPSS OID cert+key") + + // After all these errors, we should have zero issuers and keys. + resp, err := CBList(b, s, "issuers") + require.NoError(t, err) + require.Equal(t, nil, resp.Data["keys"]) + + resp, err = CBList(b, s, "keys") + require.NoError(t, err) + require.Equal(t, nil, resp.Data["keys"]) + + // If we create a new PSS root, we should be able to issue an intermediate + // under it. 
+ resp, err = CBWrite(b, s, "root/generate/exported", map[string]interface{}{ + "use_pss": "true", + "common_name": "root x1 - pss", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + require.NotEmpty(t, resp.Data["private_key"]) + + resp, err = CBWrite(b, s, "intermediate/generate/exported", map[string]interface{}{ + "use_pss": "true", + "common_name": "int x1 - pss", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["csr"]) + require.NotEmpty(t, resp.Data["private_key"]) + + resp, err = CBWrite(b, s, "issuer/default/sign-intermediate", map[string]interface{}{ + "use_pss": "true", + "common_name": "int x1 - pss", + "csr": resp.Data["csr"].(string), + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + + resp, err = CBWrite(b, s, "issuers/import/bundle", map[string]interface{}{ + "pem_bundle": resp.Data["certificate"].(string), + }) + require.NoError(t, err) + + // Finally, if we were to take an rsaPSS OID'd CSR and use it against this + // mount, it will fail. + _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allow_any_name": true, + "ttl": "85s", + "key_type": "any", + }) + require.NoError(t, err) + + // Issuing a leaf from a CSR with rsaPSS OID should fail... 
+ rsaOIDCSR := `-----BEGIN CERTIFICATE REQUEST----- +MIICkTCCAUQCAQAwGTEXMBUGA1UEAwwOcmFuY2hlci5teS5vcmcwggEgMAsGCSqG +SIb3DQEBCgOCAQ8AMIIBCgKCAQEAtzHuGEUK55lXI08yp9DXoye9yCZbkJZO+Hej +1TWGEkbX4hzauRJeNp2+wn8xU5y8ITjWSIXEVDHeezosLCSy0Y2QT7/V45zWPUYY +ld0oUnPiwsb9CPFlBRFnX3dO9SS5MONIrNCJGKXmLdF3lgSl8zPT6J/hWM+JBjHO +hBzK6L8IYwmcEujrQfnOnOztzgMEBJtWG8rnI8roz1adpczTddDKGymh2QevjhlL +X9CLeYSSQZInOMsgaDYl98Hn00K5x0CBp8ADzzXtaPSQ9nsnihN8VvZ/wHw6YbBS +BSHa6OD+MrYnw3Sao6/YgBRNT2glIX85uro4ARW9zGB9/748dwIDAQABoAAwQgYJ +KoZIhvcNAQEKMDWgDzANBglghkgBZQMEAgEFAKEcMBoGCSqGSIb3DQEBCDANBglg +hkgBZQMEAgEFAKIEAgIA3gOCAQEARGAa0HiwzWCpvAdLOVc4/srEyOYFZPLbtv+Y +ezZIaUBNaWhOvkunqpa48avmcbGlji7r6fxJ5sT28lHt7ODWcJfn1XPAnqesXErm +EBuOIhCv6WiwVyGeTVynuHYkHyw3rIL/zU7N8+zIFV2G2M1UAv5D/eyh/74cr9Of ++nvm9jAbkHix8UwOBCFY2LLNl6bXvbIeJEdDOEtA9UmDXs8QGBg4lngyqcE2Z7rz ++5N/x4guMk2FqblbFGiCc5fLB0Gp6lFFOqhX9Q8nLJ6HteV42xGJUUtsFpppNCRm +82dGIH2PTbXZ0k7iAAwLaPjzOv1v58Wq90o35d4iEsOfJ8v98Q== +-----END CERTIFICATE REQUEST-----` + + _, err = CBWrite(b, s, "issuer/default/sign/testing", map[string]interface{}{ + "common_name": "example.com", + "csr": rsaOIDCSR, + }) + require.Error(t, err) + + _, err = CBWrite(b, s, "issuer/default/sign-verbatim", map[string]interface{}{ + "common_name": "example.com", + "use_pss": true, + "csr": rsaOIDCSR, + }) + require.Error(t, err) + + _, err = CBWrite(b, s, "issuer/default/sign-intermediate", map[string]interface{}{ + "common_name": "faulty x1 - pss", + "use_pss": true, + "csr": rsaOIDCSR, + }) + require.Error(t, err) + + // Vault has a weird API for signing self-signed certificates. Ensure + // that doesn't accept rsaPSS OID'd certificates either. + _, err = CBWrite(b, s, "issuer/default/sign-self-issued", map[string]interface{}{ + "use_pss": true, + "certificate": rsaOIDCert, + }) + require.Error(t, err) + + // Issuing a regular leaf should succeed. 
+ _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allow_any_name": true, + "ttl": "85s", + "key_type": "rsa", + "use_pss": "true", + }) + require.NoError(t, err) + + resp, err = CBWrite(b, s, "issuer/default/issue/testing", map[string]interface{}{ + "common_name": "example.com", + "use_pss": "true", + }) + requireSuccessNonNilResponse(t, resp, err, "failed to issue PSS leaf") +} + +func TestPKI_EmptyCRLConfigUpgraded(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // Write an empty CRLConfig into storage. + crlConfigEntry, err := logical.StorageEntryJSON("config/crl", &crlConfig{}) + require.NoError(t, err) + err = s.Put(ctx, crlConfigEntry) + require.NoError(t, err) + + resp, err := CBRead(b, s, "config/crl") + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["expiry"], defaultCrlConfig.Expiry) + require.Equal(t, resp.Data["disable"], defaultCrlConfig.Disable) + require.Equal(t, resp.Data["ocsp_disable"], defaultCrlConfig.OcspDisable) + require.Equal(t, resp.Data["auto_rebuild"], defaultCrlConfig.AutoRebuild) + require.Equal(t, resp.Data["auto_rebuild_grace_period"], defaultCrlConfig.AutoRebuildGracePeriod) + require.Equal(t, resp.Data["enable_delta"], defaultCrlConfig.EnableDelta) + require.Equal(t, resp.Data["delta_rebuild_interval"], defaultCrlConfig.DeltaRebuildInterval) +} + +func TestPKI_ListRevokedCerts(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // Test empty cluster + resp, err := CBList(b, s, "certs/revoked") + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("certs/revoked"), logical.ListOperation), resp, true) + requireSuccessNonNilResponse(t, resp, err, "failed listing empty cluster") + require.Empty(t, resp.Data, "response map contained data that we did not expect") + + // Set up a mount that we can revoke under (We will create 3 leaf certs, 2 of which will be revoked) + resp, err = CBWrite(b, s, 
"root/generate/internal", map[string]interface{}{ + "common_name": "test.com", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "error generating root CA") + requireFieldsSetInResp(t, resp, "serial_number") + issuerSerial := resp.Data["serial_number"] + + resp, err = CBWrite(b, s, "roles/test", map[string]interface{}{ + "allowed_domains": "test.com", + "allow_subdomains": "true", + "max_ttl": "1h", + }) + requireSuccessNonNilResponse(t, resp, err, "error setting up pki role") + + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "test1.test.com", + }) + requireSuccessNonNilResponse(t, resp, err, "error issuing cert 1") + requireFieldsSetInResp(t, resp, "serial_number") + serial1 := resp.Data["serial_number"] + + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "test2.test.com", + }) + requireSuccessNonNilResponse(t, resp, err, "error issuing cert 2") + requireFieldsSetInResp(t, resp, "serial_number") + serial2 := resp.Data["serial_number"] + + resp, err = CBWrite(b, s, "issue/test", map[string]interface{}{ + "common_name": "test3.test.com", + }) + requireSuccessNonNilResponse(t, resp, err, "error issuing cert 2") + requireFieldsSetInResp(t, resp, "serial_number") + serial3 := resp.Data["serial_number"] + + resp, err = CBWrite(b, s, "revoke", map[string]interface{}{"serial_number": serial1}) + requireSuccessNonNilResponse(t, resp, err, "error revoking cert 1") + + resp, err = CBWrite(b, s, "revoke", map[string]interface{}{"serial_number": serial2}) + requireSuccessNonNilResponse(t, resp, err, "error revoking cert 2") + + // Test that we get back the expected revoked serial numbers. 
+ resp, err = CBList(b, s, "certs/revoked") + requireSuccessNonNilResponse(t, resp, err, "failed listing revoked certs") + requireFieldsSetInResp(t, resp, "keys") + revokedKeys := resp.Data["keys"].([]string) + + require.Contains(t, revokedKeys, serial1) + require.Contains(t, revokedKeys, serial2) + require.Equal(t, 2, len(revokedKeys), "Expected 2 revoked entries got %d: %v", len(revokedKeys), revokedKeys) + + // Test that listing our certs returns a different response + resp, err = CBList(b, s, "certs") + requireSuccessNonNilResponse(t, resp, err, "failed listing written certs") + requireFieldsSetInResp(t, resp, "keys") + certKeys := resp.Data["keys"].([]string) + + require.Contains(t, certKeys, serial1) + require.Contains(t, certKeys, serial2) + require.Contains(t, certKeys, serial3) + require.Contains(t, certKeys, issuerSerial) + require.Equal(t, 4, len(certKeys), "Expected 4 cert entries got %d: %v", len(certKeys), certKeys) +} + +func TestPKI_TemplatedAIAs(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // Setting templated AIAs should succeed. 
+ resp, err := CBWrite(b, s, "config/cluster", map[string]interface{}{ + "path": "http://localhost:8200/v1/pki", + "aia_path": "http://localhost:8200/cdn/pki", + }) + require.NoError(t, err) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/cluster"), logical.UpdateOperation), resp, true) + + resp, err = CBRead(b, s, "config/cluster") + require.NoError(t, err) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("config/cluster"), logical.ReadOperation), resp, true) + + aiaData := map[string]interface{}{ + "crl_distribution_points": "{{cluster_path}}/issuer/{{issuer_id}}/crl/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", + "ocsp_servers": "{{cluster_path}}/ocsp", + "enable_templating": true, + } + _, err = CBWrite(b, s, "config/urls", aiaData) + require.NoError(t, err) + + // Root generation should succeed, but without AIA info. + rootData := map[string]interface{}{ + "common_name": "Long-Lived Root X1", + "issuer_name": "long-root-x1", + "key_type": "ec", + } + resp, err = CBWrite(b, s, "root/generate/internal", rootData) + require.NoError(t, err) + _, err = CBDelete(b, s, "root") + require.NoError(t, err) + + // Clearing the config and regenerating the root should still succeed. + _, err = CBWrite(b, s, "config/urls", map[string]interface{}{ + "crl_distribution_points": "{{cluster_path}}/issuer/my-root-id/crl/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/my-root-id/der", + "ocsp_servers": "{{cluster_path}}/ocsp", + "enable_templating": true, + }) + require.NoError(t, err) + resp, err = CBWrite(b, s, "root/generate/internal", rootData) + requireSuccessNonNilResponse(t, resp, err) + issuerId := string(resp.Data["issuer_id"].(issuerID)) + + // Now write the original AIA config and sign a leaf. 
+ _, err = CBWrite(b, s, "config/urls", aiaData) + require.NoError(t, err) + _, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allow_any_name": "true", + "key_type": "ec", + "ttl": "50m", + }) + require.NoError(t, err) + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "example.com", + }) + requireSuccessNonNilResponse(t, resp, err) + + // Validate the AIA info is correctly templated. + cert := parseCert(t, resp.Data["certificate"].(string)) + require.Equal(t, cert.OCSPServer, []string{"http://localhost:8200/v1/pki/ocsp"}) + require.Equal(t, cert.IssuingCertificateURL, []string{"http://localhost:8200/cdn/pki/issuer/" + issuerId + "/der"}) + require.Equal(t, cert.CRLDistributionPoints, []string{"http://localhost:8200/v1/pki/issuer/" + issuerId + "/crl/der"}) + + // Modify our issuer to set custom AIAs: these URLs are bad. + _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "enable_aia_url_templating": "false", + "crl_distribution_points": "a", + "issuing_certificates": "b", + "ocsp_servers": "c", + }) + require.Error(t, err) + + // These URLs are good. + _, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "enable_aia_url_templating": "false", + "crl_distribution_points": "http://localhost/a", + "issuing_certificates": "http://localhost/b", + "ocsp_servers": "http://localhost/c", + }) + + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "example.com", + }) + requireSuccessNonNilResponse(t, resp, err) + + // Validate the AIA info is correctly templated. + cert = parseCert(t, resp.Data["certificate"].(string)) + require.Equal(t, cert.OCSPServer, []string{"http://localhost/c"}) + require.Equal(t, cert.IssuingCertificateURL, []string{"http://localhost/b"}) + require.Equal(t, cert.CRLDistributionPoints, []string{"http://localhost/a"}) + + // These URLs are bad, but will fail at issuance time due to AIA templating. 
+ resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "enable_aia_url_templating": "true", + "crl_distribution_points": "a", + "issuing_certificates": "b", + "ocsp_servers": "c", + }) + requireSuccessNonNilResponse(t, resp, err) + require.NotEmpty(t, resp.Warnings) + _, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "example.com", + }) + require.Error(t, err) +} + +func requireSubjectUserIDAttr(t *testing.T, cert string, target string) { + xCert := parseCert(t, cert) + + for _, attr := range xCert.Subject.Names { + var userID string + if attr.Type.Equal(certutil.SubjectPilotUserIDAttributeOID) { + if target == "" { + t.Fatalf("expected no UserID (OID: %v) subject attributes in cert:\n%v", certutil.SubjectPilotUserIDAttributeOID, cert) + } + + switch aValue := attr.Value.(type) { + case string: + userID = aValue + case []byte: + userID = string(aValue) + default: + t.Fatalf("unknown type for UserID attribute: %v\nCert: %v", attr, cert) + } + + if userID == target { + return + } + } + } + + if target != "" { + t.Fatalf("failed to find UserID (OID: %v) matching %v in cert:\n%v", certutil.SubjectPilotUserIDAttributeOID, target, cert) + } +} + +func TestUserIDsInLeafCerts(t *testing.T) { + t.Parallel() + b, s := CreateBackendWithStorage(t) + + // 1. Setup root issuer. + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Vault Root CA", + "key_type": "ec", + "ttl": "7200h", + }) + requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer") + + // 2. Allow no user IDs. + resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allowed_user_ids": "", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up role") + + // - Issue cert without user IDs should work. 
+ resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with user ID should fail. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // 3. Allow any user IDs. + resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allowed_user_ids": "*", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up role") + + // - Issue cert without user IDs. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with one user ID. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + + // - Issue cert with two user IDs. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid,robot", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") + + // 4. Allow one specific user ID. + resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allowed_user_ids": "humanoid", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up role") + + // - Issue cert without user IDs. 
+ resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with approved ID. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + + // - Issue cert with non-approved user ID should fail. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "robot", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // - Issue cert with one approved and one non-approved should also fail. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid,robot", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // 5. Allow two specific user IDs. + resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allowed_user_ids": "humanoid,robot", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up role") + + // - Issue cert without user IDs. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with one approved ID. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + + // - Issue cert with other user ID. 
+ resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "robot", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") + + // - Issue cert with unknown user ID will fail. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "robot2", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // - Issue cert with both should succeed. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid,robot", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "robot") + + // 6. Use a glob. + resp, err = CBWrite(b, s, "roles/testing", map[string]interface{}{ + "allowed_user_ids": "human*", + "key_type": "ec", + "use_csr_sans": true, // setup for further testing. + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up role") + + // - Issue cert without user IDs. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "") + + // - Issue cert with approved ID. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "humanoid", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + + // - Issue cert with another approved ID. 
+ resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "human", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "human") + + // - Issue cert with literal glob. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "human*", + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "human*") + + // - Still no robotic certs are allowed; will fail. + resp, err = CBWrite(b, s, "issue/testing", map[string]interface{}{ + "common_name": "localhost", + "user_ids": "robot", + }) + require.Error(t, err) + require.True(t, resp.IsError()) + + // Create a CSR and validate it works with both sign/ and sign-verbatim. + csrTemplate := x509.CertificateRequest{ + Subject: pkix.Name{ + CommonName: "localhost", + ExtraNames: []pkix.AttributeTypeAndValue{ + { + Type: certutil.SubjectPilotUserIDAttributeOID, + Value: "humanoid", + }, + }, + }, + } + _, _, csrPem := generateCSR(t, &csrTemplate, "ec", 256) + + // Should work with role-based signing. + resp, err = CBWrite(b, s, "sign/testing", map[string]interface{}{ + "csr": csrPem, + }) + schema.ValidateResponse(t, schema.GetResponseSchema(t, b.Route("sign/testing"), logical.UpdateOperation), resp, true) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") + + // - Definitely will work with sign-verbatim. 
+ resp, err = CBWrite(b, s, "sign-verbatim", map[string]interface{}{ + "csr": csrPem, + }) + requireSuccessNonNilResponse(t, resp, err, "failed issuing leaf cert") + requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") +} + +// TestStandby_Operations test proper forwarding for PKI requests from a standby node to the +// active node within a cluster. +func TestStandby_Operations(t *testing.T) { + conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + }, nil, teststorage.InmemBackendSetup) + cluster := vault.NewTestCluster(t, conf, opts) + cluster.Start() + defer cluster.Cleanup() + + testhelpers.WaitForActiveNodeAndStandbys(t, cluster) + standbyCores := testhelpers.DeriveStandbyCores(t, cluster) + require.Greater(t, len(standbyCores), 0, "Need at least one standby core.") + client := standbyCores[0].Client + + mountPKIEndpoint(t, client, "pki") + + _, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "root-ca.com", + "ttl": "600h", + }) + require.NoError(t, err, "error setting up pki role: %v", err) + + _, err = client.Logical().Write("pki/roles/example", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "no_store": "false", // make sure we store this cert + "ttl": "5h", + "key_type": "ec", + }) + require.NoError(t, err, "error setting up pki role: %v", err) + + resp, err := client.Logical().Write("pki/issue/example", map[string]interface{}{ + "common_name": "test.example.com", + }) + require.NoError(t, err, "error issuing certificate: %v", err) + require.NotNil(t, resp, "got nil response from issuing request") + serialOfCert := resp.Data["serial_number"].(string) + + resp, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": serialOfCert, + }) + require.NoError(t, err, "error revoking certificate: %v", err) + require.NotNil(t, 
resp, "got nil response from revoke request") +} + +type pathAuthCheckerFunc func(t *testing.T, client *api.Client, path string, token string) + +func isPermDenied(err error) bool { + return err != nil && strings.Contains(err.Error(), "permission denied") +} + +func isUnsupportedPathOperation(err error) bool { + return err != nil && (strings.Contains(err.Error(), "unsupported path") || strings.Contains(err.Error(), "unsupported operation")) +} + +func isDeniedOp(err error) bool { + return isPermDenied(err) || isUnsupportedPathOperation(err) +} + +func pathShouldBeAuthed(t *testing.T, client *api.Client, path string, token string) { + client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to read %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to list %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to write %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to delete %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to patch %v while unauthed: %v / %v", path, err, resp) + } +} + +func pathShouldBeUnauthedReadList(t *testing.T, client *api.Client, path string, token string) { + // Should be able to read both with and without a token. 
+ client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // Read will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. + client.SetToken(token) + resp2, err2 := client.Logical().ReadWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to read %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // List will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. + client.SetToken(token) + resp2, err2 := client.Logical().ListWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to list %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + + // These should all be denied. + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + if !strings.Contains(path, "ocsp") || !strings.Contains(err.Error(), "Code: 40") { + t.Fatalf("unexpected failure during write on read-only path %v while unauthed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on read-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on read-only path %v while unauthed: %v / %v", path, err, resp) + } + + // Retrying with token should allow read/list, but not modification still. 
+ client.SetToken(token) + resp, err = client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to read %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to list %v while authed: %v / %v", path, err, resp) + } + + // Should all be denied. + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + if !strings.Contains(path, "ocsp") || !strings.Contains(err.Error(), "Code: 40") { + t.Fatalf("unexpected failure during write on read-only path %v while authed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on read-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on read-only path %v while authed: %v / %v", path, err, resp) + } +} + +func pathShouldBeUnauthedWriteOnly(t *testing.T, client *api.Client, path string, token string) { + client.SetToken("") + resp, err := client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp) + } + + // These should all be denied. However, on OSS, we might end up with + // a regular 404, which looks like err == resp == nil; hence we only + // fail when there's a non-nil response and/or a non-nil err. 
+ resp, err = client.Logical().ReadWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during read on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during list on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during delete on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during patch on write-only path %v while unauthed: %v / %v", path, err, resp) + } + + // Retrying with token should allow writing, but nothing else. + client.SetToken(token) + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp) + } + + // These should all be denied. 
+ resp, err = client.Logical().ReadWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during read on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + if resp != nil || err != nil { + t.Fatalf("unexpected failure during list on write-only path %v while authed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during delete on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during patch on write-only path %v while authed: %v / %v", path, err, resp) + } +} + +type pathAuthChecker int + +const ( + shouldBeAuthed pathAuthChecker = iota + shouldBeUnauthedReadList + shouldBeUnauthedWriteOnly +) + +var pathAuthChckerMap = map[pathAuthChecker]pathAuthCheckerFunc{ + shouldBeAuthed: pathShouldBeAuthed, + shouldBeUnauthedReadList: pathShouldBeUnauthedReadList, + shouldBeUnauthedWriteOnly: pathShouldBeUnauthedWriteOnly, +} + +func TestProperAuthing(t *testing.T) { + t.Parallel() + ctx := context.Background() + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + token := client.Token() + + // Mount PKI. 
+ err := client.Sys().MountWithContext(ctx, "pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Setup basic configuration. + _, err = client.Logical().WriteWithContext(ctx, "pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().WriteWithContext(ctx, "pki/roles/test", map[string]interface{}{ + "allow_localhost": true, + }) + if err != nil { + t.Fatal(err) + } + + resp, err := client.Logical().WriteWithContext(ctx, "pki/issue/test", map[string]interface{}{ + "common_name": "localhost", + }) + if err != nil || resp == nil { + t.Fatal(err) + } + serial := resp.Data["serial_number"].(string) + eabKid := "13b80844-e60d-42d2-b7e9-152a8e834b90" + paths := map[string]pathAuthChecker{ + "ca_chain": shouldBeUnauthedReadList, + "cert/ca_chain": shouldBeUnauthedReadList, + "ca": shouldBeUnauthedReadList, + "ca/pem": shouldBeUnauthedReadList, + "cert/" + serial: shouldBeUnauthedReadList, + "cert/" + serial + "/raw": shouldBeUnauthedReadList, + "cert/" + serial + "/raw/pem": shouldBeUnauthedReadList, + "cert/crl": shouldBeUnauthedReadList, + "cert/crl/raw": shouldBeUnauthedReadList, + "cert/crl/raw/pem": shouldBeUnauthedReadList, + "cert/delta-crl": shouldBeUnauthedReadList, + "cert/delta-crl/raw": shouldBeUnauthedReadList, + "cert/delta-crl/raw/pem": shouldBeUnauthedReadList, + "cert/unified-crl": shouldBeUnauthedReadList, + "cert/unified-crl/raw": shouldBeUnauthedReadList, + "cert/unified-crl/raw/pem": shouldBeUnauthedReadList, + "cert/unified-delta-crl": shouldBeUnauthedReadList, + "cert/unified-delta-crl/raw": shouldBeUnauthedReadList, + "cert/unified-delta-crl/raw/pem": shouldBeUnauthedReadList, + "certs/": shouldBeAuthed, + "certs/revoked/": shouldBeAuthed, + "certs/revocation-queue/": shouldBeAuthed, + "certs/unified-revoked/": 
shouldBeAuthed, + "config/acme": shouldBeAuthed, + "config/auto-tidy": shouldBeAuthed, + "config/ca": shouldBeAuthed, + "config/cluster": shouldBeAuthed, + "config/crl": shouldBeAuthed, + "config/issuers": shouldBeAuthed, + "config/keys": shouldBeAuthed, + "config/urls": shouldBeAuthed, + "crl": shouldBeUnauthedReadList, + "crl/pem": shouldBeUnauthedReadList, + "crl/delta": shouldBeUnauthedReadList, + "crl/delta/pem": shouldBeUnauthedReadList, + "crl/rotate": shouldBeAuthed, + "crl/rotate-delta": shouldBeAuthed, + "intermediate/cross-sign": shouldBeAuthed, + "intermediate/generate/exported": shouldBeAuthed, + "intermediate/generate/internal": shouldBeAuthed, + "intermediate/generate/existing": shouldBeAuthed, + "intermediate/generate/kms": shouldBeAuthed, + "intermediate/set-signed": shouldBeAuthed, + "issue/test": shouldBeAuthed, + "issuer/default": shouldBeAuthed, + "issuer/default/der": shouldBeUnauthedReadList, + "issuer/default/json": shouldBeUnauthedReadList, + "issuer/default/pem": shouldBeUnauthedReadList, + "issuer/default/crl": shouldBeUnauthedReadList, + "issuer/default/crl/pem": shouldBeUnauthedReadList, + "issuer/default/crl/der": shouldBeUnauthedReadList, + "issuer/default/crl/delta": shouldBeUnauthedReadList, + "issuer/default/crl/delta/der": shouldBeUnauthedReadList, + "issuer/default/crl/delta/pem": shouldBeUnauthedReadList, + "issuer/default/unified-crl": shouldBeUnauthedReadList, + "issuer/default/unified-crl/pem": shouldBeUnauthedReadList, + "issuer/default/unified-crl/der": shouldBeUnauthedReadList, + "issuer/default/unified-crl/delta": shouldBeUnauthedReadList, + "issuer/default/unified-crl/delta/der": shouldBeUnauthedReadList, + "issuer/default/unified-crl/delta/pem": shouldBeUnauthedReadList, + "issuer/default/issue/test": shouldBeAuthed, + "issuer/default/resign-crls": shouldBeAuthed, + "issuer/default/revoke": shouldBeAuthed, + "issuer/default/sign-intermediate": shouldBeAuthed, + "issuer/default/sign-revocation-list": shouldBeAuthed, + 
"issuer/default/sign-self-issued": shouldBeAuthed, + "issuer/default/sign-verbatim": shouldBeAuthed, + "issuer/default/sign-verbatim/test": shouldBeAuthed, + "issuer/default/sign/test": shouldBeAuthed, + "issuers/": shouldBeUnauthedReadList, + "issuers/generate/intermediate/exported": shouldBeAuthed, + "issuers/generate/intermediate/internal": shouldBeAuthed, + "issuers/generate/intermediate/existing": shouldBeAuthed, + "issuers/generate/intermediate/kms": shouldBeAuthed, + "issuers/generate/root/exported": shouldBeAuthed, + "issuers/generate/root/internal": shouldBeAuthed, + "issuers/generate/root/existing": shouldBeAuthed, + "issuers/generate/root/kms": shouldBeAuthed, + "issuers/import/cert": shouldBeAuthed, + "issuers/import/bundle": shouldBeAuthed, + "key/default": shouldBeAuthed, + "keys/": shouldBeAuthed, + "keys/generate/internal": shouldBeAuthed, + "keys/generate/exported": shouldBeAuthed, + "keys/generate/kms": shouldBeAuthed, + "keys/import": shouldBeAuthed, + "ocsp": shouldBeUnauthedWriteOnly, + "ocsp/dGVzdAo=": shouldBeUnauthedReadList, + "revoke": shouldBeAuthed, + "revoke-with-key": shouldBeAuthed, + "roles/test": shouldBeAuthed, + "roles/": shouldBeAuthed, + "root": shouldBeAuthed, + "root/generate/exported": shouldBeAuthed, + "root/generate/internal": shouldBeAuthed, + "root/generate/existing": shouldBeAuthed, + "root/generate/kms": shouldBeAuthed, + "root/replace": shouldBeAuthed, + "root/rotate/internal": shouldBeAuthed, + "root/rotate/exported": shouldBeAuthed, + "root/rotate/existing": shouldBeAuthed, + "root/rotate/kms": shouldBeAuthed, + "root/sign-intermediate": shouldBeAuthed, + "root/sign-self-issued": shouldBeAuthed, + "sign-verbatim": shouldBeAuthed, + "sign-verbatim/test": shouldBeAuthed, + "sign/test": shouldBeAuthed, + "tidy": shouldBeAuthed, + "tidy-cancel": shouldBeAuthed, + "tidy-status": shouldBeAuthed, + "unified-crl": shouldBeUnauthedReadList, + "unified-crl/pem": shouldBeUnauthedReadList, + "unified-crl/delta": 
shouldBeUnauthedReadList, + "unified-crl/delta/pem": shouldBeUnauthedReadList, + "unified-ocsp": shouldBeUnauthedWriteOnly, + "unified-ocsp/dGVzdAo=": shouldBeUnauthedReadList, + "eab/": shouldBeAuthed, + "eab/" + eabKid: shouldBeAuthed, + } + + entPaths := getEntProperAuthingPaths(serial) + maps.Copy(paths, entPaths) + + // Add ACME based paths to the test suite + ossAcmePrefixes := []string{"acme/", "issuer/default/acme/", "roles/test/acme/", "issuer/default/roles/test/acme/"} + entAcmePrefixes := getEntAcmePrefixes() + for _, acmePrefix := range append(ossAcmePrefixes, entAcmePrefixes...) { + paths[acmePrefix+"directory"] = shouldBeUnauthedReadList + paths[acmePrefix+"new-nonce"] = shouldBeUnauthedReadList + paths[acmePrefix+"new-account"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"revoke-cert"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"new-order"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"orders"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"account/hrKmDYTvicHoHGVN2-3uzZV_BPGdE0W_dNaqYTtYqeo="] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"authorization/29da8c38-7a09-465e-b9a6-3d76802b1afd"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"challenge/29da8c38-7a09-465e-b9a6-3d76802b1afd/http-01"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"order/13b80844-e60d-42d2-b7e9-152a8e834b90"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"order/13b80844-e60d-42d2-b7e9-152a8e834b90/finalize"] = shouldBeUnauthedWriteOnly + paths[acmePrefix+"order/13b80844-e60d-42d2-b7e9-152a8e834b90/cert"] = shouldBeUnauthedWriteOnly + + // Make sure this new-eab path is auth'd + paths[acmePrefix+"new-eab"] = shouldBeAuthed + } + + for path, checkerType := range paths { + checker := pathAuthChckerMap[checkerType] + checker(t, client, "pki/"+path, token) + } + + client.SetToken(token) + openAPIResp, err := client.Logical().ReadWithContext(ctx, "sys/internal/specs/openapi") + if err != nil { + t.Fatalf("failed to get openapi data: %v", err) + } + + validatedPath := 
false + for openapi_path, raw_data := range openAPIResp.Data["paths"].(map[string]interface{}) { + if !strings.HasPrefix(openapi_path, "/pki/") { + t.Logf("Skipping path: %v", openapi_path) + continue + } + + t.Logf("Validating path: %v", openapi_path) + validatedPath = true + // Substitute values in from our testing map. + raw_path := openapi_path[5:] + if strings.Contains(raw_path, "roles/") && strings.Contains(raw_path, "{name}") { + raw_path = strings.ReplaceAll(raw_path, "{name}", "test") + } + if strings.Contains(raw_path, "{role}") { + raw_path = strings.ReplaceAll(raw_path, "{role}", "test") + } + if strings.Contains(raw_path, "ocsp/") && strings.Contains(raw_path, "{req}") { + raw_path = strings.ReplaceAll(raw_path, "{req}", "dGVzdAo=") + } + if strings.Contains(raw_path, "{issuer_ref}") { + raw_path = strings.ReplaceAll(raw_path, "{issuer_ref}", "default") + } + if strings.Contains(raw_path, "{key_ref}") { + raw_path = strings.ReplaceAll(raw_path, "{key_ref}", "default") + } + if strings.Contains(raw_path, "{exported}") { + raw_path = strings.ReplaceAll(raw_path, "{exported}", "internal") + } + if strings.Contains(raw_path, "{serial}") { + raw_path = strings.ReplaceAll(raw_path, "{serial}", serial) + } + if strings.Contains(raw_path, "acme/account/") && strings.Contains(raw_path, "{kid}") { + raw_path = strings.ReplaceAll(raw_path, "{kid}", "hrKmDYTvicHoHGVN2-3uzZV_BPGdE0W_dNaqYTtYqeo=") + } + if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{auth_id}") { + raw_path = strings.ReplaceAll(raw_path, "{auth_id}", "29da8c38-7a09-465e-b9a6-3d76802b1afd") + } + if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{challenge_type}") { + raw_path = strings.ReplaceAll(raw_path, "{challenge_type}", "http-01") + } + if strings.Contains(raw_path, "acme/") && strings.Contains(raw_path, "{order_id}") { + raw_path = strings.ReplaceAll(raw_path, "{order_id}", "13b80844-e60d-42d2-b7e9-152a8e834b90") + } + if strings.Contains(raw_path, 
"eab") && strings.Contains(raw_path, "{key_id}") { + raw_path = strings.ReplaceAll(raw_path, "{key_id}", eabKid) + } + if strings.Contains(raw_path, "external-policy/") && strings.Contains(raw_path, "{policy}") { + raw_path = strings.ReplaceAll(raw_path, "{policy}", "a-policy") + } + + raw_path = entProperAuthingPathReplacer(raw_path) + + handler, present := paths[raw_path] + if !present { + t.Fatalf("OpenAPI reports PKI mount contains %v -> %v but was not tested to be authed or not authed.", + openapi_path, raw_path) + } + + openapi_data := raw_data.(map[string]interface{}) + hasList := false + rawGetData, hasGet := openapi_data["get"] + if hasGet { + getData := rawGetData.(map[string]interface{}) + getParams, paramsPresent := getData["parameters"].(map[string]interface{}) + if getParams != nil && paramsPresent { + if _, hasList = getParams["list"]; hasList { + // LIST is exclusive from GET on the same endpoint usually. + hasGet = false + } + } + } + _, hasPost := openapi_data["post"] + _, hasDelete := openapi_data["delete"] + + if handler == shouldBeUnauthedReadList { + if hasPost || hasDelete { + t.Fatalf("Unauthed read-only endpoints should not have POST/DELETE capabilities: %v->%v", openapi_path, raw_path) + } + } else if handler == shouldBeUnauthedWriteOnly { + if hasGet || hasList { + t.Fatalf("Unauthed write-only endpoints should not have GET/LIST capabilities: %v->%v", openapi_path, raw_path) + } + } + } + + if !validatedPath { + t.Fatalf("Expected to have validated at least one path.") + } +} + +func TestPatchIssuer(t *testing.T) { + t.Parallel() + + type TestCase struct { + Field string + Before interface{} + Patched interface{} + } + testCases := []TestCase{ + { + Field: "issuer_name", + Before: "root", + Patched: "root-new", + }, + { + Field: "leaf_not_after_behavior", + Before: "err", + Patched: "permit", + }, + { + Field: "usage", + Before: "crl-signing,issuing-certificates,ocsp-signing,read-only", + Patched: "issuing-certificates,read-only", + }, + 
{ + Field: "revocation_signature_algorithm", + Before: "ECDSAWithSHA256", + Patched: "ECDSAWithSHA384", + }, + { + Field: "issuing_certificates", + Before: []string{"http://localhost/v1/pki-1/ca"}, + Patched: []string{"http://localhost/v1/pki/ca"}, + }, + { + Field: "crl_distribution_points", + Before: []string{"http://localhost/v1/pki-1/crl"}, + Patched: []string{"http://localhost/v1/pki/crl"}, + }, + { + Field: "ocsp_servers", + Before: []string{"http://localhost/v1/pki-1/ocsp"}, + Patched: []string{"http://localhost/v1/pki/ocsp"}, + }, + { + Field: "enable_aia_url_templating", + Before: false, + Patched: true, + }, + { + Field: "manual_chain", + Before: []string(nil), + Patched: []string{"self"}, + }, + } + + for index, testCase := range testCases { + t.Logf("index: %v / tc: %v", index, testCase) + + b, s := CreateBackendWithStorage(t) + + // 1. Setup root issuer. + resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{ + "common_name": "Vault Root CA", + "key_type": "ec", + "ttl": "7200h", + "issuer_name": "root", + }) + requireSuccessNonNilResponse(t, resp, err, "failed generating root issuer") + id := string(resp.Data["issuer_id"].(issuerID)) + + // 2. Enable Cluster paths + resp, err = CBWrite(b, s, "config/urls", map[string]interface{}{ + "path": "https://localhost/v1/pki", + "aia_path": "http://localhost/v1/pki", + }) + requireSuccessNonNilResponse(t, resp, err, "failed updating AIA config") + + // 3. Add AIA information + resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + "issuing_certificates": "http://localhost/v1/pki-1/ca", + "crl_distribution_points": "http://localhost/v1/pki-1/crl", + "ocsp_servers": "http://localhost/v1/pki-1/ocsp", + }) + requireSuccessNonNilResponse(t, resp, err, "failed setting up issuer") + + // 4. Read the issuer before. 
+ resp, err = CBRead(b, s, "issuer/default") + requireSuccessNonNilResponse(t, resp, err, "failed reading root issuer before") + require.Equal(t, testCase.Before, resp.Data[testCase.Field], "bad expectations") + + // 5. Perform modification. + resp, err = CBPatch(b, s, "issuer/default", map[string]interface{}{ + testCase.Field: testCase.Patched, + }) + requireSuccessNonNilResponse(t, resp, err, "failed patching root issuer") + + if testCase.Field != "manual_chain" { + require.Equal(t, testCase.Patched, resp.Data[testCase.Field], "failed persisting value") + } else { + // self->id + require.Equal(t, []string{id}, resp.Data[testCase.Field], "failed persisting value") + } + + // 6. Ensure it stuck + resp, err = CBRead(b, s, "issuer/default") + requireSuccessNonNilResponse(t, resp, err, "failed reading root issuer after") + + if testCase.Field != "manual_chain" { + require.Equal(t, testCase.Patched, resp.Data[testCase.Field]) + } else { + // self->id + require.Equal(t, []string{id}, resp.Data[testCase.Field], "failed persisting value") + } + } +} + +func TestGenerateRootCAWithAIA(t *testing.T) { + // Generate a root CA at /pki-root + b_root, s_root := CreateBackendWithStorage(t) + + // Setup templated AIA information + _, err := CBWrite(b_root, s_root, "config/cluster", map[string]interface{}{ + "path": "https://localhost:8200", + "aia_path": "https://localhost:8200", + }) + require.NoError(t, err, "failed to write AIA settings") + + _, err = CBWrite(b_root, s_root, "config/urls", map[string]interface{}{ + "crl_distribution_points": "{{cluster_path}}/issuer/{{issuer_id}}/crl/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", + "ocsp_servers": "{{cluster_path}}/ocsp", + "enable_templating": true, + }) + require.NoError(t, err, "failed to write AIA settings") + + // Write a root issuer, this should succeed. 
+ resp, err := CBWrite(b_root, s_root, "root/generate/exported", map[string]interface{}{ + "common_name": "root myvault.com", + "key_type": "ec", + }) + requireSuccessNonNilResponse(t, resp, err, "expected root generation to succeed") +} + +var ( + initTest sync.Once + rsaCAKey string + rsaCACert string + ecCAKey string + ecCACert string + edCAKey string + edCACert string +) diff --git a/command/base.go b/command/base.go index 10be15b636cb..bb4c455b4805 100644 --- a/command/base.go +++ b/command/base.go @@ -1,722 +1,1268 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 -package command +package vault import ( - "bytes" - "flag" + "context" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/subtle" + "encoding/binary" + "errors" "fmt" "io" - "io/ioutil" - "os" - "regexp" + "math" + "strconv" "strings" "sync" "time" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/command/token" - "github.com/hashicorp/vault/helper/namespace" - "github.com/mattn/go-isatty" - "github.com/mitchellh/cli" - "github.com/pkg/errors" - "github.com/posener/complete" + "github.com/armon/go-metrics" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + "go.uber.org/atomic" ) const ( - // maxLineLength is the maximum width of any line. - maxLineLength int = 78 + // initialKeyTerm is the hard coded initial key term. This is + // used only for values that are not encrypted with the keyring. + initialKeyTerm = 1 - // notSetValue is a flag value for a not-set value - notSetValue = "(not set)" -) + // termSize the number of bytes used for the key term. + termSize = 4 -// reRemoveWhitespace is a regular expression for stripping whitespace from -// a string. 
-var reRemoveWhitespace = regexp.MustCompile(`[\s]+`) + autoRotateCheckInterval = 5 * time.Minute + legacyRotateReason = "legacy rotation" + // The keyring is persisted before the root key. + keyringTimeout = 1 * time.Second +) -type BaseCommand struct { - UI cli.Ui +// Versions of the AESGCM storage methodology +const ( + AESGCMVersion1 = 0x1 + AESGCMVersion2 = 0x2 +) - flags *FlagSets - flagsOnce sync.Once +// barrierInit is the JSON encoded value stored +type barrierInit struct { + Version int // Version is the current format version + Key []byte // Key is the primary encryption key +} - flagAddress string - flagAgentProxyAddress string - flagCACert string - flagCAPath string - flagClientCert string - flagClientKey string - flagNamespace string - flagNS string - flagPolicyOverride bool - flagTLSServerName string - flagTLSSkipVerify bool - flagDisableRedirects bool - flagWrapTTL time.Duration - flagUnlockKey string +// Validate AESGCMBarrier satisfies SecurityBarrier interface +var ( + _ SecurityBarrier = &AESGCMBarrier{} + barrierEncryptsMetric = []string{"barrier", "estimated_encryptions"} + barrierRotationsMetric = []string{"barrier", "auto_rotation"} +) - flagFormat string - flagField string - flagDetailed bool - flagOutputCurlString bool - flagOutputPolicy bool - flagNonInteractive bool - addrWarning string +// AESGCMBarrier is a SecurityBarrier implementation that uses the AES +// cipher core and the Galois Counter Mode block mode. It defaults to +// the golang NONCE default value of 12 and a key size of 256 +// bit. AES-GCM is high performance, and provides both confidentiality +// and integrity. +type AESGCMBarrier struct { + backend physical.Backend + + l sync.RWMutex + sealed bool + + // keyring is used to maintain all of the encryption keys, including + // the active key used for encryption, but also prior keys to allow + // decryption of keys encrypted under previous terms. 
+ keyring *Keyring + + // cache is used to reduce the number of AEAD constructions we do + cache map[uint32]cipher.AEAD + cacheLock sync.RWMutex + + // currentAESGCMVersionByte is prefixed to a message to allow for + // future versioning of barrier implementations. It's var instead + // of const to allow for testing + currentAESGCMVersionByte byte + + initialized atomic.Bool + + UnaccountedEncryptions *atomic.Int64 + // Used only for testing + RemoteEncryptions *atomic.Int64 + totalLocalEncryptions *atomic.Int64 +} - flagMFA []string +func (b *AESGCMBarrier) RotationConfig() (kc KeyRotationConfig, err error) { + if b.keyring == nil { + return kc, errors.New("keyring not yet present") + } + return b.keyring.rotationConfig.Clone(), nil +} - flagHeader map[string]string +func (b *AESGCMBarrier) SetRotationConfig(ctx context.Context, rotConfig KeyRotationConfig) error { + b.l.Lock() + defer b.l.Unlock() + rotConfig.Sanitize() + if !rotConfig.Equals(b.keyring.rotationConfig) { + b.keyring.rotationConfig = rotConfig - tokenHelper token.TokenHelper + return b.persistKeyring(ctx, b.keyring) + } + return nil +} - client *api.Client +// NewAESGCMBarrier is used to construct a new barrier that uses +// the provided physical backend for storage. +func NewAESGCMBarrier(physical physical.Backend) (*AESGCMBarrier, error) { + b := &AESGCMBarrier{ + backend: physical, + sealed: true, + cache: make(map[uint32]cipher.AEAD), + currentAESGCMVersionByte: byte(AESGCMVersion2), + UnaccountedEncryptions: atomic.NewInt64(0), + RemoteEncryptions: atomic.NewInt64(0), + totalLocalEncryptions: atomic.NewInt64(0), + } + return b, nil } -// Client returns the HTTP API client. The client is cached on the command to -// save performance on future calls. -func (c *BaseCommand) Client() (*api.Client, error) { - // Read the test client if present - if c.client != nil { - return c.client, nil +// Initialized checks if the barrier has been initialized +// and has a root key set. 
+func (b *AESGCMBarrier) Initialized(ctx context.Context) (bool, error) { + if b.initialized.Load() { + return true, nil } - if c.addrWarning != "" && c.UI != nil { - if os.Getenv("VAULT_ADDR") == "" { - if !c.flagNonInteractive && isatty.IsTerminal(os.Stdin.Fd()) { - c.UI.Warn(wrapAtLength(c.addrWarning)) - } - } + // Read the keyring file + keys, err := b.backend.List(ctx, keyringPrefix) + if err != nil { + return false, fmt.Errorf("failed to check for initialization: %w", err) + } + if strutil.StrListContains(keys, "keyring") { + b.initialized.Store(true) + return true, nil } - config := api.DefaultConfig() + // Fallback, check for the old sentinel file + out, err := b.backend.Get(ctx, barrierInitPath) + if err != nil { + return false, fmt.Errorf("failed to check for initialization: %w", err) + } + b.initialized.Store(out != nil) + return out != nil, nil +} - if err := config.ReadEnvironment(); err != nil { - return nil, errors.Wrap(err, "failed to read environment") +// Initialize works only if the barrier has not been initialized +// and makes use of the given root key. 
+func (b *AESGCMBarrier) Initialize(ctx context.Context, key []byte, sealKey []byte, reader io.Reader) error { + // Verify the key size + min, max := b.KeyLength() + if len(key) < min || len(key) > max { + return fmt.Errorf("key size must be %d or %d", min, max) } - if c.flagAddress != "" { - config.Address = c.flagAddress + // Check if already initialized + if alreadyInit, err := b.Initialized(ctx); err != nil { + return err + } else if alreadyInit { + return ErrBarrierAlreadyInit } - if c.flagAgentProxyAddress != "" { - config.Address = c.flagAgentProxyAddress + + // Generate encryption key + encryptionKey, err := b.GenerateKey(reader) + if err != nil { + return fmt.Errorf("failed to generate encryption key: %w", err) } - if c.flagOutputCurlString { - config.OutputCurlString = c.flagOutputCurlString + // Create a new keyring, install the keys + keyring := NewKeyring() + keyring = keyring.SetRootKey(key) + keyring, err = keyring.AddKey(&Key{ + Term: 1, + Version: 1, + Value: encryptionKey, + }) + if err != nil { + return fmt.Errorf("failed to create keyring: %w", err) } - if c.flagOutputPolicy { - config.OutputPolicy = c.flagOutputPolicy + + err = b.persistKeyring(ctx, keyring) + if err != nil { + return err } - // If we need custom TLS configuration, then set it - if c.flagCACert != "" || c.flagCAPath != "" || c.flagClientCert != "" || - c.flagClientKey != "" || c.flagTLSServerName != "" || c.flagTLSSkipVerify { - t := &api.TLSConfig{ - CACert: c.flagCACert, - CAPath: c.flagCAPath, - ClientCert: c.flagClientCert, - ClientKey: c.flagClientKey, - TLSServerName: c.flagTLSServerName, - Insecure: c.flagTLSSkipVerify, + if len(sealKey) > 0 { + primary, err := b.aeadFromKey(encryptionKey) + if err != nil { + return err } - // Setup TLS config - if err := config.ConfigureTLS(t); err != nil { - return nil, errors.Wrap(err, "failed to setup TLS config") + err = b.putInternal(ctx, 1, primary, &logical.StorageEntry{ + Key: shamirKekPath, + Value: sealKey, + }) + if err != 
nil { + return fmt.Errorf("failed to store new seal key: %w", err) } } - // Build the client - client, err := api.NewClient(config) + return nil +} + +// persistKeyring is used to write out the keyring using the +// root key to encrypt it. +func (b *AESGCMBarrier) persistKeyring(ctx context.Context, keyring *Keyring) error { + return b.persistKeyringInternal(ctx, keyring, false) +} + +// persistKeyringBestEffort is like persistKeyring but 'best effort', ie times out early +// for non critical keyring writes (encryption/rotation tracking) +func (b *AESGCMBarrier) persistKeyringBestEffort(ctx context.Context, keyring *Keyring) error { + return b.persistKeyringInternal(ctx, keyring, true) +} + +// persistKeyring is used to write out the keyring using the +// root key to encrypt it. +func (b *AESGCMBarrier) persistKeyringInternal(ctx context.Context, keyring *Keyring, bestEffort bool) error { + // Create the keyring entry + keyringBuf, err := keyring.Serialize() + defer memzero(keyringBuf) if err != nil { - return nil, errors.Wrap(err, "failed to create client") + return fmt.Errorf("failed to serialize keyring: %w", err) } - // Turn off retries on the CLI - if os.Getenv(api.EnvVaultMaxRetries) == "" { - client.SetMaxRetries(0) + // Create the AES-GCM + gcm, err := b.aeadFromKey(keyring.RootKey()) + if err != nil { + return fmt.Errorf("failed to retrieve AES-GCM AEAD from root key: %w", err) } - // Set the wrapping function - client.SetWrappingLookupFunc(c.DefaultWrappingLookupFunc) + // Encrypt the barrier init value + value, err := b.encrypt(keyringPath, initialKeyTerm, gcm, keyringBuf) + if err != nil { + return fmt.Errorf("failed to encrypt barrier initial value: %w", err) + } - // Get the token if it came in from the environment - token := client.Token() + // Create the keyring physical entry + pe := &physical.Entry{ + Key: keyringPath, + Value: value, + } - // If we don't have a token, check the token helper - if token == "" { - helper, err := c.TokenHelper() - if 
err != nil { - return nil, errors.Wrap(err, "failed to get token helper") - } - token, err = helper.Get() - if err != nil { - return nil, errors.Wrap(err, "failed to get token from token helper") - } + ctxKeyring := ctx + + if bestEffort { + // We reduce the timeout on the initial 'put' but if this succeeds we will + // allow longer later on when we try to persist the root key . + var cancelKeyring func() + ctxKeyring, cancelKeyring = context.WithTimeout(ctx, keyringTimeout) + defer cancelKeyring() } - // Set the token - if token != "" { - client.SetToken(token) + if err := b.backend.Put(ctxKeyring, pe); err != nil { + return fmt.Errorf("failed to persist keyring: %w", err) } - client.SetMFACreds(c.flagMFA) + // Serialize the root key value + key := &Key{ + Term: 1, + Version: 1, + Value: keyring.RootKey(), + } + keyBuf, err := key.Serialize() + defer memzero(keyBuf) + if err != nil { + return fmt.Errorf("failed to serialize root key: %w", err) + } - // flagNS takes precedence over flagNamespace. After resolution, point both - // flags to the same value to be able to use them interchangeably anywhere. 
- if c.flagNS != notSetValue { - c.flagNamespace = c.flagNS + // Encrypt the root key + activeKey := keyring.ActiveKey() + aead, err := b.aeadFromKey(activeKey.Value) + if err != nil { + return fmt.Errorf("failed to retrieve AES-GCM AEAD from active key: %w", err) } - if c.flagNamespace != notSetValue { - client.SetNamespace(namespace.Canonicalize(c.flagNamespace)) + value, err = b.encryptTracked(rootKeyPath, activeKey.Term, aead, keyBuf) + if err != nil { + return fmt.Errorf("failed to encrypt and track active key value: %w", err) } - if c.flagPolicyOverride { - client.SetPolicyOverride(c.flagPolicyOverride) + + // Update the rootKeyPath for standby instances + pe = &physical.Entry{ + Key: rootKeyPath, + Value: value, } - if c.flagHeader != nil { + // Use the longer timeout from the original context, for the follow-up write + // to persist the root key, as the initial storage of the keyring was successful. + if err := b.backend.Put(ctx, pe); err != nil { + return fmt.Errorf("failed to persist root key: %w", err) + } + return nil +} - var forbiddenHeaders []string - for key, val := range c.flagHeader { +// GenerateKey is used to generate a new key +func (b *AESGCMBarrier) GenerateKey(reader io.Reader) ([]byte, error) { + // Generate a 256bit key + buf := make([]byte, 2*aes.BlockSize) + _, err := reader.Read(buf) - if strings.HasPrefix(key, "X-Vault-") { - forbiddenHeaders = append(forbiddenHeaders, key) - continue - } - client.AddHeader(key, val) - } + return buf, err +} + +// KeyLength is used to sanity check a key +func (b *AESGCMBarrier) KeyLength() (int, int) { + return aes.BlockSize, 2 * aes.BlockSize +} + +// Sealed checks if the barrier has been unlocked yet. The Barrier +// is not expected to be able to perform any CRUD until it is unsealed. 
+func (b *AESGCMBarrier) Sealed() (bool, error) { + b.l.RLock() + sealed := b.sealed + b.l.RUnlock() + return sealed, nil +} + +// VerifyRoot is used to check if the given key matches the root key +func (b *AESGCMBarrier) VerifyRoot(key []byte) error { + b.l.RLock() + defer b.l.RUnlock() + if b.sealed { + return ErrBarrierSealed + } + if subtle.ConstantTimeCompare(key, b.keyring.RootKey()) != 1 { + return ErrBarrierInvalidKey + } + return nil +} + +// ReloadKeyring is used to re-read the underlying keyring. +// This is used for HA deployments to ensure the latest keyring +// is present in the leader. +func (b *AESGCMBarrier) ReloadKeyring(ctx context.Context) error { + b.l.Lock() + defer b.l.Unlock() - if len(forbiddenHeaders) > 0 { - return nil, fmt.Errorf("failed to setup Headers[%s]: Header starting by 'X-Vault-' are for internal usage only", strings.Join(forbiddenHeaders, ", ")) + // Create the AES-GCM + gcm, err := b.aeadFromKey(b.keyring.RootKey()) + if err != nil { + return err + } + + // Read in the keyring + out, err := b.backend.Get(ctx, keyringPath) + if err != nil { + return fmt.Errorf("failed to check for keyring: %w", err) + } + + // Ensure that the keyring exists. This should never happen, + // and indicates something really bad has happened. + if out == nil { + return errors.New("keyring unexpectedly missing") + } + + // Verify the term is always just one + term := binary.BigEndian.Uint32(out.Value[:4]) + if term != initialKeyTerm { + return errors.New("term mis-match") + } + + // Decrypt the barrier init key + plain, err := b.decrypt(keyringPath, gcm, out.Value) + defer memzero(plain) + if err != nil { + if strings.Contains(err.Error(), "message authentication failed") { + return ErrBarrierInvalidKey } + return err } - c.client = client + // Reset enc. 
counters, this may be a leadership change
+	b.totalLocalEncryptions.Store(0)
+	b.UnaccountedEncryptions.Store(0)
+	b.RemoteEncryptions.Store(0)
-	return client, nil
+	return b.recoverKeyring(plain)
 }
 
-// SetAddress sets the token helper on the command; useful for the demo server and other outside cases.
-func (c *BaseCommand) SetAddress(addr string) {
-	c.flagAddress = addr
-}
+func (b *AESGCMBarrier) recoverKeyring(plaintext []byte) error {
+	keyring, err := DeserializeKeyring(plaintext)
+	if err != nil {
+		return fmt.Errorf("keyring deserialization failed: %w", err)
+	}
 
-// SetTokenHelper sets the token helper on the command.
-func (c *BaseCommand) SetTokenHelper(th token.TokenHelper) {
-	c.tokenHelper = th
+	// Setup the keyring and finish
+	b.cache = make(map[uint32]cipher.AEAD)
+	b.keyring = keyring
+	return nil
 }
 
-// TokenHelper returns the token helper attached to the command.
-func (c *BaseCommand) TokenHelper() (token.TokenHelper, error) {
-	if c.tokenHelper != nil {
-		return c.tokenHelper, nil
+// ReloadRootKey is used to re-read the underlying root key.
+// This is used for HA deployments to ensure the latest root key
+// is available for keyring reloading.
+func (b *AESGCMBarrier) ReloadRootKey(ctx context.Context) error {
+	// Read the rootKeyPath upgrade
+	out, err := b.Get(ctx, rootKeyPath)
+	if err != nil {
+		return fmt.Errorf("failed to read root key path: %w", err)
+	}
+
+	// The rootKeyPath could be missing (backwards incompatible),
+	// we can ignore this and attempt to make progress with the current
+	// root key. 
+ if out == nil { + return nil } - helper, err := DefaultTokenHelper() + // Grab write lock and refetch + b.l.Lock() + defer b.l.Unlock() + + out, err = b.lockSwitchedGet(ctx, rootKeyPath, false) if err != nil { - return nil, err + return fmt.Errorf("failed to read root key path: %w", err) } - return helper, nil -} -// DefaultWrappingLookupFunc is the default wrapping function based on the -// CLI flag. -func (c *BaseCommand) DefaultWrappingLookupFunc(operation, path string) string { - if c.flagWrapTTL != 0 { - return c.flagWrapTTL.String() + if out == nil { + return nil } - return api.DefaultWrappingLookupFunc(operation, path) -} + // Deserialize the root key + key, err := DeserializeKey(out.Value) + memzero(out.Value) + if err != nil { + return fmt.Errorf("failed to deserialize key: %w", err) + } -// getMFAValidationRequired checks to see if the secret exists and has an MFA -// requirement. If MFA is required and the number of constraints is greater than -// 1, we can assert that interactive validation is not required. -func (c *BaseCommand) getMFAValidationRequired(secret *api.Secret) bool { - if secret != nil && secret.Auth != nil && secret.Auth.MFARequirement != nil { - if c.flagMFA == nil && len(secret.Auth.MFARequirement.MFAConstraints) == 1 { - return true - } else if len(secret.Auth.MFARequirement.MFAConstraints) > 1 { - return true - } + // Check if the root key is the same + if subtle.ConstantTimeCompare(b.keyring.RootKey(), key.Value) == 1 { + return nil } - return false + // Update the root key + oldKeyring := b.keyring + b.keyring = b.keyring.SetRootKey(key.Value) + oldKeyring.Zeroize(false) + return nil } -// getInteractiveMFAMethodInfo returns MFA method information only if operating -// in interactive mode and one MFA method is configured. 
-func (c *BaseCommand) getInteractiveMFAMethodInfo(secret *api.Secret) *MFAMethodInfo { - if secret == nil || secret.Auth == nil || secret.Auth.MFARequirement == nil { +// Unseal is used to provide the root key which permits the barrier +// to be unsealed. If the key is not correct, the barrier remains sealed. +func (b *AESGCMBarrier) Unseal(ctx context.Context, key []byte) error { + b.l.Lock() + defer b.l.Unlock() + + // Do nothing if already unsealed + if !b.sealed { return nil } - mfaConstraints := secret.Auth.MFARequirement.MFAConstraints - if c.flagNonInteractive || len(mfaConstraints) != 1 || !isatty.IsTerminal(os.Stdin.Fd()) { - return nil + // Create the AES-GCM + gcm, err := b.aeadFromKey(key) + if err != nil { + return err } - for _, mfaConstraint := range mfaConstraints { - if len(mfaConstraint.Any) != 1 { - return nil + // Read in the keyring + out, err := b.backend.Get(ctx, keyringPath) + if err != nil { + return fmt.Errorf("failed to check for keyring: %w", err) + } + if out != nil { + // Verify the term is always just one + term := binary.BigEndian.Uint32(out.Value[:4]) + if term != initialKeyTerm { + return errors.New("term mis-match") } - return &MFAMethodInfo{ - methodType: mfaConstraint.Any[0].Type, - methodID: mfaConstraint.Any[0].ID, - usePasscode: mfaConstraint.Any[0].UsesPasscode, + // Decrypt the barrier init key + plain, err := b.decrypt(keyringPath, gcm, out.Value) + defer memzero(plain) + if err != nil { + if strings.Contains(err.Error(), "message authentication failed") { + return ErrBarrierInvalidKey + } + return err + } + + // Recover the keyring + err = b.recoverKeyring(plain) + if err != nil { + return fmt.Errorf("keyring deserialization failed: %w", err) } + + b.sealed = false + + return nil + } + + // Read the barrier initialization key + out, err = b.backend.Get(ctx, barrierInitPath) + if err != nil { + return fmt.Errorf("failed to check for initialization: %w", err) + } + if out == nil { + return ErrBarrierNotInit } + // Verify 
the term is always just one + term := binary.BigEndian.Uint32(out.Value[:4]) + if term != initialKeyTerm { + return errors.New("term mis-match") + } + + // Decrypt the barrier init key + plain, err := b.decrypt(barrierInitPath, gcm, out.Value) + if err != nil { + if strings.Contains(err.Error(), "message authentication failed") { + return ErrBarrierInvalidKey + } + return err + } + defer memzero(plain) + + // Unmarshal the barrier init + var init barrierInit + if err := jsonutil.DecodeJSON(plain, &init); err != nil { + return fmt.Errorf("failed to unmarshal barrier init file") + } + + // Setup a new keyring, this is for backwards compatibility + keyringNew := NewKeyring() + keyring := keyringNew.SetRootKey(key) + + // AddKey reuses the root, so we are only zeroizing after this call + defer keyringNew.Zeroize(false) + + keyring, err = keyring.AddKey(&Key{ + Term: 1, + Version: 1, + Value: init.Key, + }) + if err != nil { + return fmt.Errorf("failed to create keyring: %w", err) + } + if err := b.persistKeyring(ctx, keyring); err != nil { + return err + } + + // Delete the old barrier entry + if err := b.backend.Delete(ctx, barrierInitPath); err != nil { + return fmt.Errorf("failed to delete barrier init file: %w", err) + } + + // Set the vault as unsealed + b.keyring = keyring + b.sealed = false + return nil } -func (c *BaseCommand) validateMFA(reqID string, methodInfo MFAMethodInfo) (*api.Secret, error) { - var passcode string - var err error - if methodInfo.usePasscode { - passcode, err = c.UI.AskSecret(fmt.Sprintf("Enter the passphrase for methodID %q of type %q:", methodInfo.methodID, methodInfo.methodType)) - if err != nil { - return nil, fmt.Errorf("failed to read passphrase: %w. please validate the login by sending a request to sys/mfa/validate", err) - } - } else { - c.UI.Warn("Asking Vault to perform MFA validation with upstream service. " + - "You should receive a push notification in your authenticator app shortly") +// Seal is used to re-seal the barrier. 
This requires the barrier to +// be unsealed again to perform any further operations. +func (b *AESGCMBarrier) Seal() error { + b.l.Lock() + defer b.l.Unlock() + + // Remove the primary key, and seal the vault + b.cache = make(map[uint32]cipher.AEAD) + b.keyring.Zeroize(true) + b.keyring = nil + b.sealed = true + return nil +} + +// Rotate is used to create a new encryption key. All future writes +// should use the new key, while old values should still be decryptable. +func (b *AESGCMBarrier) Rotate(ctx context.Context, randomSource io.Reader) (uint32, error) { + b.l.Lock() + defer b.l.Unlock() + if b.sealed { + return 0, ErrBarrierSealed } - // passcode could be an empty string - mfaPayload := map[string]interface{}{ - methodInfo.methodID: []string{passcode}, + // Generate a new key + encrypt, err := b.GenerateKey(randomSource) + if err != nil { + return 0, fmt.Errorf("failed to generate encryption key: %w", err) } - client, err := c.Client() + // Get the next term + term := b.keyring.ActiveTerm() + newTerm := term + 1 + + // Add a new encryption key + newKeyring, err := b.keyring.AddKey(&Key{ + Term: newTerm, + Version: 1, + Value: encrypt, + }) if err != nil { - return nil, err + return 0, fmt.Errorf("failed to add new encryption key: %w", err) + } + + // Persist the new keyring + if err := b.persistKeyring(ctx, newKeyring); err != nil { + return 0, err } - return client.Sys().MFAValidate(reqID, mfaPayload) + // Clear encryption tracking + b.RemoteEncryptions.Store(0) + b.totalLocalEncryptions.Store(0) + b.UnaccountedEncryptions.Store(0) + + // Swap the keyrings + b.keyring = newKeyring + + return newTerm, nil } -type FlagSetBit uint +// CreateUpgrade creates an upgrade path key to the given term from the previous term +func (b *AESGCMBarrier) CreateUpgrade(ctx context.Context, term uint32) error { + b.l.RLock() + if b.sealed { + b.l.RUnlock() + return ErrBarrierSealed + } -const ( - FlagSetNone FlagSetBit = 1 << iota - FlagSetHTTP - FlagSetOutputField - 
FlagSetOutputFormat - FlagSetOutputDetailed -) + // Get the key for this term + termKey := b.keyring.TermKey(term) + buf, err := termKey.Serialize() + defer memzero(buf) + if err != nil { + b.l.RUnlock() + return err + } -// flagSet creates the flags for this command. The result is cached on the -// command to save performance on future calls. -func (c *BaseCommand) flagSet(bit FlagSetBit) *FlagSets { - c.flagsOnce.Do(func() { - set := NewFlagSets(c.UI) - - // These flag sets will apply to all leaf subcommands. - // TODO: Optional, but FlagSetHTTP can be safely removed from the individual - // Flags() subcommands. - bit = bit | FlagSetHTTP - - if bit&FlagSetHTTP != 0 { - f := set.NewFlagSet("HTTP Options") - - addrStringVar := &StringVar{ - Name: flagNameAddress, - Target: &c.flagAddress, - EnvVar: api.EnvVaultAddress, - Completion: complete.PredictAnything, - Usage: "Address of the Vault server.", - } + // Get the AEAD for the previous term + prevTerm := term - 1 + primary, err := b.aeadForTerm(prevTerm) + if err != nil { + b.l.RUnlock() + return err + } - if c.flagAddress != "" { - addrStringVar.Default = c.flagAddress - } else { - addrStringVar.Default = "https://127.0.0.1:8200" - c.addrWarning = fmt.Sprintf("WARNING! VAULT_ADDR and -address unset. Defaulting to %s.", addrStringVar.Default) - } - f.StringVar(addrStringVar) - - agentAddrStringVar := &StringVar{ - Name: "agent-address", - Target: &c.flagAgentProxyAddress, - EnvVar: api.EnvVaultAgentAddr, - Completion: complete.PredictAnything, - Usage: "Address of the Agent.", - } - f.StringVar(agentAddrStringVar) - - f.StringVar(&StringVar{ - Name: flagNameCACert, - Target: &c.flagCACert, - Default: "", - EnvVar: api.EnvVaultCACert, - Completion: complete.PredictFiles("*"), - Usage: "Path on the local disk to a single PEM-encoded CA " + - "certificate to verify the Vault server's SSL certificate. 
This " + - "takes precedence over -ca-path.", - }) - - f.StringVar(&StringVar{ - Name: flagNameCAPath, - Target: &c.flagCAPath, - Default: "", - EnvVar: api.EnvVaultCAPath, - Completion: complete.PredictDirs("*"), - Usage: "Path on the local disk to a directory of PEM-encoded CA " + - "certificates to verify the Vault server's SSL certificate.", - }) - - f.StringVar(&StringVar{ - Name: flagNameClientCert, - Target: &c.flagClientCert, - Default: "", - EnvVar: api.EnvVaultClientCert, - Completion: complete.PredictFiles("*"), - Usage: "Path on the local disk to a single PEM-encoded CA " + - "certificate to use for TLS authentication to the Vault server. If " + - "this flag is specified, -client-key is also required.", - }) - - f.StringVar(&StringVar{ - Name: flagNameClientKey, - Target: &c.flagClientKey, - Default: "", - EnvVar: api.EnvVaultClientKey, - Completion: complete.PredictFiles("*"), - Usage: "Path on the local disk to a single PEM-encoded private key " + - "matching the client certificate from -client-cert.", - }) - - f.StringVar(&StringVar{ - Name: "namespace", - Target: &c.flagNamespace, - Default: notSetValue, // this can never be a real value - EnvVar: api.EnvVaultNamespace, - Completion: complete.PredictAnything, - Usage: "The namespace to use for the command. Setting this is not " + - "necessary but allows using relative paths. -ns can be used as " + - "shortcut.", - }) - - f.StringVar(&StringVar{ - Name: "ns", - Target: &c.flagNS, - Default: notSetValue, // this can never be a real value - Completion: complete.PredictAnything, - Hidden: true, - Usage: "Alias for -namespace. 
This takes precedence over -namespace.", - }) - - f.StringVar(&StringVar{ - Name: flagTLSServerName, - Target: &c.flagTLSServerName, - Default: "", - EnvVar: api.EnvVaultTLSServerName, - Completion: complete.PredictAnything, - Usage: "Name to use as the SNI host when connecting to the Vault " + - "server via TLS.", - }) - - f.BoolVar(&BoolVar{ - Name: flagNameTLSSkipVerify, - Target: &c.flagTLSSkipVerify, - Default: false, - EnvVar: api.EnvVaultSkipVerify, - Usage: "Disable verification of TLS certificates. Using this option " + - "is highly discouraged as it decreases the security of data " + - "transmissions to and from the Vault server.", - }) - - f.BoolVar(&BoolVar{ - Name: flagNameDisableRedirects, - Target: &c.flagDisableRedirects, - Default: false, - EnvVar: api.EnvVaultDisableRedirects, - Usage: "Disable the default client behavior, which honors a single " + - "redirect response from a request", - }) - - f.BoolVar(&BoolVar{ - Name: "policy-override", - Target: &c.flagPolicyOverride, - Default: false, - Usage: "Override a Sentinel policy that has a soft-mandatory " + - "enforcement_level specified", - }) - - f.DurationVar(&DurationVar{ - Name: "wrap-ttl", - Target: &c.flagWrapTTL, - Default: 0, - EnvVar: api.EnvVaultWrapTTL, - Completion: complete.PredictAnything, - Usage: "Wraps the response in a cubbyhole token with the requested " + - "TTL. The response is available via the \"vault unwrap\" command. 
" + - "The TTL is specified as a numeric string with suffix like \"30s\" " + - "or \"5m\".", - }) - - f.StringSliceVar(&StringSliceVar{ - Name: "mfa", - Target: &c.flagMFA, - Default: nil, - EnvVar: api.EnvVaultMFA, - Completion: complete.PredictAnything, - Usage: "Supply MFA credentials as part of X-Vault-MFA header.", - }) - - f.BoolVar(&BoolVar{ - Name: "output-curl-string", - Target: &c.flagOutputCurlString, - Default: false, - Usage: "Instead of executing the request, print an equivalent cURL " + - "command string and exit.", - }) - - f.BoolVar(&BoolVar{ - Name: "output-policy", - Target: &c.flagOutputPolicy, - Default: false, - Usage: "Instead of executing the request, print an example HCL " + - "policy that would be required to run this command, and exit.", - }) - - f.StringVar(&StringVar{ - Name: "unlock-key", - Target: &c.flagUnlockKey, - Default: notSetValue, - Completion: complete.PredictNothing, - Usage: "Key to unlock a namespace API lock.", - }) - - f.StringMapVar(&StringMapVar{ - Name: "header", - Target: &c.flagHeader, - Completion: complete.PredictAnything, - Usage: "Key-value pair provided as key=value to provide http header added to any request done by the CLI." 
+ - "Trying to add headers starting with 'X-Vault-' is forbidden and will make the command fail " + - "This can be specified multiple times.", - }) - - f.BoolVar(&BoolVar{ - Name: "non-interactive", - Target: &c.flagNonInteractive, - Default: false, - Usage: "When set true, prevents asking the user for input via the terminal.", - }) + key := fmt.Sprintf("%s%d", keyringUpgradePrefix, prevTerm) + value, err := b.encryptTracked(key, prevTerm, primary, buf) + b.l.RUnlock() + if err != nil { + return err + } + // Create upgrade key + pe := &physical.Entry{ + Key: key, + Value: value, + } + return b.backend.Put(ctx, pe) +} - } +// DestroyUpgrade destroys the upgrade path key to the given term +func (b *AESGCMBarrier) DestroyUpgrade(ctx context.Context, term uint32) error { + path := fmt.Sprintf("%s%d", keyringUpgradePrefix, term-1) + return b.Delete(ctx, path) +} - if bit&(FlagSetOutputField|FlagSetOutputFormat|FlagSetOutputDetailed) != 0 { - outputSet := set.NewFlagSet("Output Options") - - if bit&FlagSetOutputField != 0 { - outputSet.StringVar(&StringVar{ - Name: "field", - Target: &c.flagField, - Default: "", - Completion: complete.PredictAnything, - Usage: "Print only the field with the given name. Specifying " + - "this option will take precedence over other formatting " + - "directives. The result will not have a trailing newline " + - "making it ideal for piping to other processes.", - }) - } +// CheckUpgrade looks for an upgrade to the current term and installs it +func (b *AESGCMBarrier) CheckUpgrade(ctx context.Context) (bool, uint32, error) { + b.l.RLock() + if b.sealed { + b.l.RUnlock() + return false, 0, ErrBarrierSealed + } - if bit&FlagSetOutputFormat != 0 { - outputSet.StringVar(&StringVar{ - Name: "format", - Target: &c.flagFormat, - Default: "table", - EnvVar: EnvVaultFormat, - Completion: complete.PredictSet("table", "json", "yaml", "pretty", "raw"), - Usage: `Print the output in the given format. 
Valid formats - are "table", "json", "yaml", or "pretty". "raw" is allowed - for 'vault read' operations only.`, - }) - } + // Get the current term + activeTerm := b.keyring.ActiveTerm() - if bit&FlagSetOutputDetailed != 0 { - outputSet.BoolVar(&BoolVar{ - Name: "detailed", - Target: &c.flagDetailed, - Default: false, - EnvVar: EnvVaultDetailed, - Usage: "Enables additional metadata during some operations", - }) - } - } + // Check for an upgrade key + upgrade := fmt.Sprintf("%s%d", keyringUpgradePrefix, activeTerm) + entry, err := b.lockSwitchedGet(ctx, upgrade, false) + if err != nil { + b.l.RUnlock() + return false, 0, err + } - c.flags = set - }) + // Nothing to do if no upgrade + if entry == nil { + b.l.RUnlock() + return false, 0, nil + } + + // Upgrade from read lock to write lock + b.l.RUnlock() + b.l.Lock() + defer b.l.Unlock() + + // Validate base cases and refetch values again + + if b.sealed { + return false, 0, ErrBarrierSealed + } + + activeTerm = b.keyring.ActiveTerm() + + upgrade = fmt.Sprintf("%s%d", keyringUpgradePrefix, activeTerm) + entry, err = b.lockSwitchedGet(ctx, upgrade, false) + if err != nil { + return false, 0, err + } + + if entry == nil { + return false, 0, nil + } + + // Deserialize the key + key, err := DeserializeKey(entry.Value) + memzero(entry.Value) + if err != nil { + return false, 0, err + } - return c.flags + // Update the keyring + newKeyring, err := b.keyring.AddKey(key) + if err != nil { + return false, 0, fmt.Errorf("failed to add new encryption key: %w", err) + } + b.keyring = newKeyring + + // Done! + return true, key.Term, nil } -// FlagSets is a group of flag sets. 
-type FlagSets struct { - flagSets []*FlagSet - mainSet *flag.FlagSet - hiddens map[string]struct{} - completions complete.Flags - ui cli.Ui +// ActiveKeyInfo is used to inform details about the active key +func (b *AESGCMBarrier) ActiveKeyInfo() (*KeyInfo, error) { + b.l.RLock() + defer b.l.RUnlock() + if b.sealed { + return nil, ErrBarrierSealed + } + + // Determine the key install time + term := b.keyring.ActiveTerm() + key := b.keyring.TermKey(term) + + // Return the key info + info := &KeyInfo{ + Term: int(term), + InstallTime: key.InstallTime, + Encryptions: b.encryptions(), + } + return info, nil } -// NewFlagSets creates a new flag sets. -func NewFlagSets(ui cli.Ui) *FlagSets { - mainSet := flag.NewFlagSet("", flag.ContinueOnError) +// Rekey is used to change the root key used to protect the keyring +func (b *AESGCMBarrier) Rekey(ctx context.Context, key []byte) error { + b.l.Lock() + defer b.l.Unlock() - // Errors and usage are controlled by the CLI. - mainSet.Usage = func() {} - mainSet.SetOutput(ioutil.Discard) + newKeyring, err := b.updateRootKeyCommon(key) + if err != nil { + return err + } - return &FlagSets{ - flagSets: make([]*FlagSet, 0, 6), - mainSet: mainSet, - hiddens: make(map[string]struct{}), - completions: complete.Flags{}, - ui: ui, + // Persist the new keyring + if err := b.persistKeyring(ctx, newKeyring); err != nil { + return err } + + // Swap the keyrings + oldKeyring := b.keyring + b.keyring = newKeyring + oldKeyring.Zeroize(false) + return nil } -// NewFlagSet creates a new flag set from the given flag sets. 
-func (f *FlagSets) NewFlagSet(name string) *FlagSet { - flagSet := NewFlagSet(name) - flagSet.mainSet = f.mainSet - flagSet.completions = f.completions - f.flagSets = append(f.flagSets, flagSet) - return flagSet +// SetRootKey updates the keyring's in-memory root key but does not persist +// anything to storage +func (b *AESGCMBarrier) SetRootKey(key []byte) error { + b.l.Lock() + defer b.l.Unlock() + + newKeyring, err := b.updateRootKeyCommon(key) + if err != nil { + return err + } + + // Swap the keyrings + oldKeyring := b.keyring + b.keyring = newKeyring + oldKeyring.Zeroize(false) + return nil } -// Completions returns the completions for this flag set. -func (f *FlagSets) Completions() complete.Flags { - return f.completions +// Performs common tasks related to updating the root key; note that the lock +// must be held before calling this function +func (b *AESGCMBarrier) updateRootKeyCommon(key []byte) (*Keyring, error) { + if b.sealed { + return nil, ErrBarrierSealed + } + + // Verify the key size + min, max := b.KeyLength() + if len(key) < min || len(key) > max { + return nil, fmt.Errorf("key size must be %d or %d", min, max) + } + + return b.keyring.SetRootKey(key), nil } -type ( - ParseOptions interface{} - ParseOptionAllowRawFormat bool - DisableDisplayFlagWarning bool -) +// Put is used to insert or update an entry +func (b *AESGCMBarrier) Put(ctx context.Context, entry *logical.StorageEntry) error { + defer metrics.MeasureSince([]string{"barrier", "put"}, time.Now()) + b.l.RLock() + if b.sealed { + b.l.RUnlock() + return ErrBarrierSealed + } -// Parse parses the given flags, returning any errors. 
-// Warnings, if any, regarding the arguments format are sent to stdout -func (f *FlagSets) Parse(args []string, opts ...ParseOptions) error { - err := f.mainSet.Parse(args) + term := b.keyring.ActiveTerm() + primary, err := b.aeadForTerm(term) + b.l.RUnlock() + if err != nil { + return err + } + + return b.putInternal(ctx, term, primary, entry) +} + +func (b *AESGCMBarrier) putInternal(ctx context.Context, term uint32, primary cipher.AEAD, entry *logical.StorageEntry) error { + value, err := b.encryptTracked(entry.Key, term, primary, entry.Value) + if err != nil { + return err + } + pe := &physical.Entry{ + Key: entry.Key, + Value: value, + SealWrap: entry.SealWrap, + } + return b.backend.Put(ctx, pe) +} + +// Get is used to fetch an entry +func (b *AESGCMBarrier) Get(ctx context.Context, key string) (*logical.StorageEntry, error) { + return b.lockSwitchedGet(ctx, key, true) +} + +func (b *AESGCMBarrier) lockSwitchedGet(ctx context.Context, key string, getLock bool) (*logical.StorageEntry, error) { + defer metrics.MeasureSince([]string{"barrier", "get"}, time.Now()) + if getLock { + b.l.RLock() + } + if b.sealed { + if getLock { + b.l.RUnlock() + } + return nil, ErrBarrierSealed + } - displayFlagWarningsDisabled := false - for _, opt := range opts { - if value, ok := opt.(DisableDisplayFlagWarning); ok { - displayFlagWarningsDisabled = bool(value) + // Read the key from the backend + pe, err := b.backend.Get(ctx, key) + if err != nil { + if getLock { + b.l.RUnlock() + } + return nil, err + } else if pe == nil { + if getLock { + b.l.RUnlock() } + return nil, nil } - if !displayFlagWarningsDisabled { - warnings := generateFlagWarnings(f.Args()) - if warnings != "" && Format(f.ui) == "table" { - f.ui.Warn(warnings) + + if len(pe.Value) < 4 { + if getLock { + b.l.RUnlock() } + return nil, errors.New("invalid value") } + // Verify the term + term := binary.BigEndian.Uint32(pe.Value[:4]) + + // Get the GCM by term + // It is expensive to do this first but it is not a + 
// normal case that this won't match + gcm, err := b.aeadForTerm(term) + if getLock { + b.l.RUnlock() + } if err != nil { - return err + return nil, err + } + if gcm == nil { + return nil, fmt.Errorf("no decryption key available for term %d", term) } - // Now surface any other errors. - return generateFlagErrors(f, opts...) + // Decrypt the ciphertext + plain, err := b.decrypt(key, gcm, pe.Value) + if err != nil { + return nil, fmt.Errorf("decryption failed: %w", err) + } + + // Wrap in a logical entry + entry := &logical.StorageEntry{ + Key: key, + Value: plain, + SealWrap: pe.SealWrap, + } + return entry, nil } -// Parsed reports whether the command-line flags have been parsed. -func (f *FlagSets) Parsed() bool { - return f.mainSet.Parsed() +// Delete is used to permanently delete an entry +func (b *AESGCMBarrier) Delete(ctx context.Context, key string) error { + defer metrics.MeasureSince([]string{"barrier", "delete"}, time.Now()) + b.l.RLock() + sealed := b.sealed + b.l.RUnlock() + if sealed { + return ErrBarrierSealed + } + + return b.backend.Delete(ctx, key) } -// Args returns the remaining args after parsing. -func (f *FlagSets) Args() []string { - return f.mainSet.Args() +// List is used ot list all the keys under a given +// prefix, up to the next prefix. +func (b *AESGCMBarrier) List(ctx context.Context, prefix string) ([]string, error) { + defer metrics.MeasureSince([]string{"barrier", "list"}, time.Now()) + b.l.RLock() + sealed := b.sealed + b.l.RUnlock() + if sealed { + return nil, ErrBarrierSealed + } + + return b.backend.List(ctx, prefix) } -// Visit visits the flags in lexicographical order, calling fn for each. It -// visits only those flags that have been set. 
-func (f *FlagSets) Visit(fn func(*flag.Flag)) { - f.mainSet.Visit(fn) +// aeadForTerm returns the AES-GCM AEAD for the given term +func (b *AESGCMBarrier) aeadForTerm(term uint32) (cipher.AEAD, error) { + // Check for the keyring + keyring := b.keyring + if keyring == nil { + return nil, nil + } + + // Check the cache for the aead + b.cacheLock.RLock() + aead, ok := b.cache[term] + b.cacheLock.RUnlock() + if ok { + return aead, nil + } + + // Read the underlying key + key := keyring.TermKey(term) + if key == nil { + return nil, nil + } + + // Create a new aead + aead, err := b.aeadFromKey(key.Value) + if err != nil { + return nil, err + } + + // Update the cache + b.cacheLock.Lock() + b.cache[term] = aead + b.cacheLock.Unlock() + return aead, nil } -// Help builds custom help for this command, grouping by flag set. -func (f *FlagSets) Help() string { - var out bytes.Buffer +// aeadFromKey returns an AES-GCM AEAD using the given key. +func (b *AESGCMBarrier) aeadFromKey(key []byte) (cipher.AEAD, error) { + // Create the AES cipher + aesCipher, err := aes.NewCipher(key) + if err != nil { + return nil, fmt.Errorf("failed to create cipher: %w", err) + } - for _, set := range f.flagSets { - printFlagTitle(&out, set.name+":") - set.VisitAll(func(f *flag.Flag) { - // Skip any hidden flags - if v, ok := f.Value.(FlagVisibility); ok && v.Hidden() { - return - } - printFlagDetail(&out, f) - }) + // Create the GCM mode AEAD + gcm, err := cipher.NewGCM(aesCipher) + if err != nil { + return nil, fmt.Errorf("failed to initialize GCM mode") + } + return gcm, nil +} + +// encrypt is used to encrypt a value +func (b *AESGCMBarrier) encrypt(path string, term uint32, gcm cipher.AEAD, plain []byte) ([]byte, error) { + // Allocate the output buffer with room for term, version byte, + // nonce, GCM tag and the plaintext + + extra := termSize + 1 + gcm.NonceSize() + gcm.Overhead() + if len(plain) > math.MaxInt-extra { + return nil, ErrPlaintextTooLarge + } + + capacity := len(plain) + 
extra + size := termSize + 1 + gcm.NonceSize() + out := make([]byte, size, capacity) + + // Set the key term + binary.BigEndian.PutUint32(out[:4], term) + + // Set the version byte + out[4] = b.currentAESGCMVersionByte + + // Generate a random nonce + nonce := out[5 : 5+gcm.NonceSize()] + n, err := rand.Read(nonce) + if err != nil { + return nil, err + } + if n != len(nonce) { + return nil, errors.New("unable to read enough random bytes to fill gcm nonce") + } + + // Seal the output + switch b.currentAESGCMVersionByte { + case AESGCMVersion1: + out = gcm.Seal(out, nonce, plain, nil) + case AESGCMVersion2: + aad := []byte(nil) + if path != "" { + aad = []byte(path) + } + out = gcm.Seal(out, nonce, plain, aad) + default: + panic("Unknown AESGCM version") + } + + return out, nil +} + +func termLabel(term uint32) []metrics.Label { + return []metrics.Label{ + { + Name: "term", + Value: strconv.FormatUint(uint64(term), 10), + }, + } +} + +// decrypt is used to decrypt a value using the keyring +func (b *AESGCMBarrier) decrypt(path string, gcm cipher.AEAD, cipher []byte) ([]byte, error) { + if len(cipher) < 5+gcm.NonceSize() { + return nil, fmt.Errorf("invalid cipher length") + } + // Capture the parts + nonce := cipher[5 : 5+gcm.NonceSize()] + raw := cipher[5+gcm.NonceSize():] + out := make([]byte, 0, len(raw)-gcm.NonceSize()) + + // Attempt to open + switch cipher[4] { + case AESGCMVersion1: + return gcm.Open(out, nonce, raw, nil) + case AESGCMVersion2: + aad := []byte(nil) + if path != "" { + aad = []byte(path) + } + return gcm.Open(out, nonce, raw, aad) + default: + return nil, fmt.Errorf("version bytes mis-match") + } +} + +// Encrypt is used to encrypt in-memory for the BarrierEncryptor interface +func (b *AESGCMBarrier) Encrypt(ctx context.Context, key string, plaintext []byte) ([]byte, error) { + b.l.RLock() + if b.sealed { + b.l.RUnlock() + return nil, ErrBarrierSealed + } + + term := b.keyring.ActiveTerm() + primary, err := b.aeadForTerm(term) + b.l.RUnlock() + 
if err != nil { + return nil, err } - return strings.TrimRight(out.String(), "\n") + ciphertext, err := b.encryptTracked(key, term, primary, plaintext) + if err != nil { + return nil, err + } + + return ciphertext, nil } -// FlagSet is a grouped wrapper around a real flag set and a grouped flag set. -type FlagSet struct { - name string - flagSet *flag.FlagSet - mainSet *flag.FlagSet - completions complete.Flags +// Decrypt is used to decrypt in-memory for the BarrierEncryptor interface +func (b *AESGCMBarrier) Decrypt(_ context.Context, key string, ciphertext []byte) ([]byte, error) { + b.l.RLock() + if b.sealed { + b.l.RUnlock() + return nil, ErrBarrierSealed + } + + if len(ciphertext) == 0 { + b.l.RUnlock() + return nil, fmt.Errorf("empty ciphertext") + } + + // Verify the term + if len(ciphertext) < 4 { + b.l.RUnlock() + return nil, fmt.Errorf("invalid ciphertext term") + } + term := binary.BigEndian.Uint32(ciphertext[:4]) + + // Get the GCM by term + // It is expensive to do this first but it is not a + // normal case that this won't match + gcm, err := b.aeadForTerm(term) + b.l.RUnlock() + if err != nil { + return nil, err + } + if gcm == nil { + return nil, fmt.Errorf("no decryption key available for term %d", term) + } + + // Decrypt the ciphertext + plain, err := b.decrypt(key, gcm, ciphertext) + if err != nil { + return nil, fmt.Errorf("decryption failed: %w", err) + } + + return plain, nil } -// NewFlagSet creates a new flag set. -func NewFlagSet(name string) *FlagSet { - return &FlagSet{ - name: name, - flagSet: flag.NewFlagSet(name, flag.ContinueOnError), +func (b *AESGCMBarrier) Keyring() (*Keyring, error) { + b.l.RLock() + defer b.l.RUnlock() + if b.sealed { + return nil, ErrBarrierSealed } + + return b.keyring.Clone(), nil } -// Name returns the name of this flag set. 
-func (f *FlagSet) Name() string { - return f.name +func (b *AESGCMBarrier) ConsumeEncryptionCount(consumer func(int64) error) error { + if b.keyring != nil { + // Lock to prevent replacement of the key while we consume the encryptions + b.l.RLock() + defer b.l.RUnlock() + + c := b.UnaccountedEncryptions.Load() + err := consumer(c) + if err == nil && c > 0 { + // Consumer succeeded, remove those from local encryptions + b.UnaccountedEncryptions.Sub(c) + } + return err + } + return nil } -func (f *FlagSet) Visit(fn func(*flag.Flag)) { - f.flagSet.Visit(fn) +func (b *AESGCMBarrier) AddRemoteEncryptions(encryptions int64) { + // For rollup and persistence + b.UnaccountedEncryptions.Add(encryptions) + // For testing + b.RemoteEncryptions.Add(encryptions) } -func (f *FlagSet) VisitAll(fn func(*flag.Flag)) { - f.flagSet.VisitAll(fn) +func (b *AESGCMBarrier) encryptTracked(path string, term uint32, gcm cipher.AEAD, buf []byte) ([]byte, error) { + ct, err := b.encrypt(path, term, gcm, buf) + if err != nil { + return nil, err + } + // Increment the local encryption count, and track metrics + b.UnaccountedEncryptions.Add(1) + b.totalLocalEncryptions.Add(1) + metrics.IncrCounterWithLabels(barrierEncryptsMetric, 1, termLabel(term)) + + return ct, nil } -// printFlagTitle prints a consistently-formatted title to the given writer. -func printFlagTitle(w io.Writer, s string) { - fmt.Fprintf(w, "%s\n\n", s) +// UnaccountedEncryptions returns the number of encryptions made on the local instance only for the current key term +func (b *AESGCMBarrier) TotalLocalEncryptions() int64 { + return b.totalLocalEncryptions.Load() } -// printFlagDetail prints a single flag to the given writer. -func printFlagDetail(w io.Writer, f *flag.Flag) { - // Check if the flag is hidden - do not print any flag detail or help output - // if it is hidden. 
- if h, ok := f.Value.(FlagVisibility); ok && h.Hidden() { - return +func (b *AESGCMBarrier) CheckBarrierAutoRotate(ctx context.Context) (string, error) { + const oneYear = 24 * 365 * time.Hour + reason, err := func() (string, error) { + b.l.RLock() + defer b.l.RUnlock() + if b.keyring != nil { + // Rotation Checks + var reason string + + rc, err := b.RotationConfig() + if err != nil { + return "", err + } + + if !rc.Disabled { + activeKey := b.keyring.ActiveKey() + ops := b.encryptions() + switch { + case activeKey.Encryptions == 0 && !activeKey.InstallTime.IsZero() && time.Since(activeKey.InstallTime) > oneYear: + reason = legacyRotateReason + case ops > rc.MaxOperations: + reason = "reached max operations" + case rc.Interval > 0 && time.Since(activeKey.InstallTime) > rc.Interval: + reason = "rotation interval reached" + } + } + return reason, nil + } + return "", nil + }() + if err != nil { + return "", err + } + if reason != "" { + return reason, nil } - // Check for a detailed example - example := "" - if t, ok := f.Value.(FlagExample); ok { - example = t.Example() + b.l.Lock() + defer b.l.Unlock() + if b.keyring != nil { + err := b.persistEncryptions(ctx) + if err != nil { + return "", err + } } + return reason, nil +} - if example != "" { - fmt.Fprintf(w, " -%s=<%s>\n", f.Name, example) - } else { - fmt.Fprintf(w, " -%s\n", f.Name) +// Must be called with lock held +func (b *AESGCMBarrier) persistEncryptions(ctx context.Context) error { + if !b.sealed { + // Encryption count persistence + upe := b.UnaccountedEncryptions.Load() + if upe > 0 { + activeKey := b.keyring.ActiveKey() + // Move local (unpersisted) encryptions to the key and persist. This prevents us from needing to persist if + // there has been no activity. Since persistence performs an encryption, perversely we zero out after + // persistence and add 1 to the count to avoid this operation guaranteeing we need another + // autoRotateCheckInterval later. 
+ newEncs := upe + 1 + activeKey.Encryptions += uint64(newEncs) + newKeyring := b.keyring.Clone() + err := b.persistKeyringBestEffort(ctx, newKeyring) + if err != nil { + return err + } + b.UnaccountedEncryptions.Sub(newEncs) + } } + return nil +} - usage := reRemoveWhitespace.ReplaceAllString(f.Usage, " ") - indented := wrapAtLengthWithPadding(usage, 6) - fmt.Fprintf(w, "%s\n\n", indented) +// Mostly for testing, returns the total number of encryption operations performed on the active term +func (b *AESGCMBarrier) encryptions() int64 { + if b.keyring != nil { + activeKey := b.keyring.ActiveKey() + if activeKey != nil { + return b.UnaccountedEncryptions.Load() + int64(activeKey.Encryptions) + } + } + return 0 } diff --git a/command/command_stubs_oss.go b/command/command_stubs_oss.go index bc90a1585aaf..c2657e7fd8cc 100644 --- a/command/command_stubs_oss.go +++ b/command/command_stubs_oss.go @@ -1,33 +1,722 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 -//go:build !enterprise - package command -//go:generate go run github.com/hashicorp/vault/tools/stubmaker - import ( - "github.com/hashicorp/vault/command/server" - "github.com/hashicorp/vault/vault" - "github.com/mitchellh/cli" + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "strings" + "sync" + "time" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/command/token" + "github.com/hashicorp/vault/helper/namespace" + "github.com/mattn/go-isatty" + "github.com/pkg/errors" + "github.com/posener/complete" ) -func entInitCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions, commands map[string]cli.CommandFactory) { +const ( + // maxLineLength is the maximum width of any line. + maxLineLength int = 78 + + // notSetValue is a flag value for a not-set value + notSetValue = "(not set)" +) + +// reRemoveWhitespace is a regular expression for stripping whitespace from +// a string. 
+var reRemoveWhitespace = regexp.MustCompile(`[\s]+`) + +type BaseCommand struct { + UI cli.Ui + + flags *FlagSets + flagsOnce sync.Once + + flagAddress string + flagAgentProxyAddress string + flagCACert string + flagCAPath string + flagClientCert string + flagClientKey string + flagNamespace string + flagNS string + flagPolicyOverride bool + flagTLSServerName string + flagTLSSkipVerify bool + flagDisableRedirects bool + flagWrapTTL time.Duration + flagUnlockKey string + + flagFormat string + flagField string + flagDetailed bool + flagOutputCurlString bool + flagOutputPolicy bool + flagNonInteractive bool + addrWarning string + + flagMFA []string + + flagHeader map[string]string + + tokenHelper token.TokenHelper + + client *api.Client +} + +// Client returns the HTTP API client. The client is cached on the command to +// save performance on future calls. +func (c *BaseCommand) Client() (*api.Client, error) { + // Read the test client if present + if c.client != nil { + return c.client, nil + } + + if c.addrWarning != "" && c.UI != nil { + if os.Getenv("VAULT_ADDR") == "" { + if !c.flagNonInteractive && isatty.IsTerminal(os.Stdin.Fd()) { + c.UI.Warn(wrapAtLength(c.addrWarning)) + } + } + } + + config := api.DefaultConfig() + + if err := config.ReadEnvironment(); err != nil { + return nil, errors.Wrap(err, "failed to read environment") + } + + if c.flagAddress != "" { + config.Address = c.flagAddress + } + if c.flagAgentProxyAddress != "" { + config.Address = c.flagAgentProxyAddress + } + + if c.flagOutputCurlString { + config.OutputCurlString = c.flagOutputCurlString + } + if c.flagOutputPolicy { + config.OutputPolicy = c.flagOutputPolicy + } + + // If we need custom TLS configuration, then set it + if c.flagCACert != "" || c.flagCAPath != "" || c.flagClientCert != "" || + c.flagClientKey != "" || c.flagTLSServerName != "" || c.flagTLSSkipVerify { + t := &api.TLSConfig{ + CACert: c.flagCACert, + CAPath: c.flagCAPath, + ClientCert: c.flagClientCert, + ClientKey: 
c.flagClientKey, + TLSServerName: c.flagTLSServerName, + Insecure: c.flagTLSSkipVerify, + } + + // Setup TLS config + if err := config.ConfigureTLS(t); err != nil { + return nil, errors.Wrap(err, "failed to setup TLS config") + } + } + + // Build the client + client, err := api.NewClient(config) + if err != nil { + return nil, errors.Wrap(err, "failed to create client") + } + + // Turn off retries on the CLI + if os.Getenv(api.EnvVaultMaxRetries) == "" { + client.SetMaxRetries(0) + } + + // Set the wrapping function + client.SetWrappingLookupFunc(c.DefaultWrappingLookupFunc) + + // Get the token if it came in from the environment + token := client.Token() + + // If we don't have a token, check the token helper + if token == "" { + helper, err := c.TokenHelper() + if err != nil { + return nil, errors.Wrap(err, "failed to get token helper") + } + token, err = helper.Get() + if err != nil { + return nil, errors.Wrap(err, "failed to get token from token helper") + } + } + + // Set the token + if token != "" { + client.SetToken(token) + } + + client.SetMFACreds(c.flagMFA) + + // flagNS takes precedence over flagNamespace. After resolution, point both + // flags to the same value to be able to use them interchangeably anywhere. 
+ if c.flagNS != notSetValue { + c.flagNamespace = c.flagNS + } + if c.flagNamespace != notSetValue { + client.SetNamespace(namespace.Canonicalize(c.flagNamespace)) + } + if c.flagPolicyOverride { + client.SetPolicyOverride(c.flagPolicyOverride) + } + + if c.flagHeader != nil { + + var forbiddenHeaders []string + for key, val := range c.flagHeader { + + if strings.HasPrefix(key, "X-Vault-") { + forbiddenHeaders = append(forbiddenHeaders, key) + continue + } + client.AddHeader(key, val) + } + + if len(forbiddenHeaders) > 0 { + return nil, fmt.Errorf("failed to setup Headers[%s]: Header starting by 'X-Vault-' are for internal usage only", strings.Join(forbiddenHeaders, ", ")) + } + } + + c.client = client + + return client, nil } -func entEnableFourClusterDev(c *ServerCommand, base *vault.CoreConfig, info map[string]string, infoKeys []string, tempDir string) int { - c.logger.Error("-dev-four-cluster only supported in enterprise Vault") - return 1 +// SetAddress sets the token helper on the command; useful for the demo server and other outside cases. +func (c *BaseCommand) SetAddress(addr string) { + c.flagAddress = addr } -func entAdjustCoreConfig(config *server.Config, coreConfig *vault.CoreConfig) { +// SetTokenHelper sets the token helper on the command. +func (c *BaseCommand) SetTokenHelper(th token.TokenHelper) { + c.tokenHelper = th } -func entCheckStorageType(coreConfig *vault.CoreConfig) bool { - return true +// TokenHelper returns the token helper attached to the command. +func (c *BaseCommand) TokenHelper() (token.TokenHelper, error) { + if c.tokenHelper != nil { + return c.tokenHelper, nil + } + + helper, err := DefaultTokenHelper() + if err != nil { + return nil, err + } + return helper, nil +} + +// DefaultWrappingLookupFunc is the default wrapping function based on the +// CLI flag. 
+func (c *BaseCommand) DefaultWrappingLookupFunc(operation, path string) string { + if c.flagWrapTTL != 0 { + return c.flagWrapTTL.String() + } + + return api.DefaultWrappingLookupFunc(operation, path) +} + +// getMFAValidationRequired checks to see if the secret exists and has an MFA +// requirement. If MFA is required and the number of constraints is greater than +// 1, we can assert that interactive validation is not required. +func (c *BaseCommand) getMFAValidationRequired(secret *api.Secret) bool { + if secret != nil && secret.Auth != nil && secret.Auth.MFARequirement != nil { + if c.flagMFA == nil && len(secret.Auth.MFARequirement.MFAConstraints) == 1 { + return true + } else if len(secret.Auth.MFARequirement.MFAConstraints) > 1 { + return true + } + } + + return false +} + +// getInteractiveMFAMethodInfo returns MFA method information only if operating +// in interactive mode and one MFA method is configured. +func (c *BaseCommand) getInteractiveMFAMethodInfo(secret *api.Secret) *MFAMethodInfo { + if secret == nil || secret.Auth == nil || secret.Auth.MFARequirement == nil { + return nil + } + + mfaConstraints := secret.Auth.MFARequirement.MFAConstraints + if c.flagNonInteractive || len(mfaConstraints) != 1 || !isatty.IsTerminal(os.Stdin.Fd()) { + return nil + } + + for _, mfaConstraint := range mfaConstraints { + if len(mfaConstraint.Any) != 1 { + return nil + } + + return &MFAMethodInfo{ + methodType: mfaConstraint.Any[0].Type, + methodID: mfaConstraint.Any[0].ID, + usePasscode: mfaConstraint.Any[0].UsesPasscode, + } + } + + return nil } -func entGetFIPSInfoKey() string { - return "" +func (c *BaseCommand) validateMFA(reqID string, methodInfo MFAMethodInfo) (*api.Secret, error) { + var passcode string + var err error + if methodInfo.usePasscode { + passcode, err = c.UI.AskSecret(fmt.Sprintf("Enter the passphrase for methodID %q of type %q:", methodInfo.methodID, methodInfo.methodType)) + if err != nil { + return nil, fmt.Errorf("failed to read passphrase: 
%w. please validate the login by sending a request to sys/mfa/validate", err) + } + } else { + c.UI.Warn("Asking Vault to perform MFA validation with upstream service. " + + "You should receive a push notification in your authenticator app shortly") + } + + // passcode could be an empty string + mfaPayload := map[string]interface{}{ + methodInfo.methodID: []string{passcode}, + } + + client, err := c.Client() + if err != nil { + return nil, err + } + + return client.Sys().MFAValidate(reqID, mfaPayload) +} + +type FlagSetBit uint + +const ( + FlagSetNone FlagSetBit = 1 << iota + FlagSetHTTP + FlagSetOutputField + FlagSetOutputFormat + FlagSetOutputDetailed +) + +// flagSet creates the flags for this command. The result is cached on the +// command to save performance on future calls. +func (c *BaseCommand) flagSet(bit FlagSetBit) *FlagSets { + c.flagsOnce.Do(func() { + set := NewFlagSets(c.UI) + + // These flag sets will apply to all leaf subcommands. + // TODO: Optional, but FlagSetHTTP can be safely removed from the individual + // Flags() subcommands. + bit = bit | FlagSetHTTP + + if bit&FlagSetHTTP != 0 { + f := set.NewFlagSet("HTTP Options") + + addrStringVar := &StringVar{ + Name: flagNameAddress, + Target: &c.flagAddress, + EnvVar: api.EnvVaultAddress, + Completion: complete.PredictAnything, + Usage: "Address of the Vault server.", + } + + if c.flagAddress != "" { + addrStringVar.Default = c.flagAddress + } else { + addrStringVar.Default = "https://127.0.0.1:8200" + c.addrWarning = fmt.Sprintf("WARNING! VAULT_ADDR and -address unset. 
Defaulting to %s.", addrStringVar.Default) + } + f.StringVar(addrStringVar) + + agentAddrStringVar := &StringVar{ + Name: "agent-address", + Target: &c.flagAgentProxyAddress, + EnvVar: api.EnvVaultAgentAddr, + Completion: complete.PredictAnything, + Usage: "Address of the Agent.", + } + f.StringVar(agentAddrStringVar) + + f.StringVar(&StringVar{ + Name: flagNameCACert, + Target: &c.flagCACert, + Default: "", + EnvVar: api.EnvVaultCACert, + Completion: complete.PredictFiles("*"), + Usage: "Path on the local disk to a single PEM-encoded CA " + + "certificate to verify the Vault server's SSL certificate. This " + + "takes precedence over -ca-path.", + }) + + f.StringVar(&StringVar{ + Name: flagNameCAPath, + Target: &c.flagCAPath, + Default: "", + EnvVar: api.EnvVaultCAPath, + Completion: complete.PredictDirs("*"), + Usage: "Path on the local disk to a directory of PEM-encoded CA " + + "certificates to verify the Vault server's SSL certificate.", + }) + + f.StringVar(&StringVar{ + Name: flagNameClientCert, + Target: &c.flagClientCert, + Default: "", + EnvVar: api.EnvVaultClientCert, + Completion: complete.PredictFiles("*"), + Usage: "Path on the local disk to a single PEM-encoded CA " + + "certificate to use for TLS authentication to the Vault server. If " + + "this flag is specified, -client-key is also required.", + }) + + f.StringVar(&StringVar{ + Name: flagNameClientKey, + Target: &c.flagClientKey, + Default: "", + EnvVar: api.EnvVaultClientKey, + Completion: complete.PredictFiles("*"), + Usage: "Path on the local disk to a single PEM-encoded private key " + + "matching the client certificate from -client-cert.", + }) + + f.StringVar(&StringVar{ + Name: "namespace", + Target: &c.flagNamespace, + Default: notSetValue, // this can never be a real value + EnvVar: api.EnvVaultNamespace, + Completion: complete.PredictAnything, + Usage: "The namespace to use for the command. Setting this is not " + + "necessary but allows using relative paths. 
-ns can be used as " + + "shortcut.", + }) + + f.StringVar(&StringVar{ + Name: "ns", + Target: &c.flagNS, + Default: notSetValue, // this can never be a real value + Completion: complete.PredictAnything, + Hidden: true, + Usage: "Alias for -namespace. This takes precedence over -namespace.", + }) + + f.StringVar(&StringVar{ + Name: flagTLSServerName, + Target: &c.flagTLSServerName, + Default: "", + EnvVar: api.EnvVaultTLSServerName, + Completion: complete.PredictAnything, + Usage: "Name to use as the SNI host when connecting to the Vault " + + "server via TLS.", + }) + + f.BoolVar(&BoolVar{ + Name: flagNameTLSSkipVerify, + Target: &c.flagTLSSkipVerify, + Default: false, + EnvVar: api.EnvVaultSkipVerify, + Usage: "Disable verification of TLS certificates. Using this option " + + "is highly discouraged as it decreases the security of data " + + "transmissions to and from the Vault server.", + }) + + f.BoolVar(&BoolVar{ + Name: flagNameDisableRedirects, + Target: &c.flagDisableRedirects, + Default: false, + EnvVar: api.EnvVaultDisableRedirects, + Usage: "Disable the default client behavior, which honors a single " + + "redirect response from a request", + }) + + f.BoolVar(&BoolVar{ + Name: "policy-override", + Target: &c.flagPolicyOverride, + Default: false, + Usage: "Override a Sentinel policy that has a soft-mandatory " + + "enforcement_level specified", + }) + + f.DurationVar(&DurationVar{ + Name: "wrap-ttl", + Target: &c.flagWrapTTL, + Default: 0, + EnvVar: api.EnvVaultWrapTTL, + Completion: complete.PredictAnything, + Usage: "Wraps the response in a cubbyhole token with the requested " + + "TTL. The response is available via the \"vault unwrap\" command. 
" + + "The TTL is specified as a numeric string with suffix like \"30s\" " + + "or \"5m\".", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "mfa", + Target: &c.flagMFA, + Default: nil, + EnvVar: api.EnvVaultMFA, + Completion: complete.PredictAnything, + Usage: "Supply MFA credentials as part of X-Vault-MFA header.", + }) + + f.BoolVar(&BoolVar{ + Name: "output-curl-string", + Target: &c.flagOutputCurlString, + Default: false, + Usage: "Instead of executing the request, print an equivalent cURL " + + "command string and exit.", + }) + + f.BoolVar(&BoolVar{ + Name: "output-policy", + Target: &c.flagOutputPolicy, + Default: false, + Usage: "Instead of executing the request, print an example HCL " + + "policy that would be required to run this command, and exit.", + }) + + f.StringVar(&StringVar{ + Name: "unlock-key", + Target: &c.flagUnlockKey, + Default: notSetValue, + Completion: complete.PredictNothing, + Usage: "Key to unlock a namespace API lock.", + }) + + f.StringMapVar(&StringMapVar{ + Name: "header", + Target: &c.flagHeader, + Completion: complete.PredictAnything, + Usage: "Key-value pair provided as key=value to provide http header added to any request done by the CLI." + + "Trying to add headers starting with 'X-Vault-' is forbidden and will make the command fail " + + "This can be specified multiple times.", + }) + + f.BoolVar(&BoolVar{ + Name: "non-interactive", + Target: &c.flagNonInteractive, + Default: false, + Usage: "When set true, prevents asking the user for input via the terminal.", + }) + + } + + if bit&(FlagSetOutputField|FlagSetOutputFormat|FlagSetOutputDetailed) != 0 { + outputSet := set.NewFlagSet("Output Options") + + if bit&FlagSetOutputField != 0 { + outputSet.StringVar(&StringVar{ + Name: "field", + Target: &c.flagField, + Default: "", + Completion: complete.PredictAnything, + Usage: "Print only the field with the given name. Specifying " + + "this option will take precedence over other formatting " + + "directives. 
The result will not have a trailing newline " + + "making it ideal for piping to other processes.", + }) + } + + if bit&FlagSetOutputFormat != 0 { + outputSet.StringVar(&StringVar{ + Name: "format", + Target: &c.flagFormat, + Default: "table", + EnvVar: EnvVaultFormat, + Completion: complete.PredictSet("table", "json", "yaml", "pretty", "raw"), + Usage: `Print the output in the given format. Valid formats + are "table", "json", "yaml", or "pretty". "raw" is allowed + for 'vault read' operations only.`, + }) + } + + if bit&FlagSetOutputDetailed != 0 { + outputSet.BoolVar(&BoolVar{ + Name: "detailed", + Target: &c.flagDetailed, + Default: false, + EnvVar: EnvVaultDetailed, + Usage: "Enables additional metadata during some operations", + }) + } + } + + c.flags = set + }) + + return c.flags +} + +// FlagSets is a group of flag sets. +type FlagSets struct { + flagSets []*FlagSet + mainSet *flag.FlagSet + hiddens map[string]struct{} + completions complete.Flags + ui cli.Ui +} + +// NewFlagSets creates a new flag sets. +func NewFlagSets(ui cli.Ui) *FlagSets { + mainSet := flag.NewFlagSet("", flag.ContinueOnError) + + // Errors and usage are controlled by the CLI. + mainSet.Usage = func() {} + mainSet.SetOutput(ioutil.Discard) + + return &FlagSets{ + flagSets: make([]*FlagSet, 0, 6), + mainSet: mainSet, + hiddens: make(map[string]struct{}), + completions: complete.Flags{}, + ui: ui, + } +} + +// NewFlagSet creates a new flag set from the given flag sets. +func (f *FlagSets) NewFlagSet(name string) *FlagSet { + flagSet := NewFlagSet(name) + flagSet.mainSet = f.mainSet + flagSet.completions = f.completions + f.flagSets = append(f.flagSets, flagSet) + return flagSet +} + +// Completions returns the completions for this flag set. +func (f *FlagSets) Completions() complete.Flags { + return f.completions +} + +type ( + ParseOptions interface{} + ParseOptionAllowRawFormat bool + DisableDisplayFlagWarning bool +) + +// Parse parses the given flags, returning any errors. 
+// Warnings, if any, regarding the arguments format are sent to stdout +func (f *FlagSets) Parse(args []string, opts ...ParseOptions) error { + err := f.mainSet.Parse(args) + + displayFlagWarningsDisabled := false + for _, opt := range opts { + if value, ok := opt.(DisableDisplayFlagWarning); ok { + displayFlagWarningsDisabled = bool(value) + } + } + if !displayFlagWarningsDisabled { + warnings := generateFlagWarnings(f.Args()) + if warnings != "" && Format(f.ui) == "table" { + f.ui.Warn(warnings) + } + } + + if err != nil { + return err + } + + // Now surface any other errors. + return generateFlagErrors(f, opts...) +} + +// Parsed reports whether the command-line flags have been parsed. +func (f *FlagSets) Parsed() bool { + return f.mainSet.Parsed() +} + +// Args returns the remaining args after parsing. +func (f *FlagSets) Args() []string { + return f.mainSet.Args() +} + +// Visit visits the flags in lexicographical order, calling fn for each. It +// visits only those flags that have been set. +func (f *FlagSets) Visit(fn func(*flag.Flag)) { + f.mainSet.Visit(fn) +} + +// Help builds custom help for this command, grouping by flag set. +func (f *FlagSets) Help() string { + var out bytes.Buffer + + for _, set := range f.flagSets { + printFlagTitle(&out, set.name+":") + set.VisitAll(func(f *flag.Flag) { + // Skip any hidden flags + if v, ok := f.Value.(FlagVisibility); ok && v.Hidden() { + return + } + printFlagDetail(&out, f) + }) + } + + return strings.TrimRight(out.String(), "\n") +} + +// FlagSet is a grouped wrapper around a real flag set and a grouped flag set. +type FlagSet struct { + name string + flagSet *flag.FlagSet + mainSet *flag.FlagSet + completions complete.Flags +} + +// NewFlagSet creates a new flag set. +func NewFlagSet(name string) *FlagSet { + return &FlagSet{ + name: name, + flagSet: flag.NewFlagSet(name, flag.ContinueOnError), + } +} + +// Name returns the name of this flag set. 
+func (f *FlagSet) Name() string { + return f.name +} + +func (f *FlagSet) Visit(fn func(*flag.Flag)) { + f.flagSet.Visit(fn) +} + +func (f *FlagSet) VisitAll(fn func(*flag.Flag)) { + f.flagSet.VisitAll(fn) +} + +// printFlagTitle prints a consistently-formatted title to the given writer. +func printFlagTitle(w io.Writer, s string) { + fmt.Fprintf(w, "%s\n\n", s) +} + +// printFlagDetail prints a single flag to the given writer. +func printFlagDetail(w io.Writer, f *flag.Flag) { + // Check if the flag is hidden - do not print any flag detail or help output + // if it is hidden. + if h, ok := f.Value.(FlagVisibility); ok && h.Hidden() { + return + } + + // Check for a detailed example + example := "" + if t, ok := f.Value.(FlagExample); ok { + example = t.Example() + } + + if example != "" { + fmt.Fprintf(w, " -%s=<%s>\n", f.Name, example) + } else { + fmt.Fprintf(w, " -%s\n", f.Name) + } + + usage := reRemoveWhitespace.ReplaceAllString(f.Usage, " ") + indented := wrapAtLengthWithPadding(usage, 6) + fmt.Fprintf(w, "%s\n\n", indented) } diff --git a/command/command_test.go b/command/command_test.go index 34bd3b453355..52a7ff996a5c 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -1,334 +1,88 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "context" - "encoding/base64" - "net" - "net/http" - "strings" - "testing" - "time" - - log "github.com/hashicorp/go-hclog" - kv "github.com/hashicorp/vault-plugin-secrets-kv" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/audit" - "github.com/hashicorp/vault/builtin/logical/pki" - "github.com/hashicorp/vault/builtin/logical/ssh" - "github.com/hashicorp/vault/builtin/logical/transit" - "github.com/hashicorp/vault/helper/benchhelpers" - "github.com/hashicorp/vault/helper/builtinplugins" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/sdk/physical/inmem" - "github.com/hashicorp/vault/vault" - "github.com/hashicorp/vault/vault/seal" - "github.com/mitchellh/cli" - - auditFile "github.com/hashicorp/vault/builtin/audit/file" - credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" - vaulthttp "github.com/hashicorp/vault/http" -) - -var ( - defaultVaultLogger = log.NewNullLogger() - - defaultVaultCredentialBackends = map[string]logical.Factory{ - "userpass": credUserpass.Factory, - } - - defaultVaultAuditBackends = map[string]audit.Factory{ - "file": auditFile.Factory, - } - - defaultVaultLogicalBackends = map[string]logical.Factory{ - "generic-leased": vault.LeasedPassthroughBackendFactory, - "pki": pki.Factory, - "ssh": ssh.Factory, - "transit": transit.Factory, - "kv": kv.Factory, - } -) - -// assertNoTabs asserts the CLI help has no tab characters. -func assertNoTabs(tb testing.TB, c cli.Command) { - tb.Helper() - - if strings.ContainsRune(c.Help(), '\t') { - tb.Errorf("%#v help output contains tabs", c) - } -} - -// testVaultServer creates a test vault cluster and returns a configured API -// client and closer function. 
-func testVaultServer(tb testing.TB) (*api.Client, func()) { - tb.Helper() - - client, _, closer := testVaultServerUnseal(tb) - return client, closer -} - -func testVaultServerWithSecrets(ctx context.Context, tb testing.TB) (*api.Client, func()) { - tb.Helper() - - client, _, closer := testVaultServerUnseal(tb) - - // enable kv-v1 backend - if err := client.Sys().Mount("kv-v1/", &api.MountInput{ - Type: "kv-v1", - }); err != nil { - tb.Fatal(err) - } - - // enable kv-v2 backend - if err := client.Sys().Mount("kv-v2/", &api.MountInput{ - Type: "kv-v2", - }); err != nil { - tb.Fatal(err) - } - - // populate dummy secrets - for _, path := range []string{ - "foo", - "app-1/foo", - "app-1/bar", - "app-1/nested/baz", - } { - if err := client.KVv1("kv-v1").Put(ctx, path, map[string]interface{}{ - "user": "test", - "password": "Hashi123", - }); err != nil { - tb.Fatal(err) - } - - if _, err := client.KVv2("kv-v2").Put(ctx, path, map[string]interface{}{ - "user": "test", - "password": "Hashi123", - }); err != nil { - tb.Fatal(err) - } - } - - return client, closer -} - -func testVaultServerWithKVVersion(tb testing.TB, kvVersion string) (*api.Client, func()) { - tb.Helper() - - client, _, closer := testVaultServerUnsealWithKVVersionWithSeal(tb, kvVersion, nil) - return client, closer -} - -func testVaultServerAllBackends(tb testing.TB) (*api.Client, func()) { - tb.Helper() - - client, _, closer := testVaultServerCoreConfig(tb, &vault.CoreConfig{ - CredentialBackends: credentialBackends, - AuditBackends: auditBackends, - LogicalBackends: logicalBackends, - BuiltinRegistry: builtinplugins.Registry, - }) - return client, closer -} - -// testVaultServerAutoUnseal creates a test vault cluster and sets it up with auto unseal -// the function returns a client, the recovery keys, and a closer function -func testVaultServerAutoUnseal(tb testing.TB) (*api.Client, []string, func()) { - testSeal, _ := seal.NewTestSeal(nil) - autoSeal := vault.NewAutoSeal(testSeal) - return 
testVaultServerUnsealWithKVVersionWithSeal(tb, "1", autoSeal) -} - -// testVaultServerUnseal creates a test vault cluster and returns a configured -// API client, list of unseal keys (as strings), and a closer function. -func testVaultServerUnseal(tb testing.TB) (*api.Client, []string, func()) { - return testVaultServerUnsealWithKVVersionWithSeal(tb, "1", nil) -} - -func testVaultServerUnsealWithKVVersionWithSeal(tb testing.TB, kvVersion string, seal vault.Seal) (*api.Client, []string, func()) { - tb.Helper() - - return testVaultServerCoreConfigWithOpts(tb, &vault.CoreConfig{ - CredentialBackends: defaultVaultCredentialBackends, - AuditBackends: defaultVaultAuditBackends, - LogicalBackends: defaultVaultLogicalBackends, - BuiltinRegistry: builtinplugins.Registry, - Seal: seal, - }, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - NumCores: 1, - KVVersion: kvVersion, - }) -} - -// testVaultServerUnseal creates a test vault cluster and returns a configured -// API client, list of unseal keys (as strings), and a closer function -// configured with the given plugin directory. -func testVaultServerPluginDir(tb testing.TB, pluginDir string) (*api.Client, []string, func()) { - tb.Helper() - - return testVaultServerCoreConfig(tb, &vault.CoreConfig{ - CredentialBackends: defaultVaultCredentialBackends, - AuditBackends: defaultVaultAuditBackends, - LogicalBackends: defaultVaultLogicalBackends, - PluginDirectory: pluginDir, - BuiltinRegistry: builtinplugins.Registry, - }) -} - -func testVaultServerCoreConfig(tb testing.TB, coreConfig *vault.CoreConfig) (*api.Client, []string, func()) { - return testVaultServerCoreConfigWithOpts(tb, coreConfig, &vault.TestClusterOptions{ - HandlerFunc: vaulthttp.Handler, - NumCores: 1, // Default is 3, but we don't need that many - }) -} - -// testVaultServerCoreConfig creates a new vault cluster with the given core -// configuration. This is a lower-level test helper. 
If the seal config supports recovery keys, then -// recovery keys are returned. Otherwise, unseal keys are returned -func testVaultServerCoreConfigWithOpts(tb testing.TB, coreConfig *vault.CoreConfig, opts *vault.TestClusterOptions) (*api.Client, []string, func()) { - tb.Helper() - - cluster := vault.NewTestCluster(benchhelpers.TBtoT(tb), coreConfig, opts) - cluster.Start() - - // Make it easy to get access to the active - core := cluster.Cores[0].Core - vault.TestWaitActive(benchhelpers.TBtoT(tb), core) - - // Get the client already setup for us! - client := cluster.Cores[0].Client - client.SetToken(cluster.RootToken) - - var keys [][]byte - if coreConfig.Seal != nil && coreConfig.Seal.RecoveryKeySupported() { - keys = cluster.RecoveryKeys - } else { - keys = cluster.BarrierKeys - } - - return client, encodeKeys(keys), cluster.Cleanup -} - -// Convert the unseal keys to base64 encoded, since these are how the user -// will get them. -func encodeKeys(rawKeys [][]byte) []string { - keys := make([]string, len(rawKeys)) - for i := range rawKeys { - keys[i] = base64.StdEncoding.EncodeToString(rawKeys[i]) - } - return keys -} - -// testVaultServerUninit creates an uninitialized server. 
-func testVaultServerUninit(tb testing.TB) (*api.Client, func()) { - tb.Helper() - - inm, err := inmem.NewInmem(nil, defaultVaultLogger) - if err != nil { - tb.Fatal(err) - } - - core, err := vault.NewCore(&vault.CoreConfig{ - DisableMlock: true, - Physical: inm, - CredentialBackends: defaultVaultCredentialBackends, - AuditBackends: defaultVaultAuditBackends, - LogicalBackends: defaultVaultLogicalBackends, - BuiltinRegistry: builtinplugins.Registry, - }) - if err != nil { - tb.Fatal(err) - } - - ln, addr := vaulthttp.TestServer(tb, core) - - client, err := api.NewClient(&api.Config{ - Address: addr, - }) - if err != nil { - tb.Fatal(err) - } - - closer := func() { - core.Shutdown() - ln.Close() - } - - return client, closer -} - -// testVaultServerBad creates an http server that returns a 500 on each request -// to simulate failures. -func testVaultServerBad(tb testing.TB) (*api.Client, func()) { - tb.Helper() - - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - tb.Fatal(err) - } - - server := &http.Server{ - Addr: "127.0.0.1:0", - Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - http.Error(w, "500 internal server error", http.StatusInternalServerError) - }), - ReadTimeout: 1 * time.Second, - ReadHeaderTimeout: 1 * time.Second, - WriteTimeout: 1 * time.Second, - IdleTimeout: 1 * time.Second, - } - - go func() { - if err := server.Serve(listener); err != nil && err != http.ErrServerClosed { - tb.Fatal(err) - } - }() - - client, err := api.NewClient(&api.Config{ - Address: "http://" + listener.Addr().String(), - }) - if err != nil { - tb.Fatal(err) - } - - return client, func() { - ctx, done := context.WithTimeout(context.Background(), 5*time.Second) - defer done() - - server.Shutdown(ctx) - } -} - -// testTokenAndAccessor creates a new authentication token capable of being renewed with -// the default policy attached. It returns the token and it's accessor. 
/**
 * Copyright (c) HashiCorp, Inc.
 * SPDX-License-Identifier: BUSL-1.1
 */

// base handlers used in mirage config when a specific handler is not specified
const EXPIRY_DATE = '2021-05-12T23:20:50.52Z';

// License payload is identical for the persisted and autoloaded entries.
const LICENSE_INFO = {
  expiration_time: EXPIRY_DATE,
  features: ['DR Replication', 'Namespaces', 'Lease Count Quotas', 'Automated Snapshots'],
  license_id: '0eca7ef8-ebc0-f875-315e-3cc94a7870cf',
  performance_standby_count: 0,
  start_time: '2020-04-28T00:00:00Z',
};

export default function (server) {
  // Echo whatever feature flags are seeded in the mirage db (or null).
  server.get('/sys/internal/ui/feature-flags', (db) => {
    const record = db.features.first();
    return {
      data: {
        feature_flags: record ? record.feature_flags : null,
      },
    };
  });

  // Health response for an initialized, unsealed enterprise cluster.
  server.get('/sys/health', () => ({
    enterprise: true,
    initialized: true,
    sealed: false,
    standby: false,
    license: {
      expiry: EXPIRY_DATE,
      state: 'stored',
    },
    performance_standby: false,
    replication_performance_mode: 'disabled',
    replication_dr_mode: 'disabled',
    server_time_utc: 1622562585,
    version: '1.9.0+ent',
    cluster_name: 'vault-cluster-e779cd7c',
    cluster_id: '5f20f5ab-acea-0481-787e-71ec2ff5a60b',
    last_wal: 121,
  }));

  server.get('/sys/license/status', () => ({
    data: {
      autoloading_used: false,
      persisted_autoload: { ...LICENSE_INFO },
      autoloaded: { ...LICENSE_INFO },
    },
  }));

  // Eighteen namespaces: 'ns1/' through 'ns18/'.
  server.get('sys/namespaces', () => ({
    data: {
      keys: Array.from({ length: 18 }, (_, i) => `ns${i + 1}/`),
    },
  }));
}
// SPDX-License-Identifier: BUSL-1.1 -package command +package cacheboltdb import ( + "context" + "encoding/binary" + "fmt" "os" - "os/signal" - "syscall" - - "github.com/hashicorp/vault/audit" - "github.com/hashicorp/vault/builtin/plugin" - "github.com/hashicorp/vault/sdk/logical" - "github.com/hashicorp/vault/sdk/physical" - "github.com/hashicorp/vault/version" - "github.com/mitchellh/cli" - - /* - The builtinplugins package is initialized here because it, in turn, - initializes the database plugins. - They register multiple database drivers for the "database/sql" package. - */ - _ "github.com/hashicorp/vault/helper/builtinplugins" - - auditFile "github.com/hashicorp/vault/builtin/audit/file" - auditSocket "github.com/hashicorp/vault/builtin/audit/socket" - auditSyslog "github.com/hashicorp/vault/builtin/audit/syslog" - - credAliCloud "github.com/hashicorp/vault-plugin-auth-alicloud" - credCentrify "github.com/hashicorp/vault-plugin-auth-centrify" - credCF "github.com/hashicorp/vault-plugin-auth-cf" - credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin" - credOIDC "github.com/hashicorp/vault-plugin-auth-jwt" - credKerb "github.com/hashicorp/vault-plugin-auth-kerberos" - credOCI "github.com/hashicorp/vault-plugin-auth-oci" - credAws "github.com/hashicorp/vault/builtin/credential/aws" - credCert "github.com/hashicorp/vault/builtin/credential/cert" - credGitHub "github.com/hashicorp/vault/builtin/credential/github" - credLdap "github.com/hashicorp/vault/builtin/credential/ldap" - credOkta "github.com/hashicorp/vault/builtin/credential/okta" - credToken "github.com/hashicorp/vault/builtin/credential/token" - credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" - - logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" - logicalDb "github.com/hashicorp/vault/builtin/logical/database" - - physAerospike "github.com/hashicorp/vault/physical/aerospike" - physAliCloudOSS "github.com/hashicorp/vault/physical/alicloudoss" - physAzure 
"github.com/hashicorp/vault/physical/azure" - physCassandra "github.com/hashicorp/vault/physical/cassandra" - physCockroachDB "github.com/hashicorp/vault/physical/cockroachdb" - physConsul "github.com/hashicorp/vault/physical/consul" - physCouchDB "github.com/hashicorp/vault/physical/couchdb" - physDynamoDB "github.com/hashicorp/vault/physical/dynamodb" - physEtcd "github.com/hashicorp/vault/physical/etcd" - physFoundationDB "github.com/hashicorp/vault/physical/foundationdb" - physGCS "github.com/hashicorp/vault/physical/gcs" - physManta "github.com/hashicorp/vault/physical/manta" - physMSSQL "github.com/hashicorp/vault/physical/mssql" - physMySQL "github.com/hashicorp/vault/physical/mysql" - physOCI "github.com/hashicorp/vault/physical/oci" - physPostgreSQL "github.com/hashicorp/vault/physical/postgresql" - physRaft "github.com/hashicorp/vault/physical/raft" - physS3 "github.com/hashicorp/vault/physical/s3" - physSpanner "github.com/hashicorp/vault/physical/spanner" - physSwift "github.com/hashicorp/vault/physical/swift" - physZooKeeper "github.com/hashicorp/vault/physical/zookeeper" - physFile "github.com/hashicorp/vault/sdk/physical/file" - physInmem "github.com/hashicorp/vault/sdk/physical/inmem" - - sr "github.com/hashicorp/vault/serviceregistration" - csr "github.com/hashicorp/vault/serviceregistration/consul" - ksr "github.com/hashicorp/vault/serviceregistration/kubernetes" + "path/filepath" + "time" + + "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" + "github.com/hashicorp/go-hclog" + wrapping "github.com/hashicorp/go-kms-wrapping/v2" + "github.com/hashicorp/go-multierror" ) const ( - // EnvVaultCLINoColor is an env var that toggles colored UI output. 
- EnvVaultCLINoColor = `VAULT_CLI_NO_COLOR` - // EnvVaultFormat is the output format - EnvVaultFormat = `VAULT_FORMAT` - // EnvVaultLicense is an env var used in Vault Enterprise to provide a license blob - EnvVaultLicense = "VAULT_LICENSE" - // EnvVaultLicensePath is an env var used in Vault Enterprise to provide a - // path to a license file on disk - EnvVaultLicensePath = "VAULT_LICENSE_PATH" - // EnvVaultDetailed is to output detailed information (e.g., ListResponseWithInfo). - EnvVaultDetailed = `VAULT_DETAILED` - // EnvVaultLogFormat is used to specify the log format. Supported values are "standard" and "json" - EnvVaultLogFormat = "VAULT_LOG_FORMAT" - // EnvVaultLogLevel is used to specify the log level applied to logging - // Supported log levels: Trace, Debug, Error, Warn, Info - EnvVaultLogLevel = "VAULT_LOG_LEVEL" - // EnvVaultExperiments defines the experiments to enable for a server as a - // comma separated list. See experiments.ValidExperiments() for the list of - // valid experiments. Not mutable or persisted in storage, only read and - // logged at startup _per node_. This was initially introduced for the events - // system being developed over multiple release cycles. - EnvVaultExperiments = "VAULT_EXPERIMENTS" - - // flagNameAddress is the flag used in the base command to read in the - // address of the Vault server. - flagNameAddress = "address" - // flagnameCACert is the flag used in the base command to read in the CA - // cert. - flagNameCACert = "ca-cert" - // flagnameCAPath is the flag used in the base command to read in the CA - // cert path. 
- flagNameCAPath = "ca-path" - // flagNameClientCert is the flag used in the base command to read in the - // client key - flagNameClientKey = "client-key" - // flagNameClientCert is the flag used in the base command to read in the - // client cert - flagNameClientCert = "client-cert" - // flagNameTLSSkipVerify is the flag used in the base command to read in - // the option to ignore TLS certificate verification. - flagNameTLSSkipVerify = "tls-skip-verify" - // flagTLSServerName is the flag used in the base command to read in - // the TLS server name. - flagTLSServerName = "tls-server-name" - // flagNameAuditNonHMACRequestKeys is the flag name used for auth/secrets enable - flagNameAuditNonHMACRequestKeys = "audit-non-hmac-request-keys" - // flagNameAuditNonHMACResponseKeys is the flag name used for auth/secrets enable - flagNameAuditNonHMACResponseKeys = "audit-non-hmac-response-keys" - // flagNameDescription is the flag name used for tuning the secret and auth mount description parameter - flagNameDescription = "description" - // flagListingVisibility is the flag to toggle whether to show the mount in the UI-specific listing endpoint - flagNameListingVisibility = "listing-visibility" - // flagNamePassthroughRequestHeaders is the flag name used to set passthrough request headers to the backend - flagNamePassthroughRequestHeaders = "passthrough-request-headers" - // flagNameAllowedResponseHeaders is used to set allowed response headers from a plugin - flagNameAllowedResponseHeaders = "allowed-response-headers" - // flagNameTokenType is the flag name used to force a specific token type - flagNameTokenType = "token-type" - // flagNameAllowedManagedKeys is the flag name used for auth/secrets enable - flagNameAllowedManagedKeys = "allowed-managed-keys" - // flagNamePluginVersion selects what version of a plugin should be used. 
- flagNamePluginVersion = "plugin-version" - // flagNameUserLockoutThreshold is the flag name used for tuning the auth mount lockout threshold parameter - flagNameUserLockoutThreshold = "user-lockout-threshold" - // flagNameUserLockoutDuration is the flag name used for tuning the auth mount lockout duration parameter - flagNameUserLockoutDuration = "user-lockout-duration" - // flagNameUserLockoutCounterResetDuration is the flag name used for tuning the auth mount lockout counter reset parameter - flagNameUserLockoutCounterResetDuration = "user-lockout-counter-reset-duration" - // flagNameUserLockoutDisable is the flag name used for tuning the auth mount disable lockout parameter - flagNameUserLockoutDisable = "user-lockout-disable" - // flagNameDisableRedirects is used to prevent the client from honoring a single redirect as a response to a request - flagNameDisableRedirects = "disable-redirects" - // flagNameCombineLogs is used to specify whether log output should be combined and sent to stdout - flagNameCombineLogs = "combine-logs" - // flagNameLogFile is used to specify the path to the log file that Vault should use for logging - flagNameLogFile = "log-file" - // flagNameLogRotateBytes is the flag used to specify the number of bytes a log file should be before it is rotated. - flagNameLogRotateBytes = "log-rotate-bytes" - // flagNameLogRotateDuration is the flag used to specify the duration after which a log file should be rotated. - flagNameLogRotateDuration = "log-rotate-duration" - // flagNameLogRotateMaxFiles is the flag used to specify the maximum number of older/archived log files to keep. - flagNameLogRotateMaxFiles = "log-rotate-max-files" - // flagNameLogFormat is the flag used to specify the log format. 
Supported values are "standard" and "json" - flagNameLogFormat = "log-format" - // flagNameLogLevel is used to specify the log level applied to logging - // Supported log levels: Trace, Debug, Error, Warn, Info - flagNameLogLevel = "log-level" + // Keep track of schema version for future migrations + storageVersionKey = "version" + storageVersion = "2" // v2 merges auth-lease and secret-lease buckets into one ordered bucket + + // DatabaseFileName - filename for the persistent cache file + DatabaseFileName = "vault-agent-cache.db" + + // metaBucketName - naming the meta bucket that holds the version and + // bootstrapping keys + metaBucketName = "meta" + + // DEPRECATED: secretLeaseType - v1 Bucket/type for leases with secret info + secretLeaseType = "secret-lease" + + // DEPRECATED: authLeaseType - v1 Bucket/type for leases with auth info + authLeaseType = "auth-lease" + + // TokenType - Bucket/type for auto-auth tokens + TokenType = "token" + + // StaticSecretType - Bucket/type for static secrets + StaticSecretType = "static-secret" + + // TokenCapabilitiesType - Bucket/type for the token capabilities that + // are used to govern access to static secrets. These will be updated + // periodically to ensure that access to the cached secret remains. + TokenCapabilitiesType = "token-capabilities" + + // LeaseType - v2 Bucket/type for auth AND secret leases. + // + // This bucket stores keys in the same order they were created using + // auto-incrementing keys and the fact that BoltDB stores keys in byte + // slice order. This means when we iterate through this bucket during + // restore, we will always restore parent tokens before their children, + // allowing us to correctly attach child contexts to their parent's context. + LeaseType = "lease" + + // lookupType - v2 Bucket/type to map from a memcachedb index ID to an + // auto-incrementing BoltDB key. Facilitates deletes from the lease + // bucket using an ID instead of the auto-incrementing BoltDB key. 
+ lookupType = "lookup" + + // AutoAuthToken - key for the latest auto-auth token + AutoAuthToken = "auto-auth-token" + + // RetrievalTokenMaterial is the actual key or token in the key bucket + RetrievalTokenMaterial = "retrieval-token-material" ) -var ( - auditBackends = map[string]audit.Factory{ - "file": auditFile.Factory, - "socket": auditSocket.Factory, - "syslog": auditSyslog.Factory, +// BoltStorage is a persistent cache using a bolt db. Items are organized with +// the version and bootstrapping items in the "meta" bucket, and tokens, auth +// leases, and secret leases in their own buckets. +type BoltStorage struct { + db *bolt.DB + logger hclog.Logger + wrapper wrapping.Wrapper + aad string +} + +// BoltStorageConfig is the collection of input parameters for setting up bolt +// storage +type BoltStorageConfig struct { + Path string + Logger hclog.Logger + Wrapper wrapping.Wrapper + AAD string +} + +// NewBoltStorage opens a new bolt db at the specified file path and returns it. +// If the db already exists the buckets will just be created if they don't +// exist. 
+func NewBoltStorage(config *BoltStorageConfig) (*BoltStorage, error) { + dbPath := filepath.Join(config.Path, DatabaseFileName) + db, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return nil, err + } + err = db.Update(func(tx *bolt.Tx) error { + return createBoltSchema(tx, storageVersion) + }) + if err != nil { + return nil, err + } + bs := &BoltStorage{ + db: db, + logger: config.Logger, + wrapper: config.Wrapper, + aad: config.AAD, } + return bs, nil +} - credentialBackends = map[string]logical.Factory{ - "plugin": plugin.Factory, +func createBoltSchema(tx *bolt.Tx, createVersion string) error { + switch { + case createVersion == "1": + if err := createV1BoltSchema(tx); err != nil { + return err + } + case createVersion == "2": + if err := createV2BoltSchema(tx); err != nil { + return err + } + default: + return fmt.Errorf("schema version %s not supported", createVersion) } - logicalBackends = map[string]logical.Factory{ - "plugin": plugin.Factory, - "database": logicalDb.Factory, - // This is also available in the plugin catalog, but is here due to the need to - // automatically mount it. 
- "kv": logicalKv.Factory, + meta, err := tx.CreateBucketIfNotExists([]byte(metaBucketName)) + if err != nil { + return fmt.Errorf("failed to create bucket %s: %w", metaBucketName, err) } - physicalBackends = map[string]physical.Factory{ - "aerospike": physAerospike.NewAerospikeBackend, - "alicloudoss": physAliCloudOSS.NewAliCloudOSSBackend, - "azure": physAzure.NewAzureBackend, - "cassandra": physCassandra.NewCassandraBackend, - "cockroachdb": physCockroachDB.NewCockroachDBBackend, - "consul": physConsul.NewConsulBackend, - "couchdb_transactional": physCouchDB.NewTransactionalCouchDBBackend, - "couchdb": physCouchDB.NewCouchDBBackend, - "dynamodb": physDynamoDB.NewDynamoDBBackend, - "etcd": physEtcd.NewEtcdBackend, - "file_transactional": physFile.NewTransactionalFileBackend, - "file": physFile.NewFileBackend, - "foundationdb": physFoundationDB.NewFDBBackend, - "gcs": physGCS.NewBackend, - "inmem_ha": physInmem.NewInmemHA, - "inmem_transactional_ha": physInmem.NewTransactionalInmemHA, - "inmem_transactional": physInmem.NewTransactionalInmem, - "inmem": physInmem.NewInmem, - "manta": physManta.NewMantaBackend, - "mssql": physMSSQL.NewMSSQLBackend, - "mysql": physMySQL.NewMySQLBackend, - "oci": physOCI.NewBackend, - "postgresql": physPostgreSQL.NewPostgreSQLBackend, - "s3": physS3.NewS3Backend, - "spanner": physSpanner.NewBackend, - "swift": physSwift.NewSwiftBackend, - "raft": physRaft.NewRaftBackend, - "zookeeper": physZooKeeper.NewZooKeeperBackend, + // Check and set file version in the meta bucket. 
+ version := meta.Get([]byte(storageVersionKey)) + switch { + case version == nil: + err = meta.Put([]byte(storageVersionKey), []byte(createVersion)) + if err != nil { + return fmt.Errorf("failed to set storage version: %w", err) + } + + return nil + + case string(version) == createVersion: + return nil + + case string(version) == "1" && createVersion == "2": + return migrateFromV1ToV2Schema(tx) + + default: + return fmt.Errorf("storage migration from %s to %s not implemented", string(version), createVersion) + } +} + +func createV1BoltSchema(tx *bolt.Tx) error { + // Create the buckets for tokens and leases. + for _, bucket := range []string{TokenType, authLeaseType, secretLeaseType} { + if _, err := tx.CreateBucketIfNotExists([]byte(bucket)); err != nil { + return fmt.Errorf("failed to create %s bucket: %w", bucket, err) + } } - serviceRegistrations = map[string]sr.Factory{ - "consul": csr.NewServiceRegistration, - "kubernetes": ksr.NewServiceRegistration, + return nil +} + +func createV2BoltSchema(tx *bolt.Tx) error { + // Create the buckets for tokens and leases. + for _, bucket := range []string{TokenType, LeaseType, lookupType, StaticSecretType, TokenCapabilitiesType} { + if _, err := tx.CreateBucketIfNotExists([]byte(bucket)); err != nil { + return fmt.Errorf("failed to create %s bucket: %w", bucket, err) + } } - loginHandlers = map[string]LoginHandler{ - "alicloud": &credAliCloud.CLIHandler{}, - "aws": &credAws.CLIHandler{}, - "centrify": &credCentrify.CLIHandler{}, - "cert": &credCert.CLIHandler{}, - "cf": &credCF.CLIHandler{}, - "gcp": &credGcp.CLIHandler{}, - "github": &credGitHub.CLIHandler{}, - "kerberos": &credKerb.CLIHandler{}, - "ldap": &credLdap.CLIHandler{}, - "oci": &credOCI.CLIHandler{}, - "oidc": &credOIDC.CLIHandler{}, - "okta": &credOkta.CLIHandler{}, - "pcf": &credCF.CLIHandler{}, // Deprecated. 
- "radius": &credUserpass.CLIHandler{ - DefaultMount: "radius", - }, - "token": &credToken.CLIHandler{}, - "userpass": &credUserpass.CLIHandler{ - DefaultMount: "userpass", - }, + return nil +} + +func migrateFromV1ToV2Schema(tx *bolt.Tx) error { + if err := createV2BoltSchema(tx); err != nil { + return err + } + + for _, v1BucketType := range []string{authLeaseType, secretLeaseType} { + if bucket := tx.Bucket([]byte(v1BucketType)); bucket != nil { + bucket.ForEach(func(key, value []byte) error { + autoIncKey, err := autoIncrementedLeaseKey(tx, string(key)) + if err != nil { + return fmt.Errorf("error migrating %s %q key to auto incremented key: %w", v1BucketType, string(key), err) + } + if err := tx.Bucket([]byte(LeaseType)).Put(autoIncKey, value); err != nil { + return fmt.Errorf("error migrating %s %q from v1 to v2 schema: %w", v1BucketType, string(key), err) + } + return nil + }) + + if err := tx.DeleteBucket([]byte(v1BucketType)); err != nil { + return fmt.Errorf("failed to clean up %s bucket during v1 to v2 schema migration: %w", v1BucketType, err) + } + } + } + + meta, err := tx.CreateBucketIfNotExists([]byte(metaBucketName)) + if err != nil { + return fmt.Errorf("failed to create meta bucket: %w", err) + } + if err := meta.Put([]byte(storageVersionKey), []byte(storageVersion)); err != nil { + return fmt.Errorf("failed to update schema from v1 to v2: %w", err) + } + + return nil +} + +func autoIncrementedLeaseKey(tx *bolt.Tx, id string) ([]byte, error) { + leaseBucket := tx.Bucket([]byte(LeaseType)) + keyValue, err := leaseBucket.NextSequence() + if err != nil { + return nil, fmt.Errorf("failed to generate lookup key for id %q: %w", id, err) } -) -func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.CommandFactory { - getBaseCommand := func() *BaseCommand { - return &BaseCommand{ - UI: ui, - tokenHelper: runOpts.TokenHelper, - flagAddress: runOpts.Address, - client: runOpts.Client, + key := make([]byte, 8) + // MUST be big endian, 
because keys are ordered by byte slice comparison + // which progressively compares each byte in the slice starting at index 0. + // BigEndian in the range [255-257] looks like this: + // [0 0 0 0 0 0 0 255] + // [0 0 0 0 0 0 1 0] + // [0 0 0 0 0 0 1 1] + // LittleEndian in the same range looks like this: + // [255 0 0 0 0 0 0 0] + // [0 1 0 0 0 0 0 0] + // [1 1 0 0 0 0 0 0] + binary.BigEndian.PutUint64(key, keyValue) + + err = tx.Bucket([]byte(lookupType)).Put([]byte(id), key) + if err != nil { + return nil, err + } + + return key, nil +} + +// Set an index (token or lease) in bolt storage +func (b *BoltStorage) Set(ctx context.Context, id string, plaintext []byte, indexType string) error { + blob, err := b.wrapper.Encrypt(ctx, plaintext, wrapping.WithAad([]byte(b.aad))) + if err != nil { + return fmt.Errorf("error encrypting %s index: %w", indexType, err) + } + + protoBlob, err := proto.Marshal(blob) + if err != nil { + return err + } + + return b.db.Update(func(tx *bolt.Tx) error { + var key []byte + switch indexType { + case LeaseType: + // If this is a lease type, generate an auto-incrementing key and + // store an ID -> key lookup entry + key, err = autoIncrementedLeaseKey(tx, id) + if err != nil { + return err + } + case TokenType: + // If this is an auto-auth token, also stash it in the meta bucket for + // easy retrieval upon restore + key = []byte(id) + meta := tx.Bucket([]byte(metaBucketName)) + if err := meta.Put([]byte(AutoAuthToken), protoBlob); err != nil { + return fmt.Errorf("failed to set latest auto-auth token: %w", err) + } + case StaticSecretType: + key = []byte(id) + case TokenCapabilitiesType: + key = []byte(id) + default: + return fmt.Errorf("called Set for unsupported type %q", indexType) + } + s := tx.Bucket([]byte(indexType)) + if s == nil { + return fmt.Errorf("bucket %q not found", indexType) + } + return s.Put(key, protoBlob) + }) +} + +// Delete an index (token or lease) by key from bolt storage +func (b *BoltStorage) Delete(id 
string, indexType string) error { + return b.db.Update(func(tx *bolt.Tx) error { + key := []byte(id) + if indexType == LeaseType { + key = tx.Bucket([]byte(lookupType)).Get(key) + if key == nil { + return fmt.Errorf("failed to lookup bolt DB key for id %q", id) + } + + err := tx.Bucket([]byte(lookupType)).Delete([]byte(id)) + if err != nil { + return fmt.Errorf("failed to delete %q from lookup bucket: %w", id, err) + } + } + + bucket := tx.Bucket([]byte(indexType)) + if bucket == nil { + return fmt.Errorf("bucket %q not found during delete", indexType) + } + if err := bucket.Delete(key); err != nil { + return fmt.Errorf("failed to delete %q from %q bucket: %w", id, indexType, err) } + b.logger.Trace("deleted index from bolt db", "id", id) + return nil + }) +} + +func (b *BoltStorage) decrypt(ctx context.Context, ciphertext []byte) ([]byte, error) { + var blob wrapping.BlobInfo + if err := proto.Unmarshal(ciphertext, &blob); err != nil { + return nil, err } - commands := map[string]cli.CommandFactory{ - "agent": func() (cli.Command, error) { - return &AgentCommand{ - BaseCommand: &BaseCommand{ - UI: serverCmdUi, - }, - ShutdownCh: MakeShutdownCh(), - SighupCh: MakeSighupCh(), - }, nil - }, - "agent generate-config": func() (cli.Command, error) { - return &AgentGenerateConfigCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "audit": func() (cli.Command, error) { - return &AuditCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "audit disable": func() (cli.Command, error) { - return &AuditDisableCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "audit enable": func() (cli.Command, error) { - return &AuditEnableCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "audit list": func() (cli.Command, error) { - return &AuditListCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "auth tune": func() (cli.Command, error) { - return &AuthTuneCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "auth": func() (cli.Command, error) { - 
return &AuthCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "auth disable": func() (cli.Command, error) { - return &AuthDisableCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "auth enable": func() (cli.Command, error) { - return &AuthEnableCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "auth help": func() (cli.Command, error) { - return &AuthHelpCommand{ - BaseCommand: getBaseCommand(), - Handlers: loginHandlers, - }, nil - }, - "auth list": func() (cli.Command, error) { - return &AuthListCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "auth move": func() (cli.Command, error) { - return &AuthMoveCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "debug": func() (cli.Command, error) { - return &DebugCommand{ - BaseCommand: getBaseCommand(), - ShutdownCh: MakeShutdownCh(), - }, nil - }, - "delete": func() (cli.Command, error) { - return &DeleteCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "events subscribe": func() (cli.Command, error) { - return &EventsSubscribeCommands{ - BaseCommand: getBaseCommand(), - }, nil - }, - "lease": func() (cli.Command, error) { - return &LeaseCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "lease renew": func() (cli.Command, error) { - return &LeaseRenewCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "lease lookup": func() (cli.Command, error) { - return &LeaseLookupCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "lease revoke": func() (cli.Command, error) { - return &LeaseRevokeCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "list": func() (cli.Command, error) { - return &ListCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "login": func() (cli.Command, error) { - return &LoginCommand{ - BaseCommand: getBaseCommand(), - Handlers: loginHandlers, - }, nil - }, - "namespace": func() (cli.Command, error) { - return &NamespaceCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "namespace list": func() (cli.Command, error) { - 
return &NamespaceListCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "namespace lookup": func() (cli.Command, error) { - return &NamespaceLookupCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "namespace create": func() (cli.Command, error) { - return &NamespaceCreateCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "namespace patch": func() (cli.Command, error) { - return &NamespacePatchCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "namespace delete": func() (cli.Command, error) { - return &NamespaceDeleteCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "namespace lock": func() (cli.Command, error) { - return &NamespaceAPILockCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "namespace unlock": func() (cli.Command, error) { - return &NamespaceAPIUnlockCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator": func() (cli.Command, error) { - return &OperatorCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator diagnose": func() (cli.Command, error) { - return &OperatorDiagnoseCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator generate-root": func() (cli.Command, error) { - return &OperatorGenerateRootCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator init": func() (cli.Command, error) { - return &OperatorInitCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator key-status": func() (cli.Command, error) { - return &OperatorKeyStatusCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator migrate": func() (cli.Command, error) { - return &OperatorMigrateCommand{ - BaseCommand: getBaseCommand(), - PhysicalBackends: physicalBackends, - ShutdownCh: MakeShutdownCh(), - }, nil - }, - "operator raft": func() (cli.Command, error) { - return &OperatorRaftCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator raft autopilot get-config": func() (cli.Command, error) { - return &OperatorRaftAutopilotGetConfigCommand{ - 
BaseCommand: getBaseCommand(), - }, nil - }, - "operator raft autopilot set-config": func() (cli.Command, error) { - return &OperatorRaftAutopilotSetConfigCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator raft autopilot state": func() (cli.Command, error) { - return &OperatorRaftAutopilotStateCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator raft list-peers": func() (cli.Command, error) { - return &OperatorRaftListPeersCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator raft join": func() (cli.Command, error) { - return &OperatorRaftJoinCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator raft remove-peer": func() (cli.Command, error) { - return &OperatorRaftRemovePeerCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator raft snapshot": func() (cli.Command, error) { - return &OperatorRaftSnapshotCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator raft snapshot inspect": func() (cli.Command, error) { - return &OperatorRaftSnapshotInspectCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator raft snapshot restore": func() (cli.Command, error) { - return &OperatorRaftSnapshotRestoreCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator raft snapshot save": func() (cli.Command, error) { - return &OperatorRaftSnapshotSaveCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator rekey": func() (cli.Command, error) { - return &OperatorRekeyCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator rotate": func() (cli.Command, error) { - return &OperatorRotateCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator seal": func() (cli.Command, error) { - return &OperatorSealCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator step-down": func() (cli.Command, error) { - return &OperatorStepDownCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator usage": func() (cli.Command, error) { - return 
&OperatorUsageCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator unseal": func() (cli.Command, error) { - return &OperatorUnsealCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "operator members": func() (cli.Command, error) { - return &OperatorMembersCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "patch": func() (cli.Command, error) { - return &PatchCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "path-help": func() (cli.Command, error) { - return &PathHelpCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "pki": func() (cli.Command, error) { - return &PKICommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "pki health-check": func() (cli.Command, error) { - return &PKIHealthCheckCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "pki issue": func() (cli.Command, error) { - return &PKIIssueCACommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "pki list-intermediates": func() (cli.Command, error) { - return &PKIListIntermediateCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "pki reissue": func() (cli.Command, error) { - return &PKIReIssueCACommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "pki verify-sign": func() (cli.Command, error) { - return &PKIVerifySignCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "plugin": func() (cli.Command, error) { - return &PluginCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "plugin deregister": func() (cli.Command, error) { - return &PluginDeregisterCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "plugin info": func() (cli.Command, error) { - return &PluginInfoCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "plugin list": func() (cli.Command, error) { - return &PluginListCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "plugin register": func() (cli.Command, error) { - return &PluginRegisterCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "plugin reload": func() (cli.Command, error) { 
- return &PluginReloadCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "plugin reload-status": func() (cli.Command, error) { - return &PluginReloadStatusCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "plugin runtime": func() (cli.Command, error) { - return &PluginRuntimeCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "plugin runtime register": func() (cli.Command, error) { - return &PluginRuntimeRegisterCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "plugin runtime deregister": func() (cli.Command, error) { - return &PluginRuntimeDeregisterCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "plugin runtime info": func() (cli.Command, error) { - return &PluginRuntimeInfoCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "plugin runtime list": func() (cli.Command, error) { - return &PluginRuntimeListCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "proxy": func() (cli.Command, error) { - return &ProxyCommand{ - BaseCommand: &BaseCommand{ - UI: serverCmdUi, - }, - ShutdownCh: MakeShutdownCh(), - SighupCh: MakeSighupCh(), - }, nil - }, - "policy": func() (cli.Command, error) { - return &PolicyCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "policy delete": func() (cli.Command, error) { - return &PolicyDeleteCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "policy fmt": func() (cli.Command, error) { - return &PolicyFmtCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "policy list": func() (cli.Command, error) { - return &PolicyListCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "policy read": func() (cli.Command, error) { - return &PolicyReadCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "policy write": func() (cli.Command, error) { - return &PolicyWriteCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "print": func() (cli.Command, error) { - return &PrintCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "print token": func() (cli.Command, 
error) { - return &PrintTokenCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "read": func() (cli.Command, error) { - return &ReadCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "secrets": func() (cli.Command, error) { - return &SecretsCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "secrets disable": func() (cli.Command, error) { - return &SecretsDisableCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "secrets enable": func() (cli.Command, error) { - return &SecretsEnableCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "secrets list": func() (cli.Command, error) { - return &SecretsListCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "secrets move": func() (cli.Command, error) { - return &SecretsMoveCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "secrets tune": func() (cli.Command, error) { - return &SecretsTuneCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "server": func() (cli.Command, error) { - return &ServerCommand{ - BaseCommand: &BaseCommand{ - UI: serverCmdUi, - tokenHelper: runOpts.TokenHelper, - flagAddress: runOpts.Address, - }, - AuditBackends: auditBackends, - CredentialBackends: credentialBackends, - LogicalBackends: logicalBackends, - PhysicalBackends: physicalBackends, - - ServiceRegistrations: serviceRegistrations, - - ShutdownCh: MakeShutdownCh(), - SighupCh: MakeSighupCh(), - SigUSR2Ch: MakeSigUSR2Ch(), - }, nil - }, - "ssh": func() (cli.Command, error) { - return &SSHCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "status": func() (cli.Command, error) { - return &StatusCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "transform": func() (cli.Command, error) { - return &TransformCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "transform import": func() (cli.Command, error) { - return &TransformImportCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "transform import-version": func() (cli.Command, error) { - return 
&TransformImportVersionCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "transit": func() (cli.Command, error) { - return &TransitCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "transit import": func() (cli.Command, error) { - return &TransitImportCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "transit import-version": func() (cli.Command, error) { - return &TransitImportVersionCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "token": func() (cli.Command, error) { - return &TokenCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "token create": func() (cli.Command, error) { - return &TokenCreateCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "token capabilities": func() (cli.Command, error) { - return &TokenCapabilitiesCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "token lookup": func() (cli.Command, error) { - return &TokenLookupCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "token renew": func() (cli.Command, error) { - return &TokenRenewCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "token revoke": func() (cli.Command, error) { - return &TokenRevokeCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "unwrap": func() (cli.Command, error) { - return &UnwrapCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "version": func() (cli.Command, error) { - return &VersionCommand{ - VersionInfo: version.GetVersion(), - BaseCommand: getBaseCommand(), - }, nil - }, - "version-history": func() (cli.Command, error) { - return &VersionHistoryCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "write": func() (cli.Command, error) { - return &WriteCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv": func() (cli.Command, error) { - return &KVCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv put": func() (cli.Command, error) { - return &KVPutCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv patch": func() (cli.Command, error) { - return 
&KVPatchCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv rollback": func() (cli.Command, error) { - return &KVRollbackCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv get": func() (cli.Command, error) { - return &KVGetCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv delete": func() (cli.Command, error) { - return &KVDeleteCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv list": func() (cli.Command, error) { - return &KVListCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv destroy": func() (cli.Command, error) { - return &KVDestroyCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv undelete": func() (cli.Command, error) { - return &KVUndeleteCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv enable-versioning": func() (cli.Command, error) { - return &KVEnableVersioningCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv metadata": func() (cli.Command, error) { - return &KVMetadataCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv metadata put": func() (cli.Command, error) { - return &KVMetadataPutCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv metadata patch": func() (cli.Command, error) { - return &KVMetadataPatchCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv metadata get": func() (cli.Command, error) { - return &KVMetadataGetCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "kv metadata delete": func() (cli.Command, error) { - return &KVMetadataDeleteCommand{ - BaseCommand: getBaseCommand(), - }, nil - }, - "monitor": func() (cli.Command, error) { - return &MonitorCommand{ - BaseCommand: getBaseCommand(), - ShutdownCh: MakeShutdownCh(), - }, nil - }, + return b.wrapper.Decrypt(ctx, &blob, wrapping.WithAad([]byte(b.aad))) +} + +// GetByType returns a list of stored items of the specified type +func (b *BoltStorage) GetByType(ctx context.Context, indexType string) ([][]byte, error) { + var returnBytes [][]byte + + err := 
b.db.View(func(tx *bolt.Tx) error { + var errors *multierror.Error + + bucket := tx.Bucket([]byte(indexType)) + if bucket == nil { + return fmt.Errorf("bucket %q not found", indexType) + } + bucket.ForEach(func(key, ciphertext []byte) error { + plaintext, err := b.decrypt(ctx, ciphertext) + if err != nil { + errors = multierror.Append(errors, fmt.Errorf("error decrypting entry %s: %w", key, err)) + return nil + } + + returnBytes = append(returnBytes, plaintext) + return nil + }) + return errors.ErrorOrNil() + }) + + return returnBytes, err +} + +// GetAutoAuthToken retrieves the latest auto-auth token, and returns nil if non +// exists yet +func (b *BoltStorage) GetAutoAuthToken(ctx context.Context) ([]byte, error) { + var encryptedToken []byte + + err := b.db.View(func(tx *bolt.Tx) error { + meta := tx.Bucket([]byte(metaBucketName)) + if meta == nil { + return fmt.Errorf("bucket %q not found", metaBucketName) + } + value := meta.Get([]byte(AutoAuthToken)) + if value != nil { + encryptedToken = make([]byte, len(value)) + copy(encryptedToken, value) + } + return nil + }) + if err != nil { + return nil, err + } + + if encryptedToken == nil { + return nil, nil } - entInitCommands(ui, serverCmdUi, runOpts, commands) - return commands + plaintext, err := b.decrypt(ctx, encryptedToken) + if err != nil { + return nil, fmt.Errorf("failed to decrypt auto-auth token: %w", err) + } + return plaintext, nil } -// MakeShutdownCh returns a channel that can be used for shutdown -// notifications for commands. This channel will send a message for every -// SIGINT or SIGTERM received. 
-func MakeShutdownCh() chan struct{} { - resultCh := make(chan struct{}) - - shutdownCh := make(chan os.Signal, 4) - signal.Notify(shutdownCh, os.Interrupt, syscall.SIGTERM) - go func() { - <-shutdownCh - close(resultCh) - }() - return resultCh +// GetRetrievalToken retrieves a plaintext token from the KeyBucket, which will +// be used by the key manager to retrieve the encryption key, nil if none set +func (b *BoltStorage) GetRetrievalToken() ([]byte, error) { + var token []byte + + err := b.db.View(func(tx *bolt.Tx) error { + metaBucket := tx.Bucket([]byte(metaBucketName)) + if metaBucket == nil { + return fmt.Errorf("bucket %q not found", metaBucketName) + } + value := metaBucket.Get([]byte(RetrievalTokenMaterial)) + if value != nil { + token = make([]byte, len(value)) + copy(token, value) + } + return nil + }) + if err != nil { + return nil, err + } + + return token, err } -// MakeSighupCh returns a channel that can be used for SIGHUP -// reloading. This channel will send a message for every -// SIGHUP received. 
-func MakeSighupCh() chan struct{} { - resultCh := make(chan struct{}) - - signalCh := make(chan os.Signal, 4) - signal.Notify(signalCh, syscall.SIGHUP) - go func() { - for { - <-signalCh - resultCh <- struct{}{} +// StoreRetrievalToken sets plaintext token material in the RetrievalTokenBucket +func (b *BoltStorage) StoreRetrievalToken(token []byte) error { + return b.db.Update(func(tx *bolt.Tx) error { + bucket := tx.Bucket([]byte(metaBucketName)) + if bucket == nil { + return fmt.Errorf("bucket %q not found", metaBucketName) } - }() - return resultCh + return bucket.Put([]byte(RetrievalTokenMaterial), token) + }) +} + +// Close the boltdb +func (b *BoltStorage) Close() error { + b.logger.Trace("closing bolt db", "path", b.db.Path()) + return b.db.Close() +} + +// Clear the boltdb by deleting all the token and lease buckets and recreating +// the schema/layout +func (b *BoltStorage) Clear() error { + return b.db.Update(func(tx *bolt.Tx) error { + for _, name := range []string{TokenType, LeaseType, lookupType, StaticSecretType, TokenCapabilitiesType} { + b.logger.Trace("deleting bolt bucket", "name", name) + if err := tx.DeleteBucket([]byte(name)); err != nil { + return err + } + } + return createBoltSchema(tx, storageVersion) + }) +} + +// DBFileExists checks whether the vault agent cache file at `filePath` exists +func DBFileExists(path string) (bool, error) { + checkFile, err := os.OpenFile(filepath.Join(path, DatabaseFileName), os.O_RDWR, 0o600) + defer checkFile.Close() + switch { + case err == nil: + return true, nil + case os.IsNotExist(err): + return false, nil + default: + return false, fmt.Errorf("failed to check if bolt file exists at path %s: %w", path, err) + } } diff --git a/command/debug.go b/command/debug.go index 941249657a94..06a31780b5ad 100644 --- a/command/debug.go +++ b/command/debug.go @@ -1,1111 +1,400 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: BUSL-1.1 -package command +package cacheboltdb import ( "context" - "encoding/json" "fmt" - "io/ioutil" - "net/url" "os" + "path" "path/filepath" - "runtime" - "strconv" "strings" - "sync" + "testing" "time" + "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-secure-stdlib/gatedwriter" - "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/helper/osutil" - "github.com/hashicorp/vault/sdk/helper/jsonutil" - "github.com/hashicorp/vault/sdk/helper/logging" - "github.com/hashicorp/vault/version" - "github.com/mholt/archiver/v3" - "github.com/mitchellh/cli" - "github.com/oklog/run" - "github.com/posener/complete" + "github.com/hashicorp/vault/command/agentproxyshared/cache/keymanager" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -const ( - // debugIndexVersion tracks the canonical version in the index file - // for compatibility with future format/layout changes on the bundle. - debugIndexVersion = 1 +func getTestKeyManager(t *testing.T) keymanager.KeyManager { + t.Helper() - // debugMinInterval is the minimum acceptable interval capture value. This - // value applies to duration and all interval-related flags. - debugMinInterval = 5 * time.Second + km, err := keymanager.NewPassthroughKeyManager(context.Background(), nil) + require.NoError(t, err) - // debugDurationGrace is the grace period added to duration to allow for - // "last frame" capture if the interval falls into the last duration time - // value. For instance, using default values, adding a grace duration lets - // the command capture 5 intervals (0, 30, 60, 90, and 120th second) before - // exiting. - debugDurationGrace = 1 * time.Second - - // debugCompressionExt is the default compression extension used if - // compression is enabled. 
- debugCompressionExt = ".tar.gz" - - // fileFriendlyTimeFormat is the time format used for file and directory - // naming. - fileFriendlyTimeFormat = "2006-01-02T15-04-05Z" -) - -// debugIndex represents the data structure in the index file -type debugIndex struct { - Version int `json:"version"` - VaultAddress string `json:"vault_address"` - ClientVersion string `json:"client_version"` - ServerVersion string `json:"server_version"` - Timestamp time.Time `json:"timestamp"` - DurationSeconds int `json:"duration_seconds"` - IntervalSeconds int `json:"interval_seconds"` - MetricsIntervalSeconds int `json:"metrics_interval_seconds"` - Compress bool `json:"compress"` - RawArgs []string `json:"raw_args"` - Targets []string `json:"targets"` - Output map[string]interface{} `json:"output"` - Errors []*captureError `json:"errors"` -} - -// captureError holds an error entry that can occur during polling capture. -// It includes the timestamp, the target, and the error itself. -type captureError struct { - TargetError string `json:"error"` - Target string `json:"target"` - Timestamp time.Time `json:"timestamp"` + return km } -var ( - _ cli.Command = (*DebugCommand)(nil) - _ cli.CommandAutocomplete = (*DebugCommand)(nil) -) - -type DebugCommand struct { - *BaseCommand - - flagCompress bool - flagDuration time.Duration - flagInterval time.Duration - flagMetricsInterval time.Duration - flagOutput string - flagTargets []string - - // logFormat defines the output format for Monitor - logFormat string +func TestBolt_SetGet(t *testing.T) { + ctx := context.Background() - // debugIndex is used to keep track of the index state, which gets written - // to a file at the end. 
- debugIndex *debugIndex + path, err := os.MkdirTemp("", "bolt-test") + require.NoError(t, err) + defer os.RemoveAll(path) - // skipTimingChecks bypasses timing-related checks, used primarily for tests - skipTimingChecks bool - // logger is the logger used for outputting capture progress - logger hclog.Logger - - // ShutdownCh is used to capture interrupt signal and end polling capture - ShutdownCh chan struct{} - - // Collection slices to hold data - hostInfoCollection []map[string]interface{} - metricsCollection []map[string]interface{} - replicationStatusCollection []map[string]interface{} - serverStatusCollection []map[string]interface{} - inFlightReqStatusCollection []map[string]interface{} - - // cachedClient holds the client retrieved during preflight - cachedClient *api.Client - - // errLock is used to lock error capture into the index file - errLock sync.Mutex -} - -func (c *DebugCommand) AutocompleteArgs() complete.Predictor { - // Predict targets - return c.PredictVaultDebugTargets() -} - -func (c *DebugCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *DebugCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP) - - f := set.NewFlagSet("Command Options") - - f.BoolVar(&BoolVar{ - Name: "compress", - Target: &c.flagCompress, - Default: true, - Usage: "Toggles whether to compress output package", - }) - - f.DurationVar(&DurationVar{ - Name: "duration", - Target: &c.flagDuration, - Completion: complete.PredictAnything, - Default: 2 * time.Minute, - Usage: "Duration to run the command.", - }) - - f.DurationVar(&DurationVar{ - Name: "interval", - Target: &c.flagInterval, - Completion: complete.PredictAnything, - Default: 30 * time.Second, - Usage: "The polling interval at which to collect profiling data and server state.", - }) - - f.DurationVar(&DurationVar{ - Name: "metrics-interval", - Target: &c.flagMetricsInterval, - Completion: complete.PredictAnything, - Default: 10 * time.Second, - Usage: "The polling 
interval at which to collect metrics data.", + b, err := NewBoltStorage(&BoltStorageConfig{ + Path: path, + Logger: hclog.Default(), + Wrapper: getTestKeyManager(t).Wrapper(), }) - - f.StringVar(&StringVar{ - Name: "output", - Target: &c.flagOutput, - Completion: complete.PredictAnything, - Usage: "Specifies the output path for the debug package.", - }) - - f.StringSliceVar(&StringSliceVar{ - Name: "target", - Target: &c.flagTargets, - Usage: "Target to capture, defaulting to all if none specified. " + - "This can be specified multiple times to capture multiple targets. " + - "Available targets are: config, host, metrics, pprof, " + - "replication-status, server-status, log.", - }) - - f.StringVar(&StringVar{ - Name: "log-format", - Target: &c.logFormat, - Default: "standard", - Usage: "Log format to be captured if \"log\" target specified. " + - "Supported values are \"standard\" and \"json\". The default is \"standard\".", - }) - - return set -} - -func (c *DebugCommand) Help() string { - helpText := ` -Usage: vault debug [options] - - Probes a specific Vault server node for a specified period of time, recording - information about the node, its cluster, and its host environment. The - information collected is packaged and written to the specified path. - - Certain endpoints that this command uses require ACL permissions to access. - If not permitted, the information from these endpoints will not be part of the - output. The command uses the Vault address and token as specified via - the login command, environment variables, or CLI flags. 
- - To create a debug package using default duration and interval values in the - current directory that captures all applicable targets: - - $ vault debug - - To create a debug package with a specific duration and interval in the current - directory that capture all applicable targets: - - $ vault debug -duration=10m -interval=1m - - To create a debug package in the current directory with a specific sub-set of - targets: - - $ vault debug -target=host -target=metrics - -` + c.Flags().Help() - - return helpText + require.NoError(t, err) + + secrets, err := b.GetByType(ctx, LeaseType) + assert.NoError(t, err) + require.Len(t, secrets, 0) + + err = b.Set(ctx, "test1", []byte("hello"), LeaseType) + assert.NoError(t, err) + secrets, err = b.GetByType(ctx, LeaseType) + assert.NoError(t, err) + require.Len(t, secrets, 1) + assert.Equal(t, []byte("hello"), secrets[0]) } -func (c *DebugCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - parsedArgs := f.Args() - if len(parsedArgs) > 0 { - c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(parsedArgs))) - return 1 - } - - // Initialize the logger for debug output - gatedWriter := gatedwriter.NewWriter(os.Stderr) - if c.logger == nil { - c.logger = logging.NewVaultLoggerWithWriter(gatedWriter, hclog.Trace) - } - - dstOutputFile, err := c.preflight(args) - if err != nil { - c.UI.Error(fmt.Sprintf("Error during validation: %s", err)) - return 1 - } - - // Print debug information - c.UI.Output("==> Starting debug capture...") - c.UI.Info(fmt.Sprintf(" Vault Address: %s", c.debugIndex.VaultAddress)) - c.UI.Info(fmt.Sprintf(" Client Version: %s", c.debugIndex.ClientVersion)) - c.UI.Info(fmt.Sprintf(" Server Version: %s", c.debugIndex.ServerVersion)) - c.UI.Info(fmt.Sprintf(" Duration: %s", c.flagDuration)) - c.UI.Info(fmt.Sprintf(" Interval: %s", c.flagInterval)) - c.UI.Info(fmt.Sprintf(" Metrics Interval: %s", 
c.flagMetricsInterval)) - c.UI.Info(fmt.Sprintf(" Targets: %s", strings.Join(c.flagTargets, ", "))) - c.UI.Info(fmt.Sprintf(" Output: %s", dstOutputFile)) - c.UI.Output("") +func TestBoltDelete(t *testing.T) { + ctx := context.Background() - // Release the log gate. - c.logger.(hclog.OutputResettable).ResetOutputWithFlush(&hclog.LoggerOptions{ - Output: os.Stderr, - }, gatedWriter) + path, err := os.MkdirTemp("", "bolt-test") + require.NoError(t, err) + defer os.RemoveAll(path) - // Capture static information - c.UI.Info("==> Capturing static information...") - if err := c.captureStaticTargets(); err != nil { - c.UI.Error(fmt.Sprintf("Error capturing static information: %s", err)) - return 2 - } - - c.UI.Output("") - - // Capture polling information - c.UI.Info("==> Capturing dynamic information...") - if err := c.capturePollingTargets(); err != nil { - c.UI.Error(fmt.Sprintf("Error capturing dynamic information: %s", err)) - return 2 - } - - c.UI.Output("Finished capturing information, bundling files...") - - // Generate index file - if err := c.generateIndex(); err != nil { - c.UI.Error(fmt.Sprintf("Error generating index: %s", err)) - return 1 - } - - if c.flagCompress { - if err := c.compress(dstOutputFile); err != nil { - c.UI.Error(fmt.Sprintf("Error encountered during bundle compression: %s", err)) - // We want to inform that data collection was captured and stored in - // a directory even if compression fails - c.UI.Info(fmt.Sprintf("Data written to: %s", c.flagOutput)) - return 1 - } - } - - c.UI.Info(fmt.Sprintf("Success! 
Bundle written to: %s", dstOutputFile)) - return 0 + b, err := NewBoltStorage(&BoltStorageConfig{ + Path: path, + Logger: hclog.Default(), + Wrapper: getTestKeyManager(t).Wrapper(), + }) + require.NoError(t, err) + + err = b.Set(ctx, "secret-test1", []byte("hello1"), LeaseType) + require.NoError(t, err) + err = b.Set(ctx, "secret-test2", []byte("hello2"), LeaseType) + require.NoError(t, err) + + secrets, err := b.GetByType(ctx, LeaseType) + require.NoError(t, err) + assert.Len(t, secrets, 2) + assert.ElementsMatch(t, [][]byte{[]byte("hello1"), []byte("hello2")}, secrets) + + err = b.Delete("secret-test1", LeaseType) + require.NoError(t, err) + secrets, err = b.GetByType(ctx, LeaseType) + require.NoError(t, err) + require.Len(t, secrets, 1) + assert.Equal(t, []byte("hello2"), secrets[0]) } -func (c *DebugCommand) Synopsis() string { - return "Runs the debug command" -} +func TestBoltClear(t *testing.T) { + ctx := context.Background() -func (c *DebugCommand) generateIndex() error { - outputLayout := map[string]interface{}{ - "files": []string{}, - } - // Walk the directory to generate the output layout - err := filepath.Walk(c.flagOutput, func(path string, info os.FileInfo, err error) error { - // Prevent panic by handling failure accessing a path - if err != nil { - return err - } + path, err := os.MkdirTemp("", "bolt-test") + require.NoError(t, err) + defer os.RemoveAll(path) - // Skip the base dir - if path == c.flagOutput { - return nil - } - - // If we're a directory, simply add a corresponding map - if info.IsDir() { - parsedTime, err := time.Parse(fileFriendlyTimeFormat, info.Name()) - if err != nil { - return err - } - - outputLayout[info.Name()] = map[string]interface{}{ - "timestamp": parsedTime, - "files": []string{}, - } - return nil - } - - relPath, err := filepath.Rel(c.flagOutput, path) - if err != nil { - return err - } - - dir, file := filepath.Split(relPath) - if len(dir) != 0 { - dir = filepath.Clean(dir) - filesArr := 
outputLayout[dir].(map[string]interface{})["files"] - outputLayout[dir].(map[string]interface{})["files"] = append(filesArr.([]string), file) - } else { - outputLayout["files"] = append(outputLayout["files"].([]string), file) - } - - return nil + b, err := NewBoltStorage(&BoltStorageConfig{ + Path: path, + Logger: hclog.Default(), + Wrapper: getTestKeyManager(t).Wrapper(), }) - if err != nil { - return fmt.Errorf("error generating directory output layout: %s", err) - } - - c.debugIndex.Output = outputLayout - - // Marshal into json - bytes, err := json.MarshalIndent(c.debugIndex, "", " ") - if err != nil { - return fmt.Errorf("error marshaling index file: %s", err) - } - - // Write out file - if err := ioutil.WriteFile(filepath.Join(c.flagOutput, "index.json"), bytes, 0o600); err != nil { - return fmt.Errorf("error generating index file; %s", err) - } - - return nil + require.NoError(t, err) + + // Populate the bolt db + err = b.Set(ctx, "secret-test1", []byte("hello1"), LeaseType) + require.NoError(t, err) + secrets, err := b.GetByType(ctx, LeaseType) + require.NoError(t, err) + require.Len(t, secrets, 1) + assert.Equal(t, []byte("hello1"), secrets[0]) + + err = b.Set(ctx, "auth-test1", []byte("hello2"), LeaseType) + require.NoError(t, err) + auths, err := b.GetByType(ctx, LeaseType) + require.NoError(t, err) + require.Len(t, auths, 2) + assert.Equal(t, []byte("hello1"), auths[0]) + assert.Equal(t, []byte("hello2"), auths[1]) + + err = b.Set(ctx, "token-test1", []byte("hello"), TokenType) + require.NoError(t, err) + tokens, err := b.GetByType(ctx, TokenType) + require.NoError(t, err) + require.Len(t, tokens, 1) + assert.Equal(t, []byte("hello"), tokens[0]) + + err = b.Set(ctx, "static-secret", []byte("hello"), StaticSecretType) + require.NoError(t, err) + staticSecrets, err := b.GetByType(ctx, StaticSecretType) + require.NoError(t, err) + require.Len(t, staticSecrets, 1) + assert.Equal(t, []byte("hello"), staticSecrets[0]) + + err = b.Set(ctx, 
"capabilities-index", []byte("hello"), TokenCapabilitiesType) + require.NoError(t, err) + capabilities, err := b.GetByType(ctx, TokenCapabilitiesType) + require.NoError(t, err) + require.Len(t, capabilities, 1) + assert.Equal(t, []byte("hello"), capabilities[0]) + + // Clear the bolt db, and check that it's indeed clear + err = b.Clear() + require.NoError(t, err) + auths, err = b.GetByType(ctx, LeaseType) + require.NoError(t, err) + assert.Len(t, auths, 0) + tokens, err = b.GetByType(ctx, TokenType) + require.NoError(t, err) + assert.Len(t, tokens, 0) + staticSecrets, err = b.GetByType(ctx, StaticSecretType) + require.NoError(t, err) + require.Len(t, staticSecrets, 0) + capabilities, err = b.GetByType(ctx, TokenCapabilitiesType) + require.NoError(t, err) + require.Len(t, capabilities, 0) } -// preflight performs various checks against the provided flags to ensure they -// are valid/reasonable values. It also takes care of instantiating a client and -// index object for use by the command. 
-func (c *DebugCommand) preflight(rawArgs []string) (string, error) { - if !c.skipTimingChecks { - // Guard duration and interval values to acceptable values - if c.flagDuration < debugMinInterval { - c.UI.Info(fmt.Sprintf("Overwriting duration value %q to the minimum value of %q", c.flagDuration, debugMinInterval)) - c.flagDuration = debugMinInterval - } - if c.flagInterval < debugMinInterval { - c.UI.Info(fmt.Sprintf("Overwriting interval value %q to the minimum value of %q", c.flagInterval, debugMinInterval)) - c.flagInterval = debugMinInterval - } - if c.flagMetricsInterval < debugMinInterval { - c.UI.Info(fmt.Sprintf("Overwriting metrics interval value %q to the minimum value of %q", c.flagMetricsInterval, debugMinInterval)) - c.flagMetricsInterval = debugMinInterval - } - } - - // These timing checks are always applicable since interval shouldn't be - // greater than the duration - if c.flagInterval > c.flagDuration { - c.UI.Info(fmt.Sprintf("Overwriting interval value %q to the duration value %q", c.flagInterval, c.flagDuration)) - c.flagInterval = c.flagDuration - } - if c.flagMetricsInterval > c.flagDuration { - c.UI.Info(fmt.Sprintf("Overwriting metrics interval value %q to the duration value %q", c.flagMetricsInterval, c.flagDuration)) - c.flagMetricsInterval = c.flagDuration - } - - if len(c.flagTargets) == 0 { - c.flagTargets = c.defaultTargets() - } else { - // Check for any invalid targets and ignore them if found - invalidTargets := strutil.Difference(c.flagTargets, c.defaultTargets(), true) - if len(invalidTargets) != 0 { - c.UI.Info(fmt.Sprintf("Ignoring invalid targets: %s", strings.Join(invalidTargets, ", "))) - c.flagTargets = strutil.Difference(c.flagTargets, invalidTargets, true) - } - } - - // Make sure we can talk to the server - client, err := c.Client() - if err != nil { - return "", fmt.Errorf("unable to create client to connect to Vault: %s", err) - } - serverHealth, err := client.Sys().Health() - if err != nil { - return "", 
fmt.Errorf("unable to connect to the server: %s", err) - } +func TestBoltSetAutoAuthToken(t *testing.T) { + ctx := context.Background() - // Check if server is DR Secondary and we need to further - // ignore any targets due to endpoint restrictions - if serverHealth.ReplicationDRMode == "secondary" { - invalidDRTargets := strutil.Difference(c.flagTargets, c.validDRSecondaryTargets(), true) - if len(invalidDRTargets) != 0 { - c.UI.Info(fmt.Sprintf("Ignoring invalid targets for DR Secondary: %s", strings.Join(invalidDRTargets, ", "))) - c.flagTargets = strutil.Difference(c.flagTargets, invalidDRTargets, true) - } - } - c.cachedClient = client - - captureTime := time.Now().UTC() - if len(c.flagOutput) == 0 { - formattedTime := captureTime.Format(fileFriendlyTimeFormat) - c.flagOutput = fmt.Sprintf("vault-debug-%s", formattedTime) - } + path, err := os.MkdirTemp("", "bolt-test") + require.NoError(t, err) + defer os.RemoveAll(path) - // Strip trailing slash before proceeding - c.flagOutput = filepath.Clean(c.flagOutput) - - // If compression is enabled, trim the extension so that the files are - // written to a directory even if compression somehow fails. We ensure the - // extension during compression. We also prevent overwriting if the file - // already exists. - dstOutputFile := c.flagOutput - if c.flagCompress { - if !strings.HasSuffix(dstOutputFile, ".tar.gz") && !strings.HasSuffix(dstOutputFile, ".tgz") { - dstOutputFile = dstOutputFile + debugCompressionExt - } - - // Ensure that the file doesn't already exist, and ensure that we always - // trim the extension from flagOutput since we'll be progressively - // writing to that. 
- _, err := os.Stat(dstOutputFile) - switch { - case os.IsNotExist(err): - c.flagOutput = strings.TrimSuffix(c.flagOutput, ".tar.gz") - c.flagOutput = strings.TrimSuffix(c.flagOutput, ".tgz") - case err != nil: - return "", fmt.Errorf("unable to stat file: %s", err) - default: - return "", fmt.Errorf("output file already exists: %s", dstOutputFile) - } - } - - // Stat check the directory to ensure we don't override any existing data. - _, err = os.Stat(c.flagOutput) - switch { - case os.IsNotExist(err): - err := os.MkdirAll(c.flagOutput, 0o700) - if err != nil { - return "", fmt.Errorf("unable to create output directory: %s", err) - } - case err != nil: - return "", fmt.Errorf("unable to stat directory: %s", err) - default: - return "", fmt.Errorf("output directory already exists: %s", c.flagOutput) - } - - // Populate initial index fields - c.debugIndex = &debugIndex{ - VaultAddress: client.Address(), - ClientVersion: version.GetVersion().VersionNumber(), - ServerVersion: serverHealth.Version, - Compress: c.flagCompress, - DurationSeconds: int(c.flagDuration.Seconds()), - IntervalSeconds: int(c.flagInterval.Seconds()), - MetricsIntervalSeconds: int(c.flagMetricsInterval.Seconds()), - RawArgs: rawArgs, - Version: debugIndexVersion, - Targets: c.flagTargets, - Timestamp: captureTime, - Errors: []*captureError{}, - } - - return dstOutputFile, nil -} - -func (c *DebugCommand) defaultTargets() []string { - return []string{"config", "host", "requests", "metrics", "pprof", "replication-status", "server-status", "log"} -} - -func (c *DebugCommand) validDRSecondaryTargets() []string { - return []string{"metrics", "replication-status", "server-status"} + b, err := NewBoltStorage(&BoltStorageConfig{ + Path: path, + Logger: hclog.Default(), + Wrapper: getTestKeyManager(t).Wrapper(), + }) + require.NoError(t, err) + + token, err := b.GetAutoAuthToken(ctx) + assert.NoError(t, err) + assert.Nil(t, token) + + // set first token + err = b.Set(ctx, "token-test1", []byte("hello 1"), 
TokenType) + require.NoError(t, err) + secrets, err := b.GetByType(ctx, TokenType) + require.NoError(t, err) + require.Len(t, secrets, 1) + assert.Equal(t, []byte("hello 1"), secrets[0]) + token, err = b.GetAutoAuthToken(ctx) + assert.NoError(t, err) + assert.Equal(t, []byte("hello 1"), token) + + // set second token + err = b.Set(ctx, "token-test2", []byte("hello 2"), TokenType) + require.NoError(t, err) + secrets, err = b.GetByType(ctx, TokenType) + require.NoError(t, err) + require.Len(t, secrets, 2) + assert.ElementsMatch(t, [][]byte{[]byte("hello 1"), []byte("hello 2")}, secrets) + token, err = b.GetAutoAuthToken(ctx) + assert.NoError(t, err) + assert.Equal(t, []byte("hello 2"), token) } -func (c *DebugCommand) captureStaticTargets() error { - // Capture configuration state - if strutil.StrListContains(c.flagTargets, "config") { - c.logger.Info("capturing configuration state") - - resp, err := c.cachedClient.Logical().Read("sys/config/state/sanitized") - if err != nil { - c.captureError("config", err) - c.logger.Error("config: error capturing config state", "error", err) - return nil - } - - if resp != nil && resp.Data != nil { - collection := []map[string]interface{}{ - { - "timestamp": time.Now().UTC(), - "config": resp.Data, - }, +func TestDBFileExists(t *testing.T) { + testCases := []struct { + name string + mkDir bool + createFile bool + expectExist bool + }{ + { + name: "all exists", + mkDir: true, + createFile: true, + expectExist: true, + }, + { + name: "dir exist, file missing", + mkDir: true, + createFile: false, + expectExist: false, + }, + { + name: "all missing", + mkDir: false, + createFile: false, + expectExist: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var tmpPath string + var err error + if tc.mkDir { + tmpPath, err = os.MkdirTemp("", "test-db-path") + require.NoError(t, err) } - if err := c.persistCollection(collection, "config.json"); err != nil { - c.UI.Error(fmt.Sprintf("Error writing data 
to %s: %v", "config.json", err)) - } - } - } - - return nil -} - -// capturePollingTargets captures all dynamic targets over the specified -// duration and interval. -func (c *DebugCommand) capturePollingTargets() error { - var g run.Group - - ctx, cancelFunc := context.WithTimeout(context.Background(), c.flagDuration+debugDurationGrace) - defer cancelFunc() - - // This run group watches for interrupt or duration - g.Add(func() error { - for { - select { - case <-c.ShutdownCh: - return nil - case <-ctx.Done(): - return nil + if tc.createFile { + err = os.WriteFile(path.Join(tmpPath, DatabaseFileName), []byte("test-db-path"), 0o600) + require.NoError(t, err) } - } - }, func(error) {}) - - // Collect host-info if target is specified - if strutil.StrListContains(c.flagTargets, "host") { - g.Add(func() error { - c.collectHostInfo(ctx) - return nil - }, func(error) { - cancelFunc() - }) - } - - // Collect metrics if target is specified - if strutil.StrListContains(c.flagTargets, "metrics") { - g.Add(func() error { - c.collectMetrics(ctx) - return nil - }, func(error) { - cancelFunc() - }) - } - - // Collect pprof data if target is specified - if strutil.StrListContains(c.flagTargets, "pprof") { - g.Add(func() error { - c.collectPprof(ctx) - return nil - }, func(error) { - cancelFunc() - }) - } - - // Collect replication status if target is specified - if strutil.StrListContains(c.flagTargets, "replication-status") { - g.Add(func() error { - c.collectReplicationStatus(ctx) - return nil - }, func(error) { - cancelFunc() - }) - } - - // Collect server status if target is specified - if strutil.StrListContains(c.flagTargets, "server-status") { - g.Add(func() error { - c.collectServerStatus(ctx) - return nil - }, func(error) { - cancelFunc() - }) - } - - // Collect in-flight request status if target is specified - if strutil.StrListContains(c.flagTargets, "requests") { - g.Add(func() error { - c.collectInFlightRequestStatus(ctx) - return nil - }, func(error) { - cancelFunc() 
+ exists, err := DBFileExists(tmpPath) + assert.NoError(t, err) + assert.Equal(t, tc.expectExist, exists) }) } - - if strutil.StrListContains(c.flagTargets, "log") { - g.Add(func() error { - c.writeLogs(ctx) - // If writeLogs returned earlier due to an error, wait for context - // to terminate so we don't abort everything. - <-ctx.Done() - return nil - }, func(error) { - cancelFunc() - }) - } - - // We shouldn't bump across errors since none is returned by the interrupts, - // but we error check for sanity here. - if err := g.Run(); err != nil { - return err - } - - // Write collected data to their corresponding files - if err := c.persistCollection(c.metricsCollection, "metrics.json"); err != nil { - c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "metrics.json", err)) - } - if err := c.persistCollection(c.serverStatusCollection, "server_status.json"); err != nil { - c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "server_status.json", err)) - } - if err := c.persistCollection(c.replicationStatusCollection, "replication_status.json"); err != nil { - c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "replication_status.json", err)) - } - if err := c.persistCollection(c.hostInfoCollection, "host_info.json"); err != nil { - c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "host_info.json", err)) - } - if err := c.persistCollection(c.inFlightReqStatusCollection, "requests.json"); err != nil { - c.UI.Error(fmt.Sprintf("Error writing data to %s: %v", "requests.json", err)) - } - return nil -} - -func (c *DebugCommand) collectHostInfo(ctx context.Context) { - idxCount := 0 - intervalTicker := time.Tick(c.flagInterval) - - for { - if idxCount > 0 { - select { - case <-ctx.Done(): - return - case <-intervalTicker: - } - } - - c.logger.Info("capturing host information", "count", idxCount) - idxCount++ - - r := c.cachedClient.NewRequest("GET", "/v1/sys/host-info") - resp, err := c.cachedClient.RawRequestWithContext(ctx, r) - if err != nil { - 
c.captureError("host", err) - return - } - if resp != nil { - defer resp.Body.Close() - - secret, err := api.ParseSecret(resp.Body) - if err != nil { - c.captureError("host", err) - return - } - if secret != nil && secret.Data != nil { - hostEntry := secret.Data - c.hostInfoCollection = append(c.hostInfoCollection, hostEntry) - } - } - } } -func (c *DebugCommand) collectMetrics(ctx context.Context) { - idxCount := 0 - intervalTicker := time.Tick(c.flagMetricsInterval) - - for { - if idxCount > 0 { - select { - case <-ctx.Done(): - return - case <-intervalTicker: - } - } - - c.logger.Info("capturing metrics", "count", idxCount) - idxCount++ - - // Perform metrics request - r := c.cachedClient.NewRequest("GET", "/v1/sys/metrics") - resp, err := c.cachedClient.RawRequestWithContext(ctx, r) - if err != nil { - c.captureError("metrics", err) - continue - } - if resp != nil { - defer resp.Body.Close() - - metricsEntry := make(map[string]interface{}) - err := json.NewDecoder(resp.Body).Decode(&metricsEntry) - if err != nil { - c.captureError("metrics", err) - continue +func Test_SetGetRetrievalToken(t *testing.T) { + testCases := []struct { + name string + tokenToSet []byte + expectedToken []byte + }{ + { + name: "normal set and get", + tokenToSet: []byte("test token"), + expectedToken: []byte("test token"), + }, + { + name: "no token set", + tokenToSet: nil, + expectedToken: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + path, err := os.MkdirTemp("", "bolt-test") + require.NoError(t, err) + defer os.RemoveAll(path) + + b, err := NewBoltStorage(&BoltStorageConfig{ + Path: path, + Logger: hclog.Default(), + Wrapper: getTestKeyManager(t).Wrapper(), + }) + require.NoError(t, err) + defer b.Close() + + if tc.tokenToSet != nil { + err := b.StoreRetrievalToken(tc.tokenToSet) + require.NoError(t, err) } - c.metricsCollection = append(c.metricsCollection, metricsEntry) - } + gotKey, err := b.GetRetrievalToken() + assert.NoError(t, err) + 
assert.Equal(t, tc.expectedToken, gotKey) + }) } } -func (c *DebugCommand) collectPprof(ctx context.Context) { - idxCount := 0 - startTime := time.Now() - intervalTicker := time.Tick(c.flagInterval) +func TestBolt_MigrateFromV1ToV2Schema(t *testing.T) { + ctx := context.Background() - for { - if idxCount > 0 { - select { - case <-ctx.Done(): - return - case <-intervalTicker: - } - } - - currentTimestamp := time.Now().UTC() - c.logger.Info("capturing pprof data", "count", idxCount) - idxCount++ - - // Create a sub-directory for pprof data - currentDir := currentTimestamp.Format(fileFriendlyTimeFormat) - dirName := filepath.Join(c.flagOutput, currentDir) - if err := os.MkdirAll(dirName, 0o700); err != nil { - c.UI.Error(fmt.Sprintf("Error creating sub-directory for time interval: %s", err)) - continue - } - - var wg sync.WaitGroup - - for _, target := range []string{"threadcreate", "allocs", "block", "mutex", "goroutine", "heap"} { - wg.Add(1) - go func(target string) { - defer wg.Done() - data, err := pprofTarget(ctx, c.cachedClient, target, nil) - if err != nil { - c.captureError("pprof."+target, err) - return - } - - err = ioutil.WriteFile(filepath.Join(dirName, target+".prof"), data, 0o600) - if err != nil { - c.captureError("pprof."+target, err) - } - }(target) - } - - // As a convenience, we'll also fetch the goroutine target using debug=2, which yields a text - // version of the stack traces that don't require using `go tool pprof` to view. 
- wg.Add(1) - go func() { - defer wg.Done() - data, err := pprofTarget(ctx, c.cachedClient, "goroutine", url.Values{"debug": []string{"2"}}) - if err != nil { - c.captureError("pprof.goroutines-text", err) - return - } + path, err := os.MkdirTemp("", "bolt-test") + require.NoError(t, err) + defer os.RemoveAll(path) - err = ioutil.WriteFile(filepath.Join(dirName, "goroutines.txt"), data, 0o600) - if err != nil { - c.captureError("pprof.goroutines-text", err) - } - }() - - // If the our remaining duration is less than the interval value - // skip profile and trace. - runDuration := currentTimestamp.Sub(startTime) - if (c.flagDuration+debugDurationGrace)-runDuration < c.flagInterval { - wg.Wait() - continue - } - - // Capture profile - wg.Add(1) - go func() { - defer wg.Done() - data, err := pprofProfile(ctx, c.cachedClient, c.flagInterval) - if err != nil { - c.captureError("pprof.profile", err) - return - } - - err = ioutil.WriteFile(filepath.Join(dirName, "profile.prof"), data, 0o600) - if err != nil { - c.captureError("pprof.profile", err) - } - }() - - // Capture trace - wg.Add(1) - go func() { - defer wg.Done() - data, err := pprofTrace(ctx, c.cachedClient, c.flagInterval) - if err != nil { - c.captureError("pprof.trace", err) - return - } - - err = ioutil.WriteFile(filepath.Join(dirName, "trace.out"), data, 0o600) - if err != nil { - c.captureError("pprof.trace", err) - } - }() - - wg.Wait() - } -} - -func (c *DebugCommand) collectReplicationStatus(ctx context.Context) { - idxCount := 0 - intervalTicker := time.Tick(c.flagInterval) - - for { - if idxCount > 0 { - select { - case <-ctx.Done(): - return - case <-intervalTicker: - } - } - - c.logger.Info("capturing replication status", "count", idxCount) - idxCount++ - - r := c.cachedClient.NewRequest("GET", "/v1/sys/replication/status") - resp, err := c.cachedClient.RawRequestWithContext(ctx, r) - if err != nil { - c.captureError("replication-status", err) - return - } - if resp != nil { - defer resp.Body.Close() 
- - secret, err := api.ParseSecret(resp.Body) - if err != nil { - c.captureError("replication-status", err) - return - } - if secret != nil && secret.Data != nil { - replicationEntry := secret.Data - replicationEntry["timestamp"] = time.Now().UTC() - c.replicationStatusCollection = append(c.replicationStatusCollection, replicationEntry) - } - } + dbPath := filepath.Join(path, DatabaseFileName) + db, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: 1 * time.Second}) + require.NoError(t, err) + err = db.Update(func(tx *bolt.Tx) error { + return createBoltSchema(tx, "1") + }) + require.NoError(t, err) + b := &BoltStorage{ + db: db, + logger: hclog.Default(), + wrapper: getTestKeyManager(t).Wrapper(), } -} -func (c *DebugCommand) collectServerStatus(ctx context.Context) { - idxCount := 0 - intervalTicker := time.Tick(c.flagInterval) - - for { - if idxCount > 0 { - select { - case <-ctx.Done(): - return - case <-intervalTicker: - } - } - - c.logger.Info("capturing server status", "count", idxCount) - idxCount++ - - healthInfo, err := c.cachedClient.Sys().Health() + // Manually insert some items into the v1 schema. 
+ err = db.Update(func(tx *bolt.Tx) error { + blob, err := b.wrapper.Encrypt(ctx, []byte("ignored-contents")) if err != nil { - c.captureError("server-status.health", err) + return fmt.Errorf("error encrypting contents: %w", err) } - sealInfo, err := c.cachedClient.Sys().SealStatus() + protoBlob, err := proto.Marshal(blob) if err != nil { - c.captureError("server-status.seal", err) + return err } - statusEntry := map[string]interface{}{ - "timestamp": time.Now().UTC(), - "health": healthInfo, - "seal": sealInfo, - } - c.serverStatusCollection = append(c.serverStatusCollection, statusEntry) - } -} - -func (c *DebugCommand) collectInFlightRequestStatus(ctx context.Context) { - idxCount := 0 - intervalTicker := time.Tick(c.flagInterval) - - for { - if idxCount > 0 { - select { - case <-ctx.Done(): - return - case <-intervalTicker: - } + if err := tx.Bucket([]byte(authLeaseType)).Put([]byte("test-auth-id-1"), protoBlob); err != nil { + return err } - - c.logger.Info("capturing in-flight request status", "count", idxCount) - idxCount++ - - req := c.cachedClient.NewRequest("GET", "/v1/sys/in-flight-req") - resp, err := c.cachedClient.RawRequestWithContext(ctx, req) - if err != nil { - c.captureError("requests", err) - return + if err := tx.Bucket([]byte(authLeaseType)).Put([]byte("test-auth-id-2"), protoBlob); err != nil { + return err } - - var data map[string]interface{} - if resp != nil { - defer resp.Body.Close() - err = jsonutil.DecodeJSONFromReader(resp.Body, &data) - if err != nil { - c.captureError("requests", err) - return - } - - statusEntry := map[string]interface{}{ - "timestamp": time.Now().UTC(), - "in_flight_requests": data, - } - c.inFlightReqStatusCollection = append(c.inFlightReqStatusCollection, statusEntry) + if err := tx.Bucket([]byte(secretLeaseType)).Put([]byte("test-secret-id-1"), protoBlob); err != nil { + return err } - } -} -// persistCollection writes the collected data for a particular target onto the -// specified file. 
If the collection is empty, it returns immediately. -func (c *DebugCommand) persistCollection(collection []map[string]interface{}, outFile string) error { - if len(collection) == 0 { return nil - } - - // Write server-status file and update the index - bytes, err := json.MarshalIndent(collection, "", " ") - if err != nil { - return err - } - if err := ioutil.WriteFile(filepath.Join(c.flagOutput, outFile), bytes, 0o600); err != nil { - return err - } - - return nil -} - -func (c *DebugCommand) compress(dst string) error { - if runtime.GOOS != "windows" { - defer osutil.Umask(osutil.Umask(0o077)) - } - - tgz := archiver.NewTarGz() - if err := tgz.Archive([]string{c.flagOutput}, dst); err != nil { - return fmt.Errorf("failed to compress data: %s", err) - } - - // If everything is fine up to this point, remove original directory - if err := os.RemoveAll(c.flagOutput); err != nil { - return fmt.Errorf("failed to remove data directory: %s", err) - } - - return nil -} - -func pprofTarget(ctx context.Context, client *api.Client, target string, params url.Values) ([]byte, error) { - req := client.NewRequest("GET", "/v1/sys/pprof/"+target) - if params != nil { - req.Params = params - } - resp, err := client.RawRequestWithContext(ctx, req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - return data, nil -} - -func pprofProfile(ctx context.Context, client *api.Client, duration time.Duration) ([]byte, error) { - seconds := int(duration.Seconds()) - secStr := strconv.Itoa(seconds) - - req := client.NewRequest("GET", "/v1/sys/pprof/profile") - req.Params.Add("seconds", secStr) - resp, err := client.RawRequestWithContext(ctx, req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - return data, nil + }) + require.NoError(t, err) + + // Check we have the contents we would expect 
for the v1 schema. + leases, err := b.GetByType(ctx, authLeaseType) + require.NoError(t, err) + assert.Len(t, leases, 2) + leases, err = b.GetByType(ctx, secretLeaseType) + require.NoError(t, err) + assert.Len(t, leases, 1) + leases, err = b.GetByType(ctx, LeaseType) + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "not found")) + + // Now migrate to the v2 schema. + err = db.Update(migrateFromV1ToV2Schema) + require.NoError(t, err) + + // Check all the leases have been migrated into one bucket. + leases, err = b.GetByType(ctx, authLeaseType) + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "not found")) + leases, err = b.GetByType(ctx, secretLeaseType) + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "not found")) + leases, err = b.GetByType(ctx, LeaseType) + require.NoError(t, err) + assert.Len(t, leases, 3) } -func pprofTrace(ctx context.Context, client *api.Client, duration time.Duration) ([]byte, error) { - seconds := int(duration.Seconds()) - secStr := strconv.Itoa(seconds) - - req := client.NewRequest("GET", "/v1/sys/pprof/trace") - req.Params.Add("seconds", secStr) - resp, err := client.RawRequestWithContext(ctx, req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } +func TestBolt_MigrateFromInvalidToV2Schema(t *testing.T) { + ctx := context.Background() - return data, nil -} + path, err := os.MkdirTemp("", "bolt-test") + require.NoError(t, err) + defer os.RemoveAll(path) -// newCaptureError instantiates a new captureError. 
-func (c *DebugCommand) captureError(target string, err error) { - c.errLock.Lock() - c.debugIndex.Errors = append(c.debugIndex.Errors, &captureError{ - TargetError: err.Error(), - Target: target, - Timestamp: time.Now().UTC(), - }) - c.errLock.Unlock() -} - -func (c *DebugCommand) writeLogs(ctx context.Context) { - out, err := os.OpenFile(filepath.Join(c.flagOutput, "vault.log"), os.O_CREATE|os.O_WRONLY, 0o600) - if err != nil { - c.captureError("log", err) - return + dbPath := filepath.Join(path, DatabaseFileName) + db, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: 1 * time.Second}) + require.NoError(t, err) + b := &BoltStorage{ + db: db, + logger: hclog.Default(), + wrapper: getTestKeyManager(t).Wrapper(), } - defer out.Close() - // Create Monitor specific client based on the cached client - mClient, err := c.cachedClient.Clone() - if err != nil { - c.captureError("log", err) - return + // All GetByType calls should fail as there's no schema + for _, bucket := range []string{authLeaseType, secretLeaseType, LeaseType} { + _, err = b.GetByType(ctx, bucket) + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "not found")) } - mClient.SetToken(c.cachedClient.Token()) - // Set timeout to match the context explicitly - mClient.SetClientTimeout(c.flagDuration + debugDurationGrace) + // Now migrate to the v2 schema. 
+ err = db.Update(migrateFromV1ToV2Schema) + require.NoError(t, err) - logCh, err := mClient.Sys().Monitor(ctx, "trace", c.logFormat) - if err != nil { - c.captureError("log", err) - return + // Deprecated auth and secret lease buckets still shouldn't exist + // All GetByType calls should fail as there's no schema + for _, bucket := range []string{authLeaseType, secretLeaseType} { + _, err = b.GetByType(ctx, bucket) + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "not found")) } - for { - select { - case log := <-logCh: - if len(log) > 0 { - if !strings.HasSuffix(log, "\n") { - log += "\n" - } - _, err = out.WriteString(log) - if err != nil { - c.captureError("log", err) - return - } - } - case <-ctx.Done(): - return - } - } + // GetByType for LeaseType should now return an empty result + leases, err := b.GetByType(ctx, LeaseType) + require.NoError(t, err) + require.Len(t, leases, 0) } diff --git a/command/debug_test.go b/command/debug_test.go index 80576311dde8..fd5102ad8f6d 100644 --- a/command/debug_test.go +++ b/command/debug_test.go @@ -1,846 +1,383 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: BUSL-1.1 -package command +package eventbus import ( - "archive/tar" - "encoding/json" + "context" + "errors" "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" + "net/url" + "path" "strings" - "syscall" - "testing" + "sync" + "sync/atomic" "time" - "github.com/hashicorp/vault/api" - "github.com/mholt/archiver/v3" - "github.com/mitchellh/cli" + "github.com/armon/go-metrics" + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/eventlogger/formatter_filters/cloudevents" + "github.com/hashicorp/go-bexpr" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" + "github.com/ryanuber/go-glob" + "google.golang.org/protobuf/types/known/structpb" ) -func testDebugCommand(tb testing.TB) (*cli.MockUi, *DebugCommand) { - tb.Helper() - - ui := cli.NewMockUi() - return ui, &DebugCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - } -} +const ( + // eventTypeAll is purely internal to the event bus. We use it to send all + // events down one big firehose, and pipelines define their own filtering + // based on what each subscriber is interested in. 
+ eventTypeAll = "*" + defaultTimeout = 60 * time.Second +) -func TestDebugCommand_Run(t *testing.T) { - t.Parallel() +var ( + ErrNotStarted = errors.New("event broker has not been started") + subscriptions atomic.Int64 // keeps track of event subscription count in all event buses - testDir, err := ioutil.TempDir("", "vault-debug") - if err != nil { - t.Fatal(err) + // these metadata fields will have the plugin mount path prepended to them + metadataPrependPathFields = []string{ + "path", + logical.EventMetadataDataPath, } - defer os.RemoveAll(testDir) - - cases := []struct { - name string - args []string - out string - code int - }{ - { - "valid", - []string{ - "-duration=1s", - fmt.Sprintf("-output=%s/valid", testDir), - }, - "", - 0, - }, - { - "too_many_args", - []string{ - "-duration=1s", - fmt.Sprintf("-output=%s/too_many_args", testDir), - "foo", - }, - "Too many arguments", - 1, - }, - { - "invalid_target", - []string{ - "-duration=1s", - fmt.Sprintf("-output=%s/invalid_target", testDir), - "-target=foo", - }, - "Ignoring invalid targets: foo", - 0, - }, - } - - for _, tc := range cases { - tc := tc +) - t.Run(tc.name, func(t *testing.T) { - t.Parallel() +// EventBus contains the main logic of running an event broker for Vault. +// Start() must be called before the EventBus will accept events for sending. 
+type EventBus struct { + logger hclog.Logger + broker *eventlogger.Broker + started atomic.Bool + formatterNodeID eventlogger.NodeID + timeout time.Duration + filters *Filters + cloudEventsFormatterFilter *cloudevents.FormatterFilter +} - client, closer := testVaultServer(t) - defer closer() +type pluginEventBus struct { + bus *EventBus + namespace *namespace.Namespace + pluginInfo *logical.EventPluginInfo +} - ui, cmd := testDebugCommand(t) - cmd.client = client - cmd.skipTimingChecks = true +type asyncChanNode struct { + // TODO: add bounded deque buffer of *EventReceived + ctx context.Context + ch chan *eventlogger.Event + logger hclog.Logger + + // used to close the connection + closeOnce sync.Once + cancelFunc context.CancelFunc + pipelineID eventlogger.PipelineID + removeFilter func() + removePipeline func(ctx context.Context, t eventlogger.EventType, id eventlogger.PipelineID) (bool, error) +} - code := cmd.Run(tc.args) - if code != tc.code { - t.Errorf("expected %d to be %d", code, tc.code) - } +var ( + _ eventlogger.Node = (*asyncChanNode)(nil) + _ logical.EventSender = (*pluginEventBus)(nil) +) - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, tc.out) { - t.Fatalf("expected %q to contain %q", combined, tc.out) - } - }) +// Start starts the event bus, allowing events to be written. +// It is not possible to stop or restart the event bus. +// It is safe to call Start() multiple times. +func (bus *EventBus) Start() { + wasStarted := bus.started.Swap(true) + if !wasStarted { + bus.logger.Info("Starting event system") } } -func TestDebugCommand_Archive(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - ext string - expectError bool - }{ - { - "no-ext", - "", - false, - }, - { - "with-ext-tar-gz", - ".tar.gz", - false, - }, - { - "with-ext-tgz", - ".tgz", - false, - }, +// patchMountPath patches the event data's metadata "secret_path" field, if present, to include the mount path prepended. 
+func patchMountPath(data *logical.EventData, pluginInfo *logical.EventPluginInfo) *logical.EventData { + if pluginInfo == nil || pluginInfo.MountPath == "" || data.Metadata == nil { + return data } - for _, tc := range cases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - // Create temp dirs for each test case since os.Stat and tgz.Walk - // (called down below) exhibits raciness otherwise. - testDir, err := ioutil.TempDir("", "vault-debug") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) - - client, closer := testVaultServer(t) - defer closer() - - ui, cmd := testDebugCommand(t) - cmd.client = client - cmd.skipTimingChecks = true - - // We use tc.name as the base path and apply the extension per - // test case. - basePath := tc.name - outputPath := filepath.Join(testDir, basePath+tc.ext) - args := []string{ - "-duration=1s", - fmt.Sprintf("-output=%s", outputPath), - "-target=server-status", - } - - code := cmd.Run(args) - if exp := 0; code != exp { - t.Log(ui.OutputWriter.String()) - t.Log(ui.ErrorWriter.String()) - t.Fatalf("expected %d to be %d", code, exp) - } - // If we expect an error we're done here - if tc.expectError { - return - } - - expectedExt := tc.ext - if expectedExt == "" { - expectedExt = debugCompressionExt - } - - bundlePath := filepath.Join(testDir, basePath+expectedExt) - _, err = os.Stat(bundlePath) - if os.IsNotExist(err) { - t.Log(ui.OutputWriter.String()) - t.Fatal(err) + for _, field := range metadataPrependPathFields { + if data.Metadata.Fields[field] != nil { + newPath := path.Join(pluginInfo.MountPath, data.Metadata.Fields[field].GetStringValue()) + if pluginInfo.MountClass == "auth" { + newPath = path.Join("auth", newPath) } + data.Metadata.Fields[field] = structpb.NewStringValue(newPath) + } + } - tgz := archiver.NewTarGz() - err = tgz.Walk(bundlePath, func(f archiver.File) error { - fh, ok := f.Header.(*tar.Header) - if !ok { - return fmt.Errorf("invalid file header: %#v", f.Header) - } - 
- // Ignore base directory and index file - if fh.Name == basePath+"/" || fh.Name == filepath.Join(basePath, "index.json") { - return nil - } + return data +} - if fh.Name != filepath.Join(basePath, "server_status.json") { - return fmt.Errorf("unexpected file: %s", fh.Name) - } - return nil - }) - if err != nil { - t.Fatal(err) - } - }) +// SendEventInternal sends an event to the event bus and routes it to all relevant subscribers. +// This function does *not* wait for all subscribers to acknowledge before returning. +// This function is meant to be used by trusted internal code, so it can specify details like the namespace +// and plugin info. Events from plugins should be routed through WithPlugin(), which will populate +// the namespace and plugin info automatically. +// The context passed in is currently ignored to ensure that the event is sent if the context is short-lived, +// such as with an HTTP request context. +func (bus *EventBus) SendEventInternal(_ context.Context, ns *namespace.Namespace, pluginInfo *logical.EventPluginInfo, eventType logical.EventType, data *logical.EventData) error { + if ns == nil { + return namespace.ErrNoNamespace + } + if !bus.started.Load() { + return ErrNotStarted + } + eventReceived := &logical.EventReceived{ + Event: patchMountPath(data, pluginInfo), + Namespace: ns.Path, + EventType: string(eventType), + PluginInfo: pluginInfo, + } + + // We can't easily know when the SendEvent is complete, so we can't call the cancel function. + // But, it is called automatically after bus.timeout, so there won't be any leak as long as bus.timeout is not too long. 
+ ctx, _ := context.WithTimeout(context.Background(), bus.timeout) + _, err := bus.broker.Send(ctx, eventTypeAll, eventReceived) + if err != nil { + // if no listeners for this event type are registered, that's okay, the event + // will just not be sent anywhere + if strings.Contains(strings.ToLower(err.Error()), "no graph for eventtype") { + return nil + } } + return err } -func TestDebugCommand_CaptureTargets(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - targets []string - expectedFiles []string - }{ - { - "config", - []string{"config"}, - []string{"config.json"}, - }, - { - "host-info", - []string{"host"}, - []string{"host_info.json"}, - }, - { - "metrics", - []string{"metrics"}, - []string{"metrics.json"}, - }, - { - "replication-status", - []string{"replication-status"}, - []string{"replication_status.json"}, - }, - { - "server-status", - []string{"server-status"}, - []string{"server_status.json"}, - }, - { - "in-flight-req", - []string{"requests"}, - []string{"requests.json"}, - }, - { - "all-minus-pprof", - []string{"config", "host", "metrics", "replication-status", "server-status"}, - []string{"config.json", "host_info.json", "metrics.json", "replication_status.json", "server_status.json"}, - }, +func (bus *EventBus) WithPlugin(ns *namespace.Namespace, eventPluginInfo *logical.EventPluginInfo) (*pluginEventBus, error) { + if ns == nil { + return nil, namespace.ErrNoNamespace } + return &pluginEventBus{ + bus: bus, + namespace: ns, + pluginInfo: eventPluginInfo, + }, nil +} - for _, tc := range cases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - testDir, err := ioutil.TempDir("", "vault-debug") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) - - client, closer := testVaultServer(t) - defer closer() - - ui, cmd := testDebugCommand(t) - cmd.client = client - cmd.skipTimingChecks = true - - basePath := tc.name - args := []string{ - "-duration=1s", - fmt.Sprintf("-output=%s/%s", testDir, 
basePath), - } - for _, target := range tc.targets { - args = append(args, fmt.Sprintf("-target=%s", target)) - } - - code := cmd.Run(args) - if exp := 0; code != exp { - t.Log(ui.ErrorWriter.String()) - t.Fatalf("expected %d to be %d", code, exp) - } - - bundlePath := filepath.Join(testDir, basePath+debugCompressionExt) - _, err = os.Open(bundlePath) - if err != nil { - t.Fatalf("failed to open archive: %s", err) - } - - tgz := archiver.NewTarGz() - err = tgz.Walk(bundlePath, func(f archiver.File) error { - fh, ok := f.Header.(*tar.Header) - if !ok { - t.Fatalf("invalid file header: %#v", f.Header) - } - - // Ignore base directory and index file - if fh.Name == basePath+"/" || fh.Name == filepath.Join(basePath, "index.json") { - return nil - } - - for _, fileName := range tc.expectedFiles { - if fh.Name == filepath.Join(basePath, fileName) { - return nil - } - } - - // If we reach here, it means that this is an unexpected file - return fmt.Errorf("unexpected file: %s", fh.Name) - }) - if err != nil { - t.Fatal(err) - } - }) - } +// SendEvent sends an event to the event bus and routes it to all relevant subscribers. +// This function does *not* wait for all subscribers to acknowledge before returning. +// The context passed in is currently ignored. 
+func (bus *pluginEventBus) SendEvent(ctx context.Context, eventType logical.EventType, data *logical.EventData) error { + return bus.bus.SendEventInternal(ctx, bus.namespace, bus.pluginInfo, eventType, data) } -func TestDebugCommand_Pprof(t *testing.T) { - testDir, err := ioutil.TempDir("", "vault-debug") +func NewEventBus(localNodeID string, logger hclog.Logger) (*EventBus, error) { + broker, err := eventlogger.NewBroker() if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) - - client, closer := testVaultServer(t) - defer closer() - - ui, cmd := testDebugCommand(t) - cmd.client = client - cmd.skipTimingChecks = true - - basePath := "pprof" - outputPath := filepath.Join(testDir, basePath) - // pprof requires a minimum interval of 1s, we set it to 2 to ensure it - // runs through and reduce flakiness on slower systems. - args := []string{ - "-compress=false", - "-duration=2s", - "-interval=2s", - fmt.Sprintf("-output=%s", outputPath), - "-target=pprof", + return nil, err } - code := cmd.Run(args) - if exp := 0; code != exp { - t.Log(ui.ErrorWriter.String()) - t.Fatalf("expected %d to be %d", code, exp) + formatterID, err := uuid.GenerateUUID() + if err != nil { + return nil, err } + formatterNodeID := eventlogger.NodeID(formatterID) - profiles := []string{"heap.prof", "goroutine.prof"} - pollingProfiles := []string{"profile.prof", "trace.out"} + if logger == nil { + logger = hclog.Default().Named("events") + } - // These are captures on the first (0th) and last (1st) frame - for _, v := range profiles { - files, _ := filepath.Glob(fmt.Sprintf("%s/*/%s", outputPath, v)) - if len(files) != 2 { - t.Errorf("2 output files should exist for %s: got: %v", v, files) - } + sourceUrl, err := url.Parse("vault://" + localNodeID) + if err != nil { + return nil, err } - // Since profile and trace are polling outputs, these only get captured - // on the first (0th) frame. 
- for _, v := range pollingProfiles { - files, _ := filepath.Glob(fmt.Sprintf("%s/*/%s", outputPath, v)) - if len(files) != 1 { - t.Errorf("1 output file should exist for %s: got: %v", v, files) - } + cloudEventsFormatterFilter := &cloudevents.FormatterFilter{ + Source: sourceUrl, + Predicate: func(_ context.Context, e interface{}) (bool, error) { + return true, nil + }, } - t.Log(ui.OutputWriter.String()) - t.Log(ui.ErrorWriter.String()) + return &EventBus{ + logger: logger, + broker: broker, + formatterNodeID: formatterNodeID, + timeout: defaultTimeout, + cloudEventsFormatterFilter: cloudEventsFormatterFilter, + filters: NewFilters(localNodeID), + }, nil } -func TestDebugCommand_IndexFile(t *testing.T) { - t.Parallel() +// Subscribe subscribes to events in the given namespace matching the event type pattern and after +// applying the optional go-bexpr filter. +func (bus *EventBus) Subscribe(ctx context.Context, ns *namespace.Namespace, pattern string, bexprFilter string) (<-chan *eventlogger.Event, context.CancelFunc, error) { + return bus.SubscribeMultipleNamespaces(ctx, []string{strings.Trim(ns.Path, "/")}, pattern, bexprFilter) +} - testDir, err := ioutil.TempDir("", "vault-debug") +// SubscribeMultipleNamespaces subscribes to events in the given namespace matching the event type +// pattern and after applying the optional go-bexpr filter. 
+func (bus *EventBus) SubscribeMultipleNamespaces(ctx context.Context, namespacePathPatterns []string, pattern string, bexprFilter string) (<-chan *eventlogger.Event, context.CancelFunc, error) { + // subscriptions are still stored even if the bus has not been started + pipelineID, err := uuid.GenerateUUID() if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) - - client, closer := testVaultServer(t) - defer closer() - - ui, cmd := testDebugCommand(t) - cmd.client = client - cmd.skipTimingChecks = true - - basePath := "index-test" - outputPath := filepath.Join(testDir, basePath) - // pprof requires a minimum interval of 1s - args := []string{ - "-compress=false", - "-duration=1s", - "-interval=1s", - "-metrics-interval=1s", - fmt.Sprintf("-output=%s", outputPath), + return nil, nil, err } - code := cmd.Run(args) - if exp := 0; code != exp { - t.Log(ui.ErrorWriter.String()) - t.Fatalf("expected %d to be %d", code, exp) + err = bus.broker.RegisterNode(bus.formatterNodeID, bus.cloudEventsFormatterFilter) + if err != nil { + return nil, nil, err } - content, err := ioutil.ReadFile(filepath.Join(outputPath, "index.json")) + filterNodeID, err := uuid.GenerateUUID() if err != nil { - t.Fatal(err) + return nil, nil, err } - index := &debugIndex{} - if err := json.Unmarshal(content, index); err != nil { - t.Fatal(err) + filterNode, err := newFilterNode(namespacePathPatterns, pattern, bexprFilter) + if err != nil { + return nil, nil, err } - if len(index.Output) == 0 { - t.Fatalf("expected valid index file: got: %v", index) + err = bus.broker.RegisterNode(eventlogger.NodeID(filterNodeID), filterNode) + if err != nil { + return nil, nil, err } -} - -func TestDebugCommand_TimingChecks(t *testing.T) { - t.Parallel() - testDir, err := ioutil.TempDir("", "vault-debug") + sinkNodeID, err := uuid.GenerateUUID() if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) - - cases := []struct { - name string - duration string - interval string - metricsInterval 
string - }{ - { - "short-values-all", - "10ms", - "10ms", - "10ms", - }, - { - "short-duration", - "10ms", - "", - "", - }, - { - "short-interval", - debugMinInterval.String(), - "10ms", - "", - }, - { - "short-metrics-interval", - debugMinInterval.String(), - "", - "10ms", - }, + return nil, nil, err } - for _, tc := range cases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - // If we are past the minimum duration + some grace, trigger shutdown - // to prevent hanging - grace := 10 * time.Second - shutdownCh := make(chan struct{}) - go func() { - time.AfterFunc(grace, func() { - close(shutdownCh) - }) - }() - - ui, cmd := testDebugCommand(t) - cmd.client = client - cmd.ShutdownCh = shutdownCh - - basePath := tc.name - outputPath := filepath.Join(testDir, basePath) - // pprof requires a minimum interval of 1s - args := []string{ - "-target=server-status", - fmt.Sprintf("-output=%s", outputPath), - } - if tc.duration != "" { - args = append(args, fmt.Sprintf("-duration=%s", tc.duration)) - } - if tc.interval != "" { - args = append(args, fmt.Sprintf("-interval=%s", tc.interval)) - } - if tc.metricsInterval != "" { - args = append(args, fmt.Sprintf("-metrics-interval=%s", tc.metricsInterval)) - } - - code := cmd.Run(args) - if exp := 0; code != exp { - t.Log(ui.ErrorWriter.String()) - t.Fatalf("expected %d to be %d", code, exp) - } - - if !strings.Contains(ui.OutputWriter.String(), "Duration: 5s") { - t.Fatal("expected minimum duration value") - } + ctx, cancel := context.WithCancel(ctx) - if tc.interval != "" { - if !strings.Contains(ui.OutputWriter.String(), " Interval: 5s") { - t.Fatal("expected minimum interval value") - } - } + bus.filters.addPattern(bus.filters.self, namespacePathPatterns, pattern) - if tc.metricsInterval != "" { - if !strings.Contains(ui.OutputWriter.String(), "Metrics Interval: 5s") { - t.Fatal("expected minimum metrics interval value") - } - } - }) + asyncNode 
:= newAsyncNode(ctx, bus.logger, bus.broker, func() { + bus.filters.removePattern(bus.filters.self, namespacePathPatterns, pattern) + }) + err = bus.broker.RegisterNode(eventlogger.NodeID(sinkNodeID), asyncNode) + if err != nil { + defer cancel() + return nil, nil, err } -} -func TestDebugCommand_NoConnection(t *testing.T) { - t.Parallel() + nodes := []eventlogger.NodeID{eventlogger.NodeID(filterNodeID), bus.formatterNodeID, eventlogger.NodeID(sinkNodeID)} - client, err := api.NewClient(nil) - if err != nil { - t.Fatal(err) + pipeline := eventlogger.Pipeline{ + PipelineID: eventlogger.PipelineID(pipelineID), + EventType: eventTypeAll, + NodeIDs: nodes, } - - if err := client.SetAddress(""); err != nil { - t.Fatal(err) + err = bus.broker.RegisterPipeline(pipeline) + if err != nil { + defer cancel() + return nil, nil, err } - _, cmd := testDebugCommand(t) - cmd.client = client - cmd.skipTimingChecks = true - - args := []string{ - "-duration=1s", - "-target=server-status", - } + addSubscriptions(1) + // add info needed to cancel the subscription + asyncNode.pipelineID = eventlogger.PipelineID(pipelineID) + asyncNode.cancelFunc = cancel + // Capture context in a closure for the cancel func + return asyncNode.ch, func() { asyncNode.Close(ctx) }, nil +} - code := cmd.Run(args) - if exp := 1; code != exp { - t.Fatalf("expected %d to be %d", code, exp) - } +// SetSendTimeout sets the timeout of sending events. If the events are not accepted by the +// underlying channel before this timeout, then the channel closed. 
+func (bus *EventBus) SetSendTimeout(timeout time.Duration) { + bus.timeout = timeout } -func TestDebugCommand_OutputExists(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - compress bool - outputFile string - expectedError string - }{ - { - "no-compress", - false, - "output-exists", - "output directory already exists", - }, - { - "compress", - true, - "output-exist.tar.gz", - "output file already exists", - }, +func newFilterNode(namespacePatterns []string, pattern string, bexprFilter string) (*eventlogger.Filter, error) { + var evaluator *bexpr.Evaluator + if bexprFilter != "" { + var err error + evaluator, err = bexpr.CreateEvaluator(bexprFilter) + if err != nil { + return nil, err + } } - - for _, tc := range cases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - testDir, err := ioutil.TempDir("", "vault-debug") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) - - client, closer := testVaultServer(t) - defer closer() - - ui, cmd := testDebugCommand(t) - cmd.client = client - cmd.skipTimingChecks = true - - outputPath := filepath.Join(testDir, tc.outputFile) - - // Create a conflicting file/directory - if tc.compress { - _, err = os.Create(outputPath) - if err != nil { - t.Fatal(err) + return &eventlogger.Filter{ + Predicate: func(e *eventlogger.Event) (bool, error) { + eventRecv := e.Payload.(*logical.EventReceived) + eventNs := strings.Trim(eventRecv.Namespace, "/") + // Drop if event is not in namespace patterns namespace. 
+ if len(namespacePatterns) > 0 { + allow := false + for _, nsPattern := range namespacePatterns { + if glob.Glob(nsPattern, eventNs) { + allow = true + break + } } - } else { - err = os.Mkdir(outputPath, 0o700) - if err != nil { - t.Fatal(err) + if !allow { + return false, nil } } - args := []string{ - fmt.Sprintf("-compress=%t", tc.compress), - "-duration=1s", - "-interval=1s", - "-metrics-interval=1s", - fmt.Sprintf("-output=%s", outputPath), + // NodeFilter for correct event type, including wildcards. + if !glob.Glob(pattern, eventRecv.EventType) { + return false, nil } - code := cmd.Run(args) - if exp := 1; code != exp { - t.Log(ui.OutputWriter.String()) - t.Log(ui.ErrorWriter.String()) - t.Errorf("expected %d to be %d", code, exp) + // apply go-bexpr filter + if evaluator != nil { + return evaluator.Evaluate(eventRecv.BexprDatum()) } - - output := ui.ErrorWriter.String() + ui.OutputWriter.String() - if !strings.Contains(output, tc.expectedError) { - t.Fatalf("expected %s, got: %s", tc.expectedError, output) - } - }) - } + return true, nil + }, + }, nil } -func TestDebugCommand_PartialPermissions(t *testing.T) { - t.Parallel() - - testDir, err := ioutil.TempDir("", "vault-debug") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) - - client, closer := testVaultServer(t) - defer closer() - - // Create a new token with default policy - resp, err := client.Logical().Write("auth/token/create", map[string]interface{}{ - "policies": "default", - }) - if err != nil { - t.Fatal(err) - } - - client.SetToken(resp.Auth.ClientToken) - - ui, cmd := testDebugCommand(t) - cmd.client = client - cmd.skipTimingChecks = true - - basePath := "with-default-policy-token" - args := []string{ - "-duration=1s", - fmt.Sprintf("-output=%s/%s", testDir, basePath), - } - - code := cmd.Run(args) - if exp := 0; code != exp { - t.Log(ui.ErrorWriter.String()) - t.Fatalf("expected %d to be %d", code, exp) - } - - bundlePath := filepath.Join(testDir, 
basePath+debugCompressionExt) - _, err = os.Open(bundlePath) - if err != nil { - t.Fatalf("failed to open archive: %s", err) +func newAsyncNode(ctx context.Context, logger hclog.Logger, broker *eventlogger.Broker, removeFilter func()) *asyncChanNode { + return &asyncChanNode{ + ctx: ctx, + ch: make(chan *eventlogger.Event), + logger: logger, + removeFilter: removeFilter, + removePipeline: broker.RemovePipelineAndNodes, } +} - tgz := archiver.NewTarGz() - err = tgz.Walk(bundlePath, func(f archiver.File) error { - fh, ok := f.Header.(*tar.Header) - if !ok { - t.Fatalf("invalid file header: %#v", f.Header) - } - - // Ignore base directory and index file - if fh.Name == basePath+"/" { - return nil - } - - // Ignore directories, which still get created by pprof but should - // otherwise be empty. - if fh.FileInfo().IsDir() { - return nil - } +// Close tells the bus to stop sending us events. +func (node *asyncChanNode) Close(ctx context.Context) { + node.closeOnce.Do(func() { + defer node.cancelFunc() + node.removeFilter() + removed, err := node.removePipeline(ctx, eventTypeAll, node.pipelineID) switch { - case fh.Name == filepath.Join(basePath, "index.json"): - case fh.Name == filepath.Join(basePath, "replication_status.json"): - case fh.Name == filepath.Join(basePath, "server_status.json"): - case fh.Name == filepath.Join(basePath, "vault.log"): - default: - return fmt.Errorf("unexpected file: %s", fh.Name) + case err != nil && removed: + msg := fmt.Sprintf("Error removing nodes referenced by pipeline %q", node.pipelineID) + node.logger.Warn(msg, err) + case err != nil: + msg := fmt.Sprintf("Error removing pipeline %q", node.pipelineID) + node.logger.Warn(msg, err) } - - return nil + addSubscriptions(-1) }) - if err != nil { - t.Fatal(err) - } } -// set insecure umask to see if the files and directories get created with right permissions -func TestDebugCommand_InsecureUmask(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("test does not work in windows 
environment") - } - t.Parallel() - - cases := []struct { - name string - compress bool - outputFile string - expectError bool - }{ - { - "with-compress", - true, - "with-compress.tar.gz", - false, - }, - { - "no-compress", - false, - "no-compress", - false, - }, - } - - for _, tc := range cases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - // set insecure umask - defer syscall.Umask(syscall.Umask(0)) - - testDir, err := ioutil.TempDir("", "vault-debug") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(testDir) - - client, closer := testVaultServer(t) - defer closer() - - ui, cmd := testDebugCommand(t) - cmd.client = client - cmd.skipTimingChecks = true - - outputPath := filepath.Join(testDir, tc.outputFile) - - args := []string{ - fmt.Sprintf("-compress=%t", tc.compress), - "-duration=1s", - "-interval=1s", - "-metrics-interval=1s", - fmt.Sprintf("-output=%s", outputPath), - } - - code := cmd.Run(args) - if exp := 0; code != exp { - t.Log(ui.ErrorWriter.String()) - t.Fatalf("expected %d to be %d", code, exp) - } - // If we expect an error we're done here - if tc.expectError { - return - } - - bundlePath := filepath.Join(testDir, tc.outputFile) - fs, err := os.Stat(bundlePath) - if os.IsNotExist(err) { - t.Log(ui.OutputWriter.String()) - t.Fatal(err) - } - // check permissions of the parent debug directory - err = isValidFilePermissions(fs) - if err != nil { - t.Fatalf(err.Error()) - } - - // check permissions of the files within the parent directory - switch tc.compress { - case true: - tgz := archiver.NewTarGz() - - err = tgz.Walk(bundlePath, func(f archiver.File) error { - fh, ok := f.Header.(*tar.Header) - if !ok { - return fmt.Errorf("invalid file header: %#v", f.Header) - } - err = isValidFilePermissions(fh.FileInfo()) - if err != nil { - t.Fatalf(err.Error()) - } - return nil - }) - - case false: - err = filepath.Walk(bundlePath, func(path string, info os.FileInfo, err error) error { - err = isValidFilePermissions(info) - if err 
!= nil { - t.Fatalf(err.Error()) - } - return nil - }) - } +func (node *asyncChanNode) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { + // sends to the channel async in another goroutine + go func() { + var timeout bool + select { + case node.ch <- e: + case <-ctx.Done(): + timeout = errors.Is(ctx.Err(), context.DeadlineExceeded) + case <-node.ctx.Done(): + timeout = errors.Is(node.ctx.Err(), context.DeadlineExceeded) + } + if timeout { + node.logger.Info("Subscriber took too long to process event, closing", "ID", e.Payload.(*logical.EventReceived).Event.Id) + node.Close(ctx) + } + }() + return e, nil +} - if err != nil { - t.Fatal(err) - } - }) - } +func (node *asyncChanNode) Reopen() error { + return nil } -func isValidFilePermissions(info os.FileInfo) (err error) { - mode := info.Mode() - // check group permissions - for i := 4; i < 7; i++ { - if string(mode.String()[i]) != "-" { - return fmt.Errorf("expected no permissions for group but got %s permissions for file %s", string(mode.String()[i]), info.Name()) - } - } +func (node *asyncChanNode) Type() eventlogger.NodeType { + return eventlogger.NodeTypeSink +} - // check others permissions - for i := 7; i < 10; i++ { - if string(mode.String()[i]) != "-" { - return fmt.Errorf("expected no permissions for others but got %s permissions for file %s", string(mode.String()[i]), info.Name()) - } - } - return err +func addSubscriptions(delta int64) { + metrics.SetGauge([]string{"events", "subscriptions"}, float32(subscriptions.Add(delta))) } diff --git a/command/delete.go b/command/delete.go index 58005e95b261..0255f7fe8678 100644 --- a/command/delete.go +++ b/command/delete.go @@ -1,128 +1,705 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: BUSL-1.1 -package command +package eventbus import ( + "context" + "encoding/json" + "errors" "fmt" - "io" - "os" - "strings" - - "github.com/mitchellh/cli" - "github.com/posener/complete" + "sync/atomic" + "testing" + "time" + + "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-secure-stdlib/strutil" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/sdk/logical" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/structpb" ) -var ( - _ cli.Command = (*DeleteCommand)(nil) - _ cli.CommandAutocomplete = (*DeleteCommand)(nil) -) +// TestBusBasics tests that basic event sending and subscribing function. +func TestBusBasics(t *testing.T) { + bus, err := NewEventBus("", nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + + eventType := logical.EventType("someType") + + event, err := logical.NewEvent() + if err != nil { + t.Fatal(err) + } + + err = bus.SendEventInternal(ctx, namespace.RootNamespace, nil, eventType, event) + if !errors.Is(err, ErrNotStarted) { + t.Errorf("Expected not started error but got: %v", err) + } -type DeleteCommand struct { - *BaseCommand + bus.Start() - testStdin io.Reader // for tests + err = bus.SendEventInternal(ctx, namespace.RootNamespace, nil, eventType, event) + if err != nil { + t.Errorf("Expected no error sending: %v", err) + } + + ch, cancel, err := bus.Subscribe(ctx, namespace.RootNamespace, string(eventType), "") + if err != nil { + t.Fatal(err) + } + defer cancel() + + event, err = logical.NewEvent() + if err != nil { + t.Fatal(err) + } + + err = bus.SendEventInternal(ctx, namespace.RootNamespace, nil, eventType, event) + if err != nil { + t.Error(err) + } + + timeout := time.After(1 * time.Second) + select { + case message := <-ch: + if message.Payload.(*logical.EventReceived).Event.Id != event.Id { + t.Errorf("Got unexpected message: %+v", message) + } + case <-timeout: + 
t.Error("Timeout waiting for message") + } +} + +// TestBusIgnoresSendContext tests that the context is ignored when sending to an event, +// so that we do not give up too quickly. +func TestBusIgnoresSendContext(t *testing.T) { + bus, err := NewEventBus("", nil) + if err != nil { + t.Fatal(err) + } + eventType := logical.EventType("someType") + + event, err := logical.NewEvent() + if err != nil { + t.Fatal(err) + } + + bus.Start() + + ch, subCancel, err := bus.Subscribe(context.Background(), namespace.RootNamespace, string(eventType), "") + if err != nil { + t.Fatal(err) + } + defer subCancel() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() // cancel immediately + + err = bus.SendEventInternal(ctx, namespace.RootNamespace, nil, eventType, event) + if err != nil { + t.Errorf("Expected no error sending: %v", err) + } + + timeout := time.After(1 * time.Second) + select { + case message := <-ch: + if message.Payload.(*logical.EventReceived).Event.Id != event.Id { + t.Errorf("Got unexpected message: %+v", message) + } + case <-timeout: + t.Error("Timeout waiting for message") + } } -func (c *DeleteCommand) Synopsis() string { - return "Delete secrets and configuration" +// TestSubscribeNonRootNamespace verifies that events for non-root namespaces +// aren't filtered out by the bus. 
+func TestSubscribeNonRootNamespace(t *testing.T) { + bus, err := NewEventBus("", nil) + if err != nil { + t.Fatal(err) + } + bus.Start() + ctx := context.Background() + + eventType := logical.EventType("someType") + + ns := &namespace.Namespace{ + ID: "abc", + Path: "abc/", + } + + ch, cancel, err := bus.Subscribe(ctx, ns, string(eventType), "") + if err != nil { + t.Fatal(err) + } + defer cancel() + + event, err := logical.NewEvent() + if err != nil { + t.Fatal(err) + } + + err = bus.SendEventInternal(ctx, ns, nil, eventType, event) + if err != nil { + t.Error(err) + } + + timeout := time.After(1 * time.Second) + select { + case message := <-ch: + if message.Payload.(*logical.EventReceived).Event.Id != event.Id { + t.Errorf("Got unexpected message: %+v", message) + } + case <-timeout: + t.Error("Timeout waiting for message") + } } -func (c *DeleteCommand) Help() string { - helpText := ` -Usage: vault delete [options] PATH +// TestNamespaceFiltering verifies that events for other namespaces are filtered out by the bus. +func TestNamespaceFiltering(t *testing.T) { + bus, err := NewEventBus("", nil) + if err != nil { + t.Fatal(err) + } + bus.Start() + ctx := context.Background() - Deletes secrets and configuration from Vault at the given path. The behavior - of "delete" is delegated to the backend corresponding to the given path. 
+ eventType := logical.EventType("someType") - Remove data in the status secret backend: + event, err := logical.NewEvent() + if err != nil { + t.Fatal(err) + } - $ vault delete secret/my-secret + ch, cancel, err := bus.Subscribe(ctx, namespace.RootNamespace, string(eventType), "") + if err != nil { + t.Fatal(err) + } + defer cancel() - Uninstall an encryption key in the transit backend: + event, err = logical.NewEvent() + if err != nil { + t.Fatal(err) + } - $ vault delete transit/keys/my-key + err = bus.SendEventInternal(ctx, &namespace.Namespace{ + ID: "abc", + Path: "/abc", + }, nil, eventType, event) + if err != nil { + t.Error(err) + } + + timeout := time.After(100 * time.Millisecond) + select { + case <-ch: + t.Errorf("Got abc namespace message when root namespace was specified") + case <-timeout: + // okay + } + + err = bus.SendEventInternal(ctx, namespace.RootNamespace, nil, eventType, event) + if err != nil { + t.Error(err) + } + + timeout = time.After(1 * time.Second) + select { + case message := <-ch: + if message.Payload.(*logical.EventReceived).Event.Id != event.Id { + t.Errorf("Got unexpected message %+v but was waiting for %+v", message, event) + } + + case <-timeout: + t.Error("Timed out waiting for message") + } +} - Delete an IAM role: +// TestBus2Subscriptions verifies that events of different types are successfully routed to the correct subscribers. +func TestBus2Subscriptions(t *testing.T) { + bus, err := NewEventBus("", nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + + eventType1 := logical.EventType("someType1") + eventType2 := logical.EventType("someType2") + bus.Start() - $ vault delete aws/roles/ops + ch1, cancel1, err := bus.Subscribe(ctx, namespace.RootNamespace, string(eventType1), "") + if err != nil { + t.Fatal(err) + } + defer cancel1() - For a full list of examples and paths, please see the documentation that - corresponds to the secret backend in use. 
+ ch2, cancel2, err := bus.Subscribe(ctx, namespace.RootNamespace, string(eventType2), "") + if err != nil { + t.Fatal(err) + } + defer cancel2() + + event1, err := logical.NewEvent() + if err != nil { + t.Fatal(err) + } + event2, err := logical.NewEvent() + if err != nil { + t.Fatal(err) + } -` + c.Flags().Help() + err = bus.SendEventInternal(ctx, namespace.RootNamespace, nil, eventType2, event2) + if err != nil { + t.Error(err) + } + err = bus.SendEventInternal(ctx, namespace.RootNamespace, nil, eventType1, event1) + if err != nil { + t.Error(err) + } - return strings.TrimSpace(helpText) + timeout := time.After(1 * time.Second) + select { + case message := <-ch1: + if message.Payload.(*logical.EventReceived).Event.Id != event1.Id { + t.Errorf("Got unexpected message: %v", message) + } + case <-timeout: + t.Error("Timeout waiting for event1") + } + select { + case message := <-ch2: + if message.Payload.(*logical.EventReceived).Event.Id != event2.Id { + t.Errorf("Got unexpected message: %v", message) + } + case <-timeout: + t.Error("Timeout waiting for event2") + } } -func (c *DeleteCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) +// TestBusSubscriptionsCancel verifies that canceled subscriptions are cleaned up. 
+func TestBusSubscriptionsCancel(t *testing.T) { + testCases := []struct { + cancel bool + }{ + {cancel: true}, + {cancel: false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("cancel=%v", tc.cancel), func(t *testing.T) { + subscriptions.Store(0) + bus, err := NewEventBus("", nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + if !tc.cancel { + // set the timeout very short to make the test faster if we aren't canceling explicitly + bus.SetSendTimeout(100 * time.Millisecond) + } + bus.Start() + + // create and stop a bunch of subscriptions + const create = 100 + const stop = 50 + + eventType := logical.EventType("someType") + + var channels []<-chan *eventlogger.Event + var cancels []context.CancelFunc + stopped := atomic.Int32{} + + received := atomic.Int32{} + + for i := 0; i < create; i++ { + ch, cancelFunc, err := bus.Subscribe(ctx, namespace.RootNamespace, string(eventType), "") + if err != nil { + t.Fatal(err) + } + t.Cleanup(cancelFunc) + channels = append(channels, ch) + cancels = append(cancels, cancelFunc) + + go func(i int32) { + <-ch // always receive one message + received.Add(1) + // continue receiving messages as long as are not stopped + for i < int32(stop) { + <-ch + received.Add(1) + } + if tc.cancel { + cancelFunc() // stop explicitly to unsubscribe + } + stopped.Add(1) + }(int32(i)) + } + + // check that all channels receive a message + event, err := logical.NewEvent() + if err != nil { + t.Fatal(err) + } + err = bus.SendEventInternal(ctx, namespace.RootNamespace, nil, eventType, event) + if err != nil { + t.Error(err) + } + waitFor(t, 1*time.Second, func() bool { return received.Load() == int32(create) }) + waitFor(t, 1*time.Second, func() bool { return stopped.Load() == int32(stop) }) + + // send another message, but half should stop receiving + event, err = logical.NewEvent() + if err != nil { + t.Fatal(err) + } + err = bus.SendEventInternal(ctx, namespace.RootNamespace, nil, eventType, event) + if err != 
nil { + t.Error(err) + } + waitFor(t, 1*time.Second, func() bool { return received.Load() == int32(create*2-stop) }) + // the sends should time out and the subscriptions should drop when cancelFunc is called or the context cancels + waitFor(t, 1*time.Second, func() bool { return subscriptions.Load() == int64(create-stop) }) + }) + } } -func (c *DeleteCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultFiles() +// waitFor waits for a condition to be true, up to the maximum timeout. +// It waits with a capped exponential backoff starting at 1ms. +// It is guaranteed to try f() at least once. +func waitFor(t *testing.T, maxWait time.Duration, f func() bool) { + t.Helper() + start := time.Now() + + if f() { + return + } + sleepAmount := 1 * time.Millisecond + for time.Now().Sub(start) <= maxWait { + left := time.Now().Sub(start) + sleepAmount = sleepAmount * 2 + if sleepAmount > left { + sleepAmount = left + } + time.Sleep(sleepAmount) + if f() { + return + } + } + t.Error("Timeout waiting for condition") } -func (c *DeleteCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() +// TestBusWildcardSubscriptions tests that a single subscription can receive +// multiple event types using * for glob patterns. 
+func TestBusWildcardSubscriptions(t *testing.T) { + bus, err := NewEventBus("", nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + + fooEventType := logical.EventType("kv/foo") + barEventType := logical.EventType("kv/bar") + bus.Start() + + ch1, cancel1, err := bus.Subscribe(ctx, namespace.RootNamespace, "kv/*", "") + if err != nil { + t.Fatal(err) + } + defer cancel1() + + ch2, cancel2, err := bus.Subscribe(ctx, namespace.RootNamespace, "*/bar", "") + if err != nil { + t.Fatal(err) + } + defer cancel2() + + event1, err := logical.NewEvent() + if err != nil { + t.Fatal(err) + } + event2, err := logical.NewEvent() + if err != nil { + t.Fatal(err) + } + + err = bus.SendEventInternal(ctx, namespace.RootNamespace, nil, barEventType, event2) + if err != nil { + t.Error(err) + } + err = bus.SendEventInternal(ctx, namespace.RootNamespace, nil, fooEventType, event1) + if err != nil { + t.Error(err) + } + + timeout := time.After(1 * time.Second) + // Expect to receive both events on ch1, which subscribed to kv/* + var ch1Seen []string + for i := 0; i < 2; i++ { + select { + case message := <-ch1: + ch1Seen = append(ch1Seen, message.Payload.(*logical.EventReceived).Event.Id) + case <-timeout: + t.Error("Timeout waiting for event1") + } + } + if len(ch1Seen) != 2 { + t.Errorf("Expected 2 events but got: %v", ch1Seen) + } else { + if !strutil.StrListContains(ch1Seen, event1.Id) { + t.Errorf("Did not find %s event1 ID in ch1seen", event1.Id) + } + if !strutil.StrListContains(ch1Seen, event2.Id) { + t.Errorf("Did not find %s event2 ID in ch1seen", event2.Id) + } + } + // Expect to receive just kv/bar on ch2, which subscribed to */bar + select { + case message := <-ch2: + if message.Payload.(*logical.EventReceived).Event.Id != event2.Id { + t.Errorf("Got unexpected message: %v", message) + } + case <-timeout: + t.Error("Timeout waiting for event2") + } } -func (c *DeleteCommand) Run(args []string) int { - f := c.Flags() +// TestDataPathIsPrependedWithMount 
tests that "data_path", if present in the +// metadata, is prepended with the plugin's mount. +func TestDataPathIsPrependedWithMount(t *testing.T) { + bus, err := NewEventBus("", nil) + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + + fooEventType := logical.EventType("kv/foo") + bus.Start() - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 + ch, cancel, err := bus.Subscribe(ctx, namespace.RootNamespace, "kv/*", "") + if err != nil { + t.Fatal(err) } + defer cancel() - args = f.Args() - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected at least 1, got %d)", len(args))) - return 1 + event, err := logical.NewEvent() + if err != nil { + t.Fatal(err) + } + metadata := map[string]string{ + logical.EventMetadataDataPath: "my/secret/path", + "not_touched": "xyz", + } + metadataBytes, err := json.Marshal(metadata) + if err != nil { + t.Fatal(err) + } + event.Metadata = &structpb.Struct{} + if err := event.Metadata.UnmarshalJSON(metadataBytes); err != nil { + t.Fatal(err) } - client, err := c.Client() + // no plugin info means nothing should change + err = bus.SendEventInternal(ctx, namespace.RootNamespace, nil, fooEventType, event) if err != nil { - c.UI.Error(err.Error()) - return 2 + t.Error(err) } - // Pull our fake stdin if needed - stdin := (io.Reader)(os.Stdin) - if c.testStdin != nil { - stdin = c.testStdin + timeout := time.After(1 * time.Second) + select { + case message := <-ch: + metadata := message.Payload.(*logical.EventReceived).Event.Metadata.AsMap() + assert.Contains(t, metadata, "not_touched") + assert.Equal(t, "xyz", metadata["not_touched"]) + assert.Contains(t, metadata, "data_path") + assert.Equal(t, "my/secret/path", metadata["data_path"]) + case <-timeout: + t.Error("Timeout waiting for event") } - path := sanitizePath(args[0]) + // send with a secrets plugin mounted + pluginInfo := logical.EventPluginInfo{ + MountClass: "secrets", + MountAccessor: "kv_abc", + 
MountPath: "secret/", + Plugin: "kv", + PluginVersion: "v1.13.1+builtin", + Version: "2", + } + err = bus.SendEventInternal(ctx, namespace.RootNamespace, &pluginInfo, fooEventType, event) + if err != nil { + t.Error(err) + } - data, err := parseArgsDataStringLists(stdin, args[1:]) + timeout = time.After(1 * time.Second) + select { + case message := <-ch: + metadata := message.Payload.(*logical.EventReceived).Event.Metadata.AsMap() + assert.Contains(t, metadata, "not_touched") + assert.Equal(t, "xyz", metadata["not_touched"]) + assert.Contains(t, metadata, "data_path") + assert.Equal(t, "secret/my/secret/path", metadata["data_path"]) + case <-timeout: + t.Error("Timeout waiting for event") + } + + // send with an auth plugin mounted + pluginInfo = logical.EventPluginInfo{ + MountClass: "auth", + MountAccessor: "kubernetes_abc", + MountPath: "kubernetes/", + Plugin: "vault-plugin-auth-kubernetes", + PluginVersion: "v1.13.1+builtin", + } + event, err = logical.NewEvent() + if err != nil { + t.Fatal(err) + } + metadata = map[string]string{ + logical.EventMetadataDataPath: "my/secret/path", + "not_touched": "xyz", + } + metadataBytes, err = json.Marshal(metadata) if err != nil { - c.UI.Error(fmt.Sprintf("Failed to parse string list data: %s", err)) - return 1 + t.Fatal(err) + } + event.Metadata = &structpb.Struct{} + if err := event.Metadata.UnmarshalJSON(metadataBytes); err != nil { + t.Fatal(err) } + err = bus.SendEventInternal(ctx, namespace.RootNamespace, &pluginInfo, fooEventType, event) + if err != nil { + t.Error(err) + } + + timeout = time.After(1 * time.Second) + select { + case message := <-ch: + metadata := message.Payload.(*logical.EventReceived).Event.Metadata.AsMap() + assert.Contains(t, metadata, "not_touched") + assert.Equal(t, "xyz", metadata["not_touched"]) + assert.Contains(t, metadata, "data_path") + assert.Equal(t, "auth/kubernetes/my/secret/path", metadata["data_path"]) + case <-timeout: + t.Error("Timeout waiting for event") + } +} - secret, err 
:= client.Logical().DeleteWithData(path, data) +// TestBexpr tests go-bexpr filters are evaluated on an event. +func TestBexpr(t *testing.T) { + bus, err := NewEventBus("", nil) if err != nil { - c.UI.Error(fmt.Sprintf("Error deleting %s: %s", path, err)) - if secret != nil { - OutputSecret(c.UI, secret) - } - return 2 + t.Fatal(err) } + ctx := context.Background() + + bus.Start() - if secret == nil { - // Don't output anything unless using the "table" format - if Format(c.UI) == "table" { - c.UI.Info(fmt.Sprintf("Success! Data deleted (if it existed) at: %s", path)) + sendEvent := func(eventType string) error { + event, err := logical.NewEvent() + if err != nil { + return err + } + metadata := map[string]string{ + logical.EventMetadataDataPath: "my/secret/path", + logical.EventMetadataOperation: "write", + } + metadataBytes, err := json.Marshal(metadata) + if err != nil { + return err + } + event.Metadata = &structpb.Struct{} + if err := event.Metadata.UnmarshalJSON(metadataBytes); err != nil { + return err + } + // send with a secrets plugin mounted + pluginInfo := logical.EventPluginInfo{ + MountClass: "secrets", + MountAccessor: "kv_abc", + MountPath: "secret/", + Plugin: "kv", + PluginVersion: "v1.13.1+builtin", + Version: "2", } - return 0 + return bus.SendEventInternal(ctx, namespace.RootNamespace, &pluginInfo, logical.EventType(eventType), event) + } + + testCases := []struct { + name string + filter string + shouldPassFilter bool + }{ + {"empty expression", "", true}, + {"non-matching expression", "data_path == nothing", false}, + {"matching expression", "data_path == secret/my/secret/path", true}, + {"full matching expression", "data_path == secret/my/secret/path and operation != read and source_plugin_mount == secret/ and source_plugin_mount != somethingelse", true}, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + eventType, err := uuid.GenerateUUID() + if err != nil { + t.Fatal(err) + } + ch, cancel, err := 
bus.Subscribe(ctx, namespace.RootNamespace, eventType, testCase.filter) + if err != nil { + t.Fatal(err) + } + defer cancel() + err = sendEvent(eventType) + if err != nil { + t.Fatal(err) + } + + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + got := false + select { + case <-ch: + got = true + case <-timer.C: + } + assert.Equal(t, testCase.shouldPassFilter, got) + }) + } +} + +// TestPipelineCleanedUp ensures pipelines are properly cleaned up after +// subscriptions are closed. +func TestPipelineCleanedUp(t *testing.T) { + bus, err := NewEventBus("", nil) + if err != nil { + t.Fatal(err) } - // Handle single field output - if c.flagField != "" { - return PrintRawField(c.UI, secret, c.flagField) + eventType := logical.EventType("someType") + bus.Start() + + _, cancel, err := bus.Subscribe(context.Background(), namespace.RootNamespace, string(eventType), "") + if err != nil { + t.Fatal(err) + } + // check that the filters are set + if !bus.filters.anyMatch(namespace.RootNamespace, eventType) { + t.Fatal() } + if !bus.broker.IsAnyPipelineRegistered(eventTypeAll) { + cancel() + t.Fatal() + } + + cancel() - return OutputSecret(c.UI, secret) + if bus.broker.IsAnyPipelineRegistered(eventTypeAll) { + t.Fatal() + } + + // and that the filters are cleaned up + if bus.filters.anyMatch(namespace.RootNamespace, eventType) { + t.Fatal() + } } diff --git a/command/delete_test.go b/command/delete_test.go index 3c08bc685b0b..2e0bb0f33092 100644 --- a/command/delete_test.go +++ b/command/delete_test.go @@ -1,144 +1,68 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "strings" - "testing" - - "github.com/mitchellh/cli" -) - -func testDeleteCommand(tb testing.TB) (*cli.MockUi, *DeleteCommand) { - tb.Helper() - - ui := cli.NewMockUi() - return ui, &DeleteCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - } -} - -func TestDeleteCommand_Run(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - args []string - out string - code int - }{ - { - "default", - []string{"secret/foo"}, - "", - 0, - }, - { - "optional_args", - []string{"secret/foo", "bar=baz"}, - "", - 0, - }, - { - "not_enough_args", - []string{}, - "Not enough arguments", - 1, - }, - } - - t.Run("validations", func(t *testing.T) { - t.Parallel() - - for _, tc := range cases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - ui, cmd := testDeleteCommand(t) - cmd.client = client - - code := cmd.Run(tc.args) - if code != tc.code { - t.Errorf("expected %d to be %d", code, tc.code) - } - - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, tc.out) { - t.Errorf("expected %q to contain %q", combined, tc.out) - } - }) - } - }) - - t.Run("integration", func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServer(t) - defer closer() - - if _, err := client.Logical().Write("secret/delete/foo", map[string]interface{}{ - "foo": "bar", - }); err != nil { - t.Fatal(err) - } - - ui, cmd := testDeleteCommand(t) - cmd.client = client - - code := cmd.Run([]string{ - "secret/delete/foo", - }) - if exp := 0; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - - expected := "Success! 
Data deleted (if it existed) at: secret/delete/foo" - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } - - secret, _ := client.Logical().Read("secret/delete/foo") - if secret != nil { - t.Errorf("expected deletion: %#v", secret) - } - }) - - t.Run("communication_failure", func(t *testing.T) { - t.Parallel() - - client, closer := testVaultServerBad(t) - defer closer() - - ui, cmd := testDeleteCommand(t) - cmd.client = client - - code := cmd.Run([]string{ - "secret/delete/foo", - }) - if exp := 2; code != exp { - t.Errorf("expected %d to be %d", code, exp) - } - - expected := "Error deleting secret/delete/foo: " - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, expected) { - t.Errorf("expected %q to contain %q", combined, expected) - } - }) - - t.Run("no_tabs", func(t *testing.T) { - t.Parallel() - - _, cmd := testDeleteCommand(t) - assertNoTabs(t, cmd) - }) -} +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import { resolve } from 'rsvp'; +import { module, test } from 'qunit'; +import { setupTest } from 'ember-qunit'; + +module('Unit | Adapter | capabilities', function (hooks) { + setupTest(hooks); + + test('calls the correct url', function (assert) { + let url, method, options; + const adapter = this.owner.factoryFor('adapter:capabilities').create({ + ajax: (...args) => { + [url, method, options] = args; + return resolve(); + }, + }); + + adapter.findRecord(null, 'capabilities', 'foo'); + assert.strictEqual(url, '/v1/sys/capabilities-self', 'calls the correct URL'); + assert.deepEqual({ paths: ['foo'] }, options.data, 'data params OK'); + assert.strictEqual(method, 'POST', 'method OK'); + }); + + test('enterprise calls the correct url within namespace when userRoot = root', function (assert) { + const namespaceSvc = this.owner.lookup('service:namespace'); + namespaceSvc.setNamespace('admin'); + + let url, method, options; + const adapter = this.owner.factoryFor('adapter:capabilities').create({ + ajax: (...args) => { + [url, method, options] = args; + return resolve(); + }, + }); + + adapter.findRecord(null, 'capabilities', 'foo'); + assert.strictEqual(url, '/v1/sys/capabilities-self', 'calls the correct URL'); + assert.deepEqual({ paths: ['admin/foo'] }, options.data, 'data params prefix paths with namespace'); + assert.strictEqual(options.namespace, '', 'sent with root namespace'); + assert.strictEqual(method, 'POST', 'method OK'); + }); + + test('enterprise calls the correct url within namespace when userRoot is not root', function (assert) { + const namespaceSvc = this.owner.lookup('service:namespace'); + namespaceSvc.setNamespace('admin/bar/baz'); + namespaceSvc.reopen({ + userRootNamespace: 'admin/bar', + }); + + let url, method, options; + const adapter = this.owner.factoryFor('adapter:capabilities').create({ + ajax: (...args) => { + [url, method, options] = args; + return resolve(); + }, + }); + + 
adapter.findRecord(null, 'capabilities', 'foo'); + assert.strictEqual(url, '/v1/sys/capabilities-self', 'calls the correct URL'); + assert.deepEqual({ paths: ['baz/foo'] }, options.data, 'data params prefix path with relative namespace'); + assert.strictEqual(options.namespace, 'admin/bar', 'sent with root namespace'); + assert.strictEqual(method, 'POST', 'method OK'); + }); +}); diff --git a/command/events.go b/command/events.go index acc68b117f9c..8eee53c3e27b 100644 --- a/command/events.go +++ b/command/events.go @@ -1,197 +1,48 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "context" - "errors" - "fmt" - "net/http" - "os" - "strings" - - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" - "github.com/posener/complete" - "nhooyr.io/websocket" -) - -var ( - _ cli.Command = (*EventsSubscribeCommands)(nil) - _ cli.CommandAutocomplete = (*EventsSubscribeCommands)(nil) -) - -type EventsSubscribeCommands struct { - *BaseCommand - - namespaces []string - bexprFilter string -} - -func (c *EventsSubscribeCommands) Synopsis() string { - return "Subscribe to events" -} - -func (c *EventsSubscribeCommands) Help() string { - helpText := ` -Usage: vault events subscribe [-namespaces=ns1] [-timeout=XYZs] [-filter=filterExpression] eventType - - Subscribe to events of the given event type (topic), which may be a glob - pattern (with "*" treated as a wildcard). The events will be sent to - standard out. - - The output will be a JSON object serialized using the default protobuf - JSON serialization format, with one line per event received. -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *EventsSubscribeCommands) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP) - f := set.NewFlagSet("Subscribe Options") - f.StringVar(&StringVar{ - Name: "filter", - Usage: `A boolean expression to use to filter events. Only events matching - the filter will be subscribed to. 
This is applied after any filtering - by event type or namespace.`, - Default: "", - Target: &c.bexprFilter, - }) - f.StringSliceVar(&StringSliceVar{ - Name: "namespaces", - Usage: `Specifies one or more patterns of additional child namespaces - to subscribe to. The namespace of the request is automatically - prepended, so specifying 'ns2' when the request is in the 'ns1' - namespace will result in subscribing to 'ns1/ns2', in addition to - 'ns1'. Patterns can include "*" characters to indicate - wildcards. The default is to subscribe only to the request's - namespace.`, - Default: []string{}, - Target: &c.namespaces, - }) - return set -} - -func (c *EventsSubscribeCommands) AutocompleteArgs() complete.Predictor { - return nil -} - -func (c *EventsSubscribeCommands) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *EventsSubscribeCommands) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) - return 1 - } - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - err = c.subscribeRequest(client, "sys/events/subscribe/"+args[0]) - if err != nil { - c.UI.Error(err.Error()) - return 1 - } - return 0 -} - -// cleanNamespace removes leading and trailing space and /'s from the namespace path. -func cleanNamespace(ns string) string { - ns = strings.TrimSpace(ns) - ns = strings.Trim(ns, "/") - return ns -} - -// cleanNamespaces removes leading and trailing space and /'s from the namespace paths. 
-func cleanNamespaces(namespaces []string) []string { - cleaned := make([]string, len(namespaces)) - for i, ns := range namespaces { - cleaned[i] = cleanNamespace(ns) - } - return cleaned -} - -func (c *EventsSubscribeCommands) subscribeRequest(client *api.Client, path string) error { - r := client.NewRequest("GET", "/v1/"+path) - u := r.URL - if u.Scheme == "http" { - u.Scheme = "ws" - } else { - u.Scheme = "wss" - } - q := u.Query() - q.Set("json", "true") - if len(c.namespaces) > 0 { - q["namespaces"] = cleanNamespaces(c.namespaces) - } - bexprFilter := strings.TrimSpace(c.bexprFilter) - if bexprFilter != "" { - q.Set("filter", bexprFilter) - } - u.RawQuery = q.Encode() - client.AddHeader("X-Vault-Token", client.Token()) - client.AddHeader("X-Vault-Namespace", client.Namespace()) - ctx := context.Background() - - // Follow redirects in case our request if our request is forwarded to the leader. - url := u.String() - var conn *websocket.Conn - var err error - for attempt := 0; attempt < 10; attempt++ { - var resp *http.Response - conn, resp, err = websocket.Dial(ctx, url, &websocket.DialOptions{ - HTTPClient: client.CloneConfig().HttpClient, - HTTPHeader: client.Headers(), - }) - - if err == nil { - break - } - - switch { - case resp == nil: - return err - case resp.StatusCode == http.StatusTemporaryRedirect: - url = resp.Header.Get("Location") - continue - case resp.StatusCode == http.StatusNotFound: - return errors.New("events endpoint not found; check `vault read sys/experiments` to see if an events experiment is available but disabled") - default: - return err - } - } - - if conn == nil { - return fmt.Errorf("too many redirects") - } - defer conn.Close(websocket.StatusNormalClosure, "") - - for { - _, message, err := conn.Read(ctx) - if err != nil { - return err - } - _, err = os.Stdout.Write(message) - if err != nil { - return err - } - } -} +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ + +import AdapterError from '@ember-data/adapter/error'; +import { set } from '@ember/object'; +import ApplicationAdapter from './application'; +import { sanitizePath } from 'core/utils/sanitize-path'; + +export default ApplicationAdapter.extend({ + pathForType() { + return 'capabilities-self'; + }, + + formatPaths(path) { + const { relativeNamespace } = this.namespaceService; + if (!relativeNamespace) { + return [path]; + } + // ensure original path doesn't have leading slash + return [`${relativeNamespace}/${path.replace(/^\//, '')}`]; + }, + + findRecord(store, type, id) { + const paths = this.formatPaths(id); + return this.ajax(this.buildURL(type), 'POST', { + data: { paths }, + namespace: sanitizePath(this.namespaceService.userRootNamespace), + }).catch((e) => { + if (e instanceof AdapterError) { + set(e, 'policyPath', 'sys/capabilities-self'); + } + throw e; + }); + }, + + queryRecord(store, type, query) { + const { id } = query; + if (!id) { + return; + } + return this.findRecord(store, type, id).then((resp) => { + resp.path = id; + return resp; + }); + }, +}); diff --git a/command/events_test.go b/command/events_test.go index 336dc0a34225..3d16b68cff8d 100644 --- a/command/events_test.go +++ b/command/events_test.go @@ -1,71 +1,65 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: BUSL-1.1 + */ -package command +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import { rootPem } from 'vault/tests/helpers/pki/values'; +import { rootDer } from 'vault/tests/helpers/pki/values'; -import ( - "strings" - "testing" +const SELECTORS = { + label: '[data-test-certificate-label]', + value: '[data-test-certificate-value]', + icon: '[data-test-certificate-icon]', + copyButton: '[data-test-copy-button]', + copyIcon: '[data-test-icon="clipboard-copy"]', +}; - "github.com/mitchellh/cli" -) +module('Integration | Component | certificate-card', function (hooks) { + setupRenderingTest(hooks); -func testEventsSubscribeCommand(tb testing.TB) (*cli.MockUi, *EventsSubscribeCommands) { - tb.Helper() + test('it renders', async function (assert) { + await render(hbs``); - ui := cli.NewMockUi() - return ui, &EventsSubscribeCommands{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - } -} + assert.dom(SELECTORS.label).hasNoText('There is no label because there is no value'); + assert.dom(SELECTORS.value).hasNoText('There is no value because none was provided'); + assert.dom(SELECTORS.icon).exists('The certificate icon exists'); + assert.dom(SELECTORS.copyIcon).exists('The copy icon renders'); + }); -// TestEventsSubscribeCommand_Run tests that the command argument parsing is working as expected. 
-func TestEventsSubscribeCommand_Run(t *testing.T) { - t.Parallel() + test('it renders with an example PEM Certificate', async function (assert) { + const certificate = rootPem; + this.set('certificate', certificate); + await render(hbs``); - cases := []struct { - name string - args []string - out string - code int - }{ - { - "not_enough_args", - []string{}, - "Not enough arguments", - 1, - }, - { - "too_many_args", - []string{"foo", "bar"}, - "Too many arguments", - 1, - }, - } + assert.dom(SELECTORS.label).hasText('PEM Format', 'The label text is PEM Format'); + assert.dom(SELECTORS.value).hasText(certificate, 'The data rendered is correct'); + assert.dom(SELECTORS.icon).exists('The certificate icon exists'); + assert.dom(SELECTORS.copyButton).exists('The copy button exists'); + }); - for _, tc := range cases { - tc := tc + test('it renders with an example DER Certificate', async function (assert) { + const certificate = rootDer; + this.set('certificate', certificate); + await render(hbs``); - t.Run(tc.name, func(t *testing.T) { - t.Parallel() + assert.dom(SELECTORS.label).hasText('DER Format', 'The label text is DER Format'); + assert.dom(SELECTORS.value).hasText(certificate, 'The data rendered is correct'); + assert.dom(SELECTORS.icon).exists('The certificate icon exists'); + assert.dom(SELECTORS.copyButton).exists('The copy button exists'); + }); - client, closer := testVaultServer(t) - defer closer() + test('it renders with the PEM Format label regardless of the value provided when @isPem is true', async function (assert) { + const certificate = 'example-certificate-text'; + this.set('certificate', certificate); + await render(hbs``); - ui, cmd := testEventsSubscribeCommand(t) - cmd.client = client - - code := cmd.Run(tc.args) - if code != tc.code { - t.Errorf("expected %d to be %d", code, tc.code) - } - - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, tc.out) { - t.Errorf("expected %q to contain %q", combined, 
tc.out) - } - }) - } -} + assert.dom(SELECTORS.label).hasText('PEM Format', 'The label text is PEM Format'); + assert.dom(SELECTORS.value).hasText(certificate, 'The data rendered is correct'); + assert.dom(SELECTORS.icon).exists('The certificate icon exists'); + assert.dom(SELECTORS.copyButton).exists('The copy button exists'); + }); +}); diff --git a/command/format.go b/command/format.go index 4b12771d98d1..044067b817e1 100644 --- a/command/format.go +++ b/command/format.go @@ -1,726 +1,32 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "os" - "sort" - "strings" - "time" - - "github.com/ghodss/yaml" - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" - "github.com/ryanuber/columnize" -) - -const ( - // hopeDelim is the delimiter to use when splitting columns. We call it a - // hopeDelim because we hope that it's never contained in a secret. - hopeDelim = "♨" -) - -type FormatOptions struct { - Format string -} - -func OutputSecret(ui cli.Ui, secret *api.Secret) int { - return outputWithFormat(ui, secret, secret) -} - -func OutputList(ui cli.Ui, data interface{}) int { - switch data := data.(type) { - case *api.Secret: - secret := data - return outputWithFormat(ui, secret, secret.Data["keys"]) - default: - return outputWithFormat(ui, nil, data) - } -} - -func OutputData(ui cli.Ui, data interface{}) int { - return outputWithFormat(ui, nil, data) -} - -func outputWithFormat(ui cli.Ui, secret *api.Secret, data interface{}) int { - format := Format(ui) - formatter, ok := Formatters[format] - if !ok { - ui.Error(fmt.Sprintf("Invalid output format: %s", format)) - return 1 - } - - if err := formatter.Output(ui, secret, data); err != nil { - ui.Error(fmt.Sprintf("Could not parse output: %s", err.Error())) - return 1 - } - return 0 -} - -type Formatter interface { - Output(ui cli.Ui, secret *api.Secret, data interface{}) error - Format(data interface{}) 
([]byte, error) -} - -var Formatters = map[string]Formatter{ - "json": JsonFormatter{}, - "table": TableFormatter{}, - "yaml": YamlFormatter{}, - "yml": YamlFormatter{}, - "pretty": PrettyFormatter{}, - "raw": RawFormatter{}, -} - -func Format(ui cli.Ui) string { - switch ui := ui.(type) { - case *VaultUI: - return ui.format - } - - format := os.Getenv(EnvVaultFormat) - if format == "" { - format = "table" - } - - return format -} - -func Detailed(ui cli.Ui) bool { - switch ui := ui.(type) { - case *VaultUI: - return ui.detailed - } - - return false -} - -// An output formatter for json output of an object -type JsonFormatter struct{} - -func (j JsonFormatter) Format(data interface{}) ([]byte, error) { - return json.MarshalIndent(data, "", " ") -} - -func (j JsonFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error { - b, err := j.Format(data) - if err != nil { - return err - } - - if secret != nil { - shouldListWithInfo := Detailed(ui) - - // Show the raw JSON of the LIST call, rather than only the - // list of keys. 
- if shouldListWithInfo { - b, err = j.Format(secret) - if err != nil { - return err - } - } - } - - ui.Output(string(b)) - return nil -} - -// An output formatter for raw output of the original request object -type RawFormatter struct{} - -func (r RawFormatter) Format(data interface{}) ([]byte, error) { - byte_data, ok := data.([]byte) - if !ok { - return nil, fmt.Errorf("This command does not support the -format=raw option; only `vault read` does.") - } - - return byte_data, nil -} - -func (r RawFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error { - b, err := r.Format(data) - if err != nil { - return err - } - ui.Output(string(b)) - return nil -} - -// An output formatter for yaml output format of an object -type YamlFormatter struct{} - -func (y YamlFormatter) Format(data interface{}) ([]byte, error) { - return yaml.Marshal(data) -} - -func (y YamlFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error { - b, err := y.Format(data) - if err == nil { - ui.Output(strings.TrimSpace(string(b))) - } - return err -} - -type PrettyFormatter struct{} - -func (p PrettyFormatter) Format(data interface{}) ([]byte, error) { - return nil, nil -} - -func (p PrettyFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error { - switch data.(type) { - case *api.AutopilotState: - p.OutputAutopilotState(ui, data) - default: - return errors.New("cannot use the pretty formatter for this type") - } - return nil -} - -func outputStringSlice(buffer *bytes.Buffer, indent string, values []string) { - for _, val := range values { - buffer.WriteString(fmt.Sprintf("%s%s\n", indent, val)) - } -} - -type mapOutput struct { - key string - value string -} - -func formatServer(srv *api.AutopilotServer) string { - var buffer bytes.Buffer - - buffer.WriteString(fmt.Sprintf(" %s\n", srv.ID)) - buffer.WriteString(fmt.Sprintf(" Name: %s\n", srv.Name)) - buffer.WriteString(fmt.Sprintf(" Address: %s\n", srv.Address)) - buffer.WriteString(fmt.Sprintf(" 
Status: %s\n", srv.Status)) - buffer.WriteString(fmt.Sprintf(" Node Status: %s\n", srv.NodeStatus)) - buffer.WriteString(fmt.Sprintf(" Healthy: %t\n", srv.Healthy)) - buffer.WriteString(fmt.Sprintf(" Last Contact: %s\n", srv.LastContact)) - buffer.WriteString(fmt.Sprintf(" Last Term: %d\n", srv.LastTerm)) - buffer.WriteString(fmt.Sprintf(" Last Index: %d\n", srv.LastIndex)) - buffer.WriteString(fmt.Sprintf(" Version: %s\n", srv.Version)) - - if srv.UpgradeVersion != "" { - buffer.WriteString(fmt.Sprintf(" Upgrade Version: %s\n", srv.UpgradeVersion)) - } - if srv.RedundancyZone != "" { - buffer.WriteString(fmt.Sprintf(" Redundancy Zone: %s\n", srv.RedundancyZone)) - } - if srv.NodeType != "" { - buffer.WriteString(fmt.Sprintf(" Node Type: %s\n", srv.NodeType)) - } - - return buffer.String() -} - -func (p PrettyFormatter) OutputAutopilotState(ui cli.Ui, data interface{}) { - state := data.(*api.AutopilotState) - - var buffer bytes.Buffer - buffer.WriteString(fmt.Sprintf("Healthy: %t\n", state.Healthy)) - buffer.WriteString(fmt.Sprintf("Failure Tolerance: %d\n", state.FailureTolerance)) - buffer.WriteString(fmt.Sprintf("Leader: %s\n", state.Leader)) - buffer.WriteString("Voters:\n") - outputStringSlice(&buffer, " ", state.Voters) - - if len(state.NonVoters) > 0 { - buffer.WriteString("Non Voters:\n") - outputStringSlice(&buffer, " ", state.NonVoters) - } - - if state.OptimisticFailureTolerance > 0 { - buffer.WriteString(fmt.Sprintf("Optimistic Failure Tolerance: %d\n", state.OptimisticFailureTolerance)) - } - - // Servers - buffer.WriteString("Servers:\n") - var outputs []mapOutput - for id, srv := range state.Servers { - outputs = append(outputs, mapOutput{key: id, value: formatServer(srv)}) - } - sort.Slice(outputs, func(i, j int) bool { - return outputs[i].key < outputs[j].key - }) - for _, output := range outputs { - buffer.WriteString(output.value) - } - - // Redundancy Zones - if len(state.RedundancyZones) > 0 { - buffer.WriteString("Redundancy Zones:\n") - 
zoneList := make([]string, 0, len(state.RedundancyZones)) - for z := range state.RedundancyZones { - zoneList = append(zoneList, z) - } - sort.Strings(zoneList) - for _, zoneName := range zoneList { - zone := state.RedundancyZones[zoneName] - servers := zone.Servers - voters := zone.Voters - sort.Strings(servers) - sort.Strings(voters) - buffer.WriteString(fmt.Sprintf(" %s\n", zoneName)) - buffer.WriteString(fmt.Sprintf(" Servers: %s\n", strings.Join(servers, ", "))) - buffer.WriteString(fmt.Sprintf(" Voters: %s\n", strings.Join(voters, ", "))) - buffer.WriteString(fmt.Sprintf(" Failure Tolerance: %d\n", zone.FailureTolerance)) - } - } - - // Upgrade Info - if state.Upgrade != nil { - buffer.WriteString("Upgrade Info:\n") - buffer.WriteString(fmt.Sprintf(" Status: %s\n", state.Upgrade.Status)) - buffer.WriteString(fmt.Sprintf(" Target Version: %s\n", state.Upgrade.TargetVersion)) - buffer.WriteString(fmt.Sprintf(" Target Version Voters: %s\n", strings.Join(state.Upgrade.TargetVersionVoters, ", "))) - buffer.WriteString(fmt.Sprintf(" Target Version Non-Voters: %s\n", strings.Join(state.Upgrade.TargetVersionNonVoters, ", "))) - buffer.WriteString(fmt.Sprintf(" Other Version Voters: %s\n", strings.Join(state.Upgrade.OtherVersionVoters, ", "))) - buffer.WriteString(fmt.Sprintf(" Other Version Non-Voters: %s\n", strings.Join(state.Upgrade.OtherVersionNonVoters, ", "))) - - if len(state.Upgrade.RedundancyZones) > 0 { - buffer.WriteString(" Redundancy Zones:\n") - for zoneName, zoneVersion := range state.Upgrade.RedundancyZones { - buffer.WriteString(fmt.Sprintf(" %s\n", zoneName)) - buffer.WriteString(fmt.Sprintf(" Target Version Voters: %s\n", strings.Join(zoneVersion.TargetVersionVoters, ", "))) - buffer.WriteString(fmt.Sprintf(" Target Version Non-Voters: %s\n", strings.Join(zoneVersion.TargetVersionNonVoters, ", "))) - buffer.WriteString(fmt.Sprintf(" Other Version Voters: %s\n", strings.Join(zoneVersion.OtherVersionVoters, ", "))) - buffer.WriteString(fmt.Sprintf(" 
Other Version Non-Voters: %s\n", strings.Join(zoneVersion.OtherVersionNonVoters, ", "))) - } - } - } - - ui.Output(buffer.String()) -} - -// An output formatter for table output of an object -type TableFormatter struct{} - -// We don't use this due to the TableFormatter introducing a bug when the -field flag is supplied: -// https://github.com/hashicorp/vault/commit/b24cf9a8af2190e96c614205b8cdf06d8c4b6718 . -func (t TableFormatter) Format(data interface{}) ([]byte, error) { - return nil, nil -} - -func (t TableFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{}) error { - switch data := data.(type) { - case *api.Secret: - return t.OutputSecret(ui, secret) - case []interface{}: - return t.OutputList(ui, secret, data) - case []string: - return t.OutputList(ui, nil, data) - case map[string]interface{}: - return t.OutputMap(ui, data) - case SealStatusOutput: - return t.OutputSealStatusStruct(ui, nil, data) - default: - return errors.New("cannot use the table formatter for this type") - } -} - -func (t TableFormatter) OutputSealStatusStruct(ui cli.Ui, secret *api.Secret, data interface{}) error { - var status SealStatusOutput = data.(SealStatusOutput) - var sealPrefix string - - out := []string{} - out = append(out, "Key | Value") - out = append(out, fmt.Sprintf("Seal Type | %s", status.Type)) - if status.RecoverySeal { - sealPrefix = "Recovery " - out = append(out, fmt.Sprintf("Recovery Seal Type | %s", status.RecoverySealType)) - } - out = append(out, fmt.Sprintf("Initialized | %t", status.Initialized)) - out = append(out, fmt.Sprintf("Sealed | %t", status.Sealed)) - out = append(out, fmt.Sprintf("Total %sShares | %d", sealPrefix, status.N)) - out = append(out, fmt.Sprintf("Threshold | %d", status.T)) - - if status.Sealed { - out = append(out, fmt.Sprintf("Unseal Progress | %d/%d", status.Progress, status.T)) - out = append(out, fmt.Sprintf("Unseal Nonce | %s", status.Nonce)) - } - - if status.Migration { - out = append(out, fmt.Sprintf("Seal Migration 
in Progress | %t", status.Migration)) - } - - out = append(out, fmt.Sprintf("Version | %s", status.Version)) - out = append(out, fmt.Sprintf("Build Date | %s", status.BuildDate)) - out = append(out, fmt.Sprintf("Storage Type | %s", status.StorageType)) - - if status.ClusterName != "" && status.ClusterID != "" { - out = append(out, fmt.Sprintf("Cluster Name | %s", status.ClusterName)) - out = append(out, fmt.Sprintf("Cluster ID | %s", status.ClusterID)) - } - - // Output if HCP link is configured - if status.HCPLinkStatus != "" { - out = append(out, fmt.Sprintf("HCP Link Status | %s", status.HCPLinkStatus)) - out = append(out, fmt.Sprintf("HCP Link Resource ID | %s", status.HCPLinkResourceID)) - } - - // Output if HA is enabled - out = append(out, fmt.Sprintf("HA Enabled | %t", status.HAEnabled)) - - if status.HAEnabled { - mode := "sealed" - if !status.Sealed { - out = append(out, fmt.Sprintf("HA Cluster | %s", status.LeaderClusterAddress)) - mode = "standby" - showLeaderAddr := false - if status.IsSelf { - mode = "active" - } else { - if status.LeaderAddress == "" { - status.LeaderAddress = "" - } - showLeaderAddr = true - } - out = append(out, fmt.Sprintf("HA Mode | %s", mode)) - - if status.IsSelf && !status.ActiveTime.IsZero() { - out = append(out, fmt.Sprintf("Active Since | %s", status.ActiveTime.Format(time.RFC3339Nano))) - } - // This is down here just to keep ordering consistent - if showLeaderAddr { - out = append(out, fmt.Sprintf("Active Node Address | %s", status.LeaderAddress)) - } - - if status.PerfStandby { - out = append(out, fmt.Sprintf("Performance Standby Node | %t", status.PerfStandby)) - out = append(out, fmt.Sprintf("Performance Standby Last Remote WAL | %d", status.PerfStandbyLastRemoteWAL)) - } - } - } - - if status.RaftCommittedIndex > 0 { - out = append(out, fmt.Sprintf("Raft Committed Index | %d", status.RaftCommittedIndex)) - } - if status.RaftAppliedIndex > 0 { - out = append(out, fmt.Sprintf("Raft Applied Index | %d", 
status.RaftAppliedIndex)) - } - if status.LastWAL != 0 { - out = append(out, fmt.Sprintf("Last WAL | %d", status.LastWAL)) - } - if len(status.Warnings) > 0 { - out = append(out, fmt.Sprintf("Warnings | %v", status.Warnings)) - } - - ui.Output(tableOutput(out, &columnize.Config{ - Delim: "|", - })) - return nil -} - -func (t TableFormatter) OutputList(ui cli.Ui, secret *api.Secret, data interface{}) error { - t.printWarnings(ui, secret) - - // Determine if we have additional information from a ListResponseWithInfo endpoint. - var additionalInfo map[string]interface{} - if secret != nil { - shouldListWithInfo := Detailed(ui) - if additional, ok := secret.Data["key_info"]; shouldListWithInfo && ok && len(additional.(map[string]interface{})) > 0 { - additionalInfo = additional.(map[string]interface{}) - } - } - - switch data := data.(type) { - case []interface{}: - case []string: - ui.Output(tableOutput(data, nil)) - return nil - default: - return errors.New("error: table formatter cannot output list for this data type") - } - - list := data.([]interface{}) - - if len(list) > 0 { - keys := make([]string, len(list)) - for i, v := range list { - typed, ok := v.(string) - if !ok { - return fmt.Errorf("%v is not a string", v) - } - keys[i] = typed - } - sort.Strings(keys) - - // If we have a ListResponseWithInfo endpoint, we'll need to show - // additional headers. To satisfy the table outputter, we'll need - // to concat them with the deliminator. - var headers []string - header := "Keys" - if len(additionalInfo) > 0 { - seenHeaders := make(map[string]bool) - for key, rawValues := range additionalInfo { - // Most endpoints use the well-behaved ListResponseWithInfo. - // However, some use a hand-rolled equivalent, where the - // returned "keys" doesn't match the key of the "key_info" - // member (namely, /sys/policies/egp). We seek to exclude - // headers only visible from "non-visitable" key_info rows, - // to make table output less confusing. 
These non-visitable - // rows will still be visible in the JSON output. - index := sort.SearchStrings(keys, key) - if index < len(keys) && keys[index] != key { - continue - } - - values := rawValues.(map[string]interface{}) - for key := range values { - seenHeaders[key] = true - } - } - - for key := range seenHeaders { - headers = append(headers, key) - } - sort.Strings(headers) - - header = header + hopeDelim + strings.Join(headers, hopeDelim) - } - - // Finally, if we have a ListResponseWithInfo, we'll need to update - // the returned rows to not just have the keys (in the sorted order), - // but also have the values for each header (in their sorted order). - rows := keys - if len(additionalInfo) > 0 && len(headers) > 0 { - for index, row := range rows { - formatted := []string{row} - if rawValues, ok := additionalInfo[row]; ok { - values := rawValues.(map[string]interface{}) - for _, header := range headers { - if rawValue, ok := values[header]; ok { - if looksLikeDuration(header) { - rawValue = humanDurationInt(rawValue) - } - - formatted = append(formatted, fmt.Sprintf("%v", rawValue)) - } else { - // Show a default empty n/a when this field is - // missing from the additional information. - formatted = append(formatted, "n/a") - } - } - } - - rows[index] = strings.Join(formatted, hopeDelim) - } - } - - // Prepend the header to the formatted rows. - output := append([]string{header}, rows...) - ui.Output(tableOutput(output, &columnize.Config{ - Delim: hopeDelim, - })) - } - - return nil -} - -// printWarnings prints any warnings in the secret. -func (t TableFormatter) printWarnings(ui cli.Ui, secret *api.Secret) { - if secret != nil && len(secret.Warnings) > 0 { - ui.Warn("WARNING! 
The following warnings were returned from Vault:\n") - for _, warning := range secret.Warnings { - ui.Warn(wrapAtLengthWithPadding(fmt.Sprintf("* %s", warning), 2)) - ui.Warn("") - } - } -} - -func (t TableFormatter) OutputSecret(ui cli.Ui, secret *api.Secret) error { - if secret == nil { - return nil - } - - t.printWarnings(ui, secret) - - out := make([]string, 0, 8) - if secret.LeaseDuration > 0 { - if secret.LeaseID != "" { - out = append(out, fmt.Sprintf("lease_id %s %s", hopeDelim, secret.LeaseID)) - out = append(out, fmt.Sprintf("lease_duration %s %v", hopeDelim, humanDurationInt(secret.LeaseDuration))) - out = append(out, fmt.Sprintf("lease_renewable %s %t", hopeDelim, secret.Renewable)) - } else { - // This is probably the generic secret backend which has leases, but we - // print them as refresh_interval to reduce confusion. - out = append(out, fmt.Sprintf("refresh_interval %s %v", hopeDelim, humanDurationInt(secret.LeaseDuration))) - } - } - - if secret.Auth != nil { - if secret.Auth.MFARequirement != nil { - out = append(out, fmt.Sprintf("mfa_request_id %s %s", hopeDelim, secret.Auth.MFARequirement.MFARequestID)) - - for k, constraintSet := range secret.Auth.MFARequirement.MFAConstraints { - for _, constraint := range constraintSet.Any { - out = append(out, fmt.Sprintf("mfa_constraint_%s_%s_id %s %s", k, constraint.Type, hopeDelim, constraint.ID)) - out = append(out, fmt.Sprintf("mfa_constraint_%s_%s_uses_passcode %s %t", k, constraint.Type, hopeDelim, constraint.UsesPasscode)) - if constraint.Name != "" { - out = append(out, fmt.Sprintf("mfa_constraint_%s_%s_name %s %s", k, constraint.Type, hopeDelim, constraint.Name)) - } - } - } - } else { // Token information only makes sense if no further MFA requirement (i.e. 
if we actually have a token) - out = append(out, fmt.Sprintf("token %s %s", hopeDelim, secret.Auth.ClientToken)) - out = append(out, fmt.Sprintf("token_accessor %s %s", hopeDelim, secret.Auth.Accessor)) - // If the lease duration is 0, it's likely a root token, so output the - // duration as "infinity" to clear things up. - if secret.Auth.LeaseDuration == 0 { - out = append(out, fmt.Sprintf("token_duration %s %s", hopeDelim, "∞")) - } else { - out = append(out, fmt.Sprintf("token_duration %s %v", hopeDelim, humanDurationInt(secret.Auth.LeaseDuration))) - } - out = append(out, fmt.Sprintf("token_renewable %s %t", hopeDelim, secret.Auth.Renewable)) - out = append(out, fmt.Sprintf("token_policies %s %q", hopeDelim, secret.Auth.TokenPolicies)) - out = append(out, fmt.Sprintf("identity_policies %s %q", hopeDelim, secret.Auth.IdentityPolicies)) - out = append(out, fmt.Sprintf("policies %s %q", hopeDelim, secret.Auth.Policies)) - for k, v := range secret.Auth.Metadata { - out = append(out, fmt.Sprintf("token_meta_%s %s %v", k, hopeDelim, v)) - } - } - } - - if secret.WrapInfo != nil { - out = append(out, fmt.Sprintf("wrapping_token: %s %s", hopeDelim, secret.WrapInfo.Token)) - out = append(out, fmt.Sprintf("wrapping_accessor: %s %s", hopeDelim, secret.WrapInfo.Accessor)) - out = append(out, fmt.Sprintf("wrapping_token_ttl: %s %v", hopeDelim, humanDurationInt(secret.WrapInfo.TTL))) - out = append(out, fmt.Sprintf("wrapping_token_creation_time: %s %s", hopeDelim, secret.WrapInfo.CreationTime.String())) - out = append(out, fmt.Sprintf("wrapping_token_creation_path: %s %s", hopeDelim, secret.WrapInfo.CreationPath)) - if secret.WrapInfo.WrappedAccessor != "" { - out = append(out, fmt.Sprintf("wrapped_accessor: %s %s", hopeDelim, secret.WrapInfo.WrappedAccessor)) - } - } - - if len(secret.Data) > 0 { - keys := make([]string, 0, len(secret.Data)) - for k := range secret.Data { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - v := secret.Data[k] - 
- // If the field "looks" like a TTL, print it as a time duration instead. - if looksLikeDuration(k) { - v = humanDurationInt(v) - } - - out = append(out, fmt.Sprintf("%s %s %v", k, hopeDelim, v)) - } - } - - // If we got this far and still don't have any data, there's nothing to print, - // sorry. - if len(out) == 0 { - return nil - } - - // Prepend the header - out = append([]string{"Key" + hopeDelim + "Value"}, out...) - - ui.Output(tableOutput(out, &columnize.Config{ - Delim: hopeDelim, - })) - return nil -} - -func (t TableFormatter) OutputMap(ui cli.Ui, data map[string]interface{}) error { - out := make([]string, 0, len(data)+1) - if len(data) > 0 { - keys := make([]string, 0, len(data)) - for k := range data { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - v := data[k] - - // If the field "looks" like a TTL, print it as a time duration instead. - if looksLikeDuration(k) { - v = humanDurationInt(v) - } - - out = append(out, fmt.Sprintf("%s %s %v", k, hopeDelim, v)) - } - } - - // If we got this far and still don't have any data, there's nothing to print, - // sorry. - if len(out) == 0 { - return nil - } - - // Prepend the header - out = append([]string{"Key" + hopeDelim + "Value"}, out...) - - ui.Output(tableOutput(out, &columnize.Config{ - Delim: hopeDelim, - })) - return nil -} - -// OutputSealStatus will print *api.SealStatusResponse in the CLI according to the format provided -func OutputSealStatus(ui cli.Ui, client *api.Client, status *api.SealStatusResponse) int { - sealStatusOutput := SealStatusOutput{SealStatusResponse: *status} - - // Mask the 'Vault is sealed' error, since this means HA is enabled, but that - // we cannot query for the leader since we are sealed. 
- leaderStatus, err := client.Sys().Leader() - if err != nil && strings.Contains(err.Error(), "Vault is sealed") { - leaderStatus = &api.LeaderResponse{HAEnabled: true} - err = nil - } - if err != nil { - ui.Error(fmt.Sprintf("Error checking leader status: %s", err)) - return 1 - } - - // copy leaderStatus fields into sealStatusOutput for display later - sealStatusOutput.HAEnabled = leaderStatus.HAEnabled - sealStatusOutput.IsSelf = leaderStatus.IsSelf - sealStatusOutput.ActiveTime = leaderStatus.ActiveTime - sealStatusOutput.LeaderAddress = leaderStatus.LeaderAddress - sealStatusOutput.LeaderClusterAddress = leaderStatus.LeaderClusterAddress - sealStatusOutput.PerfStandby = leaderStatus.PerfStandby - sealStatusOutput.PerfStandbyLastRemoteWAL = leaderStatus.PerfStandbyLastRemoteWAL - sealStatusOutput.LastWAL = leaderStatus.LastWAL - sealStatusOutput.RaftCommittedIndex = leaderStatus.RaftCommittedIndex - sealStatusOutput.RaftAppliedIndex = leaderStatus.RaftAppliedIndex - OutputData(ui, sealStatusOutput) - return 0 -} - -// looksLikeDuration checks if the given key "k" looks like a duration value. -// This is used to pretty-format duration values in responses, especially from -// plugins. -func looksLikeDuration(k string) bool { - return k == "period" || strings.HasSuffix(k, "_period") || - k == "ttl" || strings.HasSuffix(k, "_ttl") || - k == "duration" || strings.HasSuffix(k, "_duration") || - k == "lease_max" || k == "ttl_max" -} - -// This struct is responsible for capturing all the fields to be output by a -// vault status command, including fields that do not come from the status API. 
-// Currently we are adding the fields from api.LeaderResponse -type SealStatusOutput struct { - api.SealStatusResponse - HAEnabled bool `json:"ha_enabled"` - IsSelf bool `json:"is_self,omitempty"` - ActiveTime time.Time `json:"active_time,omitempty"` - LeaderAddress string `json:"leader_address,omitempty"` - LeaderClusterAddress string `json:"leader_cluster_address,omitempty"` - PerfStandby bool `json:"performance_standby,omitempty"` - PerfStandbyLastRemoteWAL uint64 `json:"performance_standby_last_remote_wal,omitempty"` - LastWAL uint64 `json:"last_wal,omitempty"` - RaftCommittedIndex uint64 `json:"raft_committed_index,omitempty"` - RaftAppliedIndex uint64 `json:"raft_applied_index,omitempty"` -} +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + + + + + +
+

+ {{this.format}} +

+ + {{@data}} + +
+
+ +
+
\ No newline at end of file diff --git a/command/kv.go b/command/kv.go index 9a7e9eaee15d..d789ad92873f 100644 --- a/command/kv.go +++ b/command/kv.go @@ -1,61 +1,104 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "strings" - - "github.com/mitchellh/cli" -) - -var _ cli.Command = (*KVCommand)(nil) - -type KVCommand struct { - *BaseCommand -} - -func (c *KVCommand) Synopsis() string { - return "Interact with Vault's Key-Value storage" -} - -func (c *KVCommand) Help() string { - helpText := ` -Usage: vault kv [options] [args] - - This command has subcommands for interacting with Vault's key-value - store. Here are some simple examples, and more detailed examples are - available in the subcommands or the documentation. - - Create or update the key named "foo" in the "secret" mount with the value - "bar=baz": - - $ vault kv put -mount=secret foo bar=baz - - Read this value back: - - $ vault kv get -mount=secret foo - - Get metadata for the key: - - $ vault kv metadata get -mount=secret foo - - Get a specific version of the key: - - $ vault kv get -mount=secret -version=1 foo - - The deprecated path-like syntax can also be used, but this should be avoided - for KV v2, as the fact that it is not actually the full API path to - the secret (secret/data/foo) can cause confusion: - - $ vault kv get secret/foo - - Please see the individual subcommand help for detailed usage information. -` - - return strings.TrimSpace(helpText) -} - -func (c *KVCommand) Run(args []string) int { - return cli.RunResultHelp -} +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + + + + + + +

+ {{if @model.isNew "Create" "Edit"}} + Application +

+
+
+
+
+ + {{#each @model.formFields as |attr|}} + + {{/each}} + + {{! MORE OPTIONS TOGGLE }} + +
+ {{! RADIO CARD + SEARCH SELECT }} +
+

Assign access

+
+ + +
+ {{#if (eq this.radioCardGroupValue "limited")}} + + {{/if}} +
+
+
+ + +
+ {{#if this.invalidFormAlert}} +
+ +
+ {{/if}} +
+
\ No newline at end of file diff --git a/command/kv_delete.go b/command/kv_delete.go index acb4abecaf41..8bd9cea4ee8f 100644 --- a/command/kv_delete.go +++ b/command/kv_delete.go @@ -1,209 +1,1174 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// Copyright (c) 2017-2022 Snowflake Computing Inc. All rights reserved. -package command +package ocsp import ( + "bytes" + "context" + "crypto" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "errors" "fmt" - "path" + "io" + "math/big" + "net" + "net/http" + "net/url" + "strconv" "strings" + "sync" + "time" - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" - "github.com/posener/complete" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-retryablehttp" + lru "github.com/hashicorp/golang-lru" + "github.com/hashicorp/vault/sdk/helper/certutil" + "golang.org/x/crypto/ocsp" ) -var ( - _ cli.Command = (*KVDeleteCommand)(nil) - _ cli.CommandAutocomplete = (*KVDeleteCommand)(nil) +// FailOpenMode is OCSP fail open mode. FailOpenTrue by default and may +// set to ocspModeFailClosed for fail closed mode +type FailOpenMode uint32 + +type requestFunc func(method, urlStr string, body interface{}) (*retryablehttp.Request, error) + +type clientInterface interface { + Do(req *retryablehttp.Request) (*http.Response, error) +} + +const ( + httpHeaderContentType = "Content-Type" + httpHeaderAccept = "accept" + httpHeaderContentLength = "Content-Length" + httpHeaderHost = "Host" + ocspRequestContentType = "application/ocsp-request" + ocspResponseContentType = "application/ocsp-response" +) + +const ( + ocspFailOpenNotSet FailOpenMode = iota + // FailOpenTrue represents OCSP fail open mode. + FailOpenTrue + // FailOpenFalse represents OCSP fail closed mode. 
+ FailOpenFalse +) + +const ( + ocspModeFailOpen = "FAIL_OPEN" + ocspModeFailClosed = "FAIL_CLOSED" + ocspModeInsecure = "INSECURE" ) -type KVDeleteCommand struct { - *BaseCommand +const ocspCacheKey = "ocsp_cache" - flagVersions []string - flagMount string +const ( + // defaultOCSPResponderTimeout is the total timeout for OCSP responder. + defaultOCSPResponderTimeout = 10 * time.Second +) + +const ( + // cacheExpire specifies cache data expiration time in seconds. + cacheExpire = float64(24 * 60 * 60) +) + +type ocspCachedResponse struct { + time float64 + producedAt float64 + thisUpdate float64 + nextUpdate float64 + status ocspStatusCode } -func (c *KVDeleteCommand) Synopsis() string { - return "Deletes versions in the KV store" +type Client struct { + // caRoot includes the CA certificates. + caRoot map[string]*x509.Certificate + // certPool includes the CA certificates. + certPool *x509.CertPool + ocspResponseCache *lru.TwoQueueCache + ocspResponseCacheLock sync.RWMutex + // cacheUpdated is true if the memory cache is updated + cacheUpdated bool + logFactory func() hclog.Logger } -func (c *KVDeleteCommand) Help() string { - helpText := ` -Usage: vault kv delete [options] PATH +type ocspStatusCode int - Deletes the data for the provided version and path in the key-value store. The - versioned data will not be fully removed, but marked as deleted and will no - longer be returned in normal get requests. 
+type ocspStatus struct { + code ocspStatusCode + err error +} - To delete the latest version of the key "foo": +const ( + ocspSuccess ocspStatusCode = 0 + ocspStatusGood ocspStatusCode = -1 + ocspStatusRevoked ocspStatusCode = -2 + ocspStatusUnknown ocspStatusCode = -3 + ocspStatusOthers ocspStatusCode = -4 + ocspFailedDecomposeRequest ocspStatusCode = -5 + ocspInvalidValidity ocspStatusCode = -6 + ocspMissedCache ocspStatusCode = -7 + ocspCacheExpired ocspStatusCode = -8 +) - $ vault kv delete -mount=secret foo +// copied from crypto/ocsp.go +type certID struct { + HashAlgorithm pkix.AlgorithmIdentifier + NameHash []byte + IssuerKeyHash []byte + SerialNumber *big.Int +} - The deprecated path-like syntax can also be used, but this should be avoided - for KV v2, as the fact that it is not actually the full API path to - the secret (secret/data/foo) can cause confusion: - - $ vault kv delete secret/foo +// cache key +type certIDKey struct { + NameHash string + IssuerKeyHash string + SerialNumber string +} - To delete version 3 of key foo: +// copied from crypto/ocsp +var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ + crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), + crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), + crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), + crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), +} - $ vault kv delete -mount=secret -versions=3 foo +// copied from crypto/ocsp +func getOIDFromHashAlgorithm(target crypto.Hash) (asn1.ObjectIdentifier, error) { + for hash, oid := range hashOIDs { + if hash == target { + return oid, nil + } + } + return nil, fmt.Errorf("no valid OID is found for the hash algorithm: %v", target) +} + +func (c *Client) ClearCache() { + c.ocspResponseCache.Purge() +} - To delete all versions and metadata, see the "vault kv metadata" subcommand. 
+func (c *Client) getHashAlgorithmFromOID(target pkix.AlgorithmIdentifier) crypto.Hash { + for hash, oid := range hashOIDs { + if oid.Equal(target.Algorithm) { + return hash + } + } + // no valid hash algorithm is found for the oid. Falling back to SHA1 + return crypto.SHA1 +} - Additional flags and more advanced use cases are detailed below. +// isInValidityRange checks the validity +func isInValidityRange(currTime, nextUpdate time.Time) bool { + return !nextUpdate.IsZero() && !currTime.After(nextUpdate) +} -` + c.Flags().Help() +func extractCertIDKeyFromRequest(ocspReq []byte) (*certIDKey, *ocspStatus) { + r, err := ocsp.ParseRequest(ocspReq) + if err != nil { + return nil, &ocspStatus{ + code: ocspFailedDecomposeRequest, + err: err, + } + } - return strings.TrimSpace(helpText) + // encode CertID, used as a key in the cache + encodedCertID := &certIDKey{ + base64.StdEncoding.EncodeToString(r.IssuerNameHash), + base64.StdEncoding.EncodeToString(r.IssuerKeyHash), + r.SerialNumber.String(), + } + return encodedCertID, &ocspStatus{ + code: ocspSuccess, + } } -func (c *KVDeleteCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) - // Common Options - f := set.NewFlagSet("Common Options") +func (c *Client) encodeCertIDKey(certIDKeyBase64 string) (*certIDKey, error) { + r, err := base64.StdEncoding.DecodeString(certIDKeyBase64) + if err != nil { + return nil, err + } + var cid certID + rest, err := asn1.Unmarshal(r, &cid) + if err != nil { + // error in parsing + return nil, err + } + if len(rest) > 0 { + // extra bytes to the end + return nil, err + } + return &certIDKey{ + base64.StdEncoding.EncodeToString(cid.NameHash), + base64.StdEncoding.EncodeToString(cid.IssuerKeyHash), + cid.SerialNumber.String(), + }, nil +} - f.StringSliceVar(&StringSliceVar{ - Name: "versions", - Target: &c.flagVersions, - Default: nil, - Usage: `Specifies the version numbers to delete.`, - }) +func (c *Client) 
checkOCSPResponseCache(encodedCertID *certIDKey, subject, issuer *x509.Certificate) (*ocspStatus, error) { + c.ocspResponseCacheLock.RLock() + var cacheValue *ocspCachedResponse + v, ok := c.ocspResponseCache.Get(*encodedCertID) + if ok { + cacheValue = v.(*ocspCachedResponse) + } + c.ocspResponseCacheLock.RUnlock() - f.StringVar(&StringVar{ - Name: "mount", - Target: &c.flagMount, - Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value - Usage: `Specifies the path where the KV backend is mounted. If specified, - the next argument will be interpreted as the secret path. If this flag is - not specified, the next argument will be interpreted as the combined mount - path and secret path, with /data/ automatically appended between KV - v2 secrets.`, - }) + status, err := c.extractOCSPCacheResponseValue(cacheValue, subject, issuer) + if err != nil { + return nil, err + } + if !isValidOCSPStatus(status.code) { + c.deleteOCSPCache(encodedCertID) + } + return status, err +} - return set +func (c *Client) deleteOCSPCache(encodedCertID *certIDKey) { + c.ocspResponseCacheLock.Lock() + c.ocspResponseCache.Remove(*encodedCertID) + c.cacheUpdated = true + c.ocspResponseCacheLock.Unlock() } -func (c *KVDeleteCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultFiles() +func validateOCSP(ocspRes *ocsp.Response) (*ocspStatus, error) { + curTime := time.Now() + + if ocspRes == nil { + return nil, errors.New("OCSP Response is nil") + } + if !isInValidityRange(curTime, ocspRes.NextUpdate) { + return &ocspStatus{ + code: ocspInvalidValidity, + err: fmt.Errorf("invalid validity: producedAt: %v, thisUpdate: %v, nextUpdate: %v", ocspRes.ProducedAt, ocspRes.ThisUpdate, ocspRes.NextUpdate), + }, nil + } + return returnOCSPStatus(ocspRes), nil } -func (c *KVDeleteCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() +func returnOCSPStatus(ocspRes *ocsp.Response) *ocspStatus { + switch 
ocspRes.Status { + case ocsp.Good: + return &ocspStatus{ + code: ocspStatusGood, + err: nil, + } + case ocsp.Revoked: + return &ocspStatus{ + code: ocspStatusRevoked, + } + case ocsp.Unknown: + return &ocspStatus{ + code: ocspStatusUnknown, + err: errors.New("OCSP status unknown."), + } + default: + return &ocspStatus{ + code: ocspStatusOthers, + err: fmt.Errorf("OCSP others. %v", ocspRes.Status), + } + } } -func (c *KVDeleteCommand) Run(args []string) int { - f := c.Flags() +// retryOCSP is the second level of retry method if the returned contents are corrupted. It often happens with OCSP +// serer and retry helps. +func (c *Client) retryOCSP( + ctx context.Context, + client clientInterface, + req requestFunc, + ocspHost *url.URL, + headers map[string]string, + reqBody []byte, + issuer *x509.Certificate, +) (ocspRes *ocsp.Response, ocspResBytes []byte, ocspS *ocspStatus, retErr error) { + doRequest := func(request *retryablehttp.Request) (*http.Response, error) { + if request != nil { + request = request.WithContext(ctx) + for k, v := range headers { + request.Header[k] = append(request.Header[k], v) + } + } + res, err := client.Do(request) + if err != nil { + return nil, err + } + c.Logger().Debug("StatusCode from OCSP Server:", "statusCode", res.StatusCode) + return res, err + } + + for _, method := range []string{"GET", "POST"} { + reqUrl := *ocspHost + var body []byte + + switch method { + case "GET": + reqUrl.Path = reqUrl.Path + "/" + base64.StdEncoding.EncodeToString(reqBody) + case "POST": + body = reqBody + default: + // Programming error; all request/systems errors are multierror + // and appended. 
+ return nil, nil, nil, fmt.Errorf("unknown request method: %v", method) + } + + var res *http.Response + request, err := req(method, reqUrl.String(), bytes.NewBuffer(body)) + if err != nil { + err = fmt.Errorf("error creating %v request: %w", method, err) + retErr = multierror.Append(retErr, err) + continue + } + if res, err = doRequest(request); err != nil { + err = fmt.Errorf("error doing %v request: %w", method, err) + retErr = multierror.Append(retErr, err) + continue + } else { + defer res.Body.Close() + } - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 + if res.StatusCode != http.StatusOK { + err = fmt.Errorf("HTTP code is not OK on %v request. %v: %v", method, res.StatusCode, res.Status) + retErr = multierror.Append(retErr, err) + continue + } + + ocspResBytes, err = io.ReadAll(res.Body) + if err != nil { + err = fmt.Errorf("error reading %v request body: %w", method, err) + retErr = multierror.Append(retErr, err) + continue + } + + // Reading an OCSP response shouldn't be fatal. A misconfigured + // endpoint might return invalid results for e.g., GET but return + // valid results for POST on retry. This could happen if e.g., the + // server responds with JSON. + ocspRes, err = ocsp.ParseResponse(ocspResBytes /*issuer = */, nil /* !!unsafe!! */) + if err != nil { + err = fmt.Errorf("error parsing %v OCSP response: %w", method, err) + retErr = multierror.Append(retErr, err) + continue + } + + // Above, we use the unsafe issuer=nil parameter to ocsp.ParseResponse + // because Go's library does the wrong thing. + // + // Here, we lack a full chain, but we know we trust the parent issuer, + // so if the Go library incorrectly discards useful certificates, we + // likely cannot verify this without passing through the full chain + // back to the root. + // + // Instead, take one of two paths: 1. if there is no certificate in + // the ocspRes, verify the OCSP response directly with our trusted + // issuer certificate, or 2. 
if there is a certificate, either verify + // it directly matches our trusted issuer certificate, or verify it + // is signed by our trusted issuer certificate. + // + // See also: https://github.com/golang/go/issues/59641 + // + // This addresses the !!unsafe!! behavior above. + if ocspRes.Certificate == nil { + if err := ocspRes.CheckSignatureFrom(issuer); err != nil { + err = fmt.Errorf("error directly verifying signature on %v OCSP response: %w", method, err) + retErr = multierror.Append(retErr, err) + continue + } + } else { + // Because we have at least one certificate here, we know that + // Go's ocsp library verified the signature from this certificate + // onto the response and it was valid. Now we need to know we trust + // this certificate. There's two ways we can do this: + // + // 1. Via confirming issuer == ocspRes.Certificate, or + // 2. Via confirming ocspRes.Certificate.CheckSignatureFrom(issuer). + if !bytes.Equal(issuer.Raw, ocspRes.Raw) { + // 1 must not hold, so 2 holds; verify the signature. + if err := ocspRes.Certificate.CheckSignatureFrom(issuer); err != nil { + err = fmt.Errorf("error checking chain of trust on %v OCSP response via %v failed: %w", method, issuer.Subject.String(), err) + retErr = multierror.Append(retErr, err) + continue + } + + // Verify the OCSP responder certificate is still valid and + // contains the required EKU since it is a delegated OCSP + // responder certificate. 
+ if ocspRes.Certificate.NotAfter.Before(time.Now()) { + err := fmt.Errorf("error checking delegated OCSP responder on %v OCSP response: certificate has expired", method) + retErr = multierror.Append(retErr, err) + continue + } + haveEKU := false + for _, ku := range ocspRes.Certificate.ExtKeyUsage { + if ku == x509.ExtKeyUsageOCSPSigning { + haveEKU = true + break + } + } + if !haveEKU { + err := fmt.Errorf("error checking delegated OCSP responder on %v OCSP response: certificate lacks the OCSP Signing EKU", method) + retErr = multierror.Append(retErr, err) + continue + } + } + } + + // While we haven't validated the signature on the OCSP response, we + // got what we presume is a definitive answer and simply changing + // methods will likely not help us in that regard. Use this status + // to return without retrying another method, when it looks definitive. + // + // We don't accept ocsp.Unknown here: presumably, we could've hit a CDN + // with static mapping of request->responses, with a default "unknown" + // handler for everything else. By retrying here, we use POST, which + // could hit a live OCSP server with fresher data than the cached CDN. + if ocspRes.Status == ocsp.Good || ocspRes.Status == ocsp.Revoked { + break + } + + // Here, we didn't have a valid response. Even though we didn't get an + // error, we should inform the user that this (valid-looking) response + // wasn't utilized. 
+ err = fmt.Errorf("fetched %v OCSP response of status %v; wanted either good (%v) or revoked (%v)", method, ocspRes.Status, ocsp.Good, ocsp.Revoked) + retErr = multierror.Append(retErr, err) } - args = f.Args() - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) - return 1 + if ocspRes != nil && ocspResBytes != nil { + // Clear retErr, because we have one parseable-but-maybe-not-quite-correct + // OCSP response. + retErr = nil + ocspS = &ocspStatus{ + code: ocspSuccess, + } } - client, err := c.Client() + return +} + +// GetRevocationStatus checks the certificate revocation status for subject using issuer certificate. +func (c *Client) GetRevocationStatus(ctx context.Context, subject, issuer *x509.Certificate, conf *VerifyConfig) (*ocspStatus, error) { + status, ocspReq, encodedCertID, err := c.validateWithCache(subject, issuer) if err != nil { - c.UI.Error(err.Error()) - return 2 + return nil, err + } + if isValidOCSPStatus(status.code) { + return status, nil + } + if ocspReq == nil || encodedCertID == nil { + return status, nil + } + c.Logger().Debug("cache missed", "server", subject.OCSPServer) + if len(subject.OCSPServer) == 0 && len(conf.OcspServersOverride) == 0 { + return nil, fmt.Errorf("no OCSP responder URL: subject: %v", subject.Subject) + } + ocspHosts := subject.OCSPServer + if len(conf.OcspServersOverride) > 0 { + ocspHosts = conf.OcspServersOverride } - // If true, we're working with "-mount=secret foo" syntax. - // If false, we're using "secret/foo" syntax. 
- mountFlagSyntax := c.flagMount != "" + var wg sync.WaitGroup - var ( - mountPath string - partialPath string - v2 bool - ) + ocspStatuses := make([]*ocspStatus, len(ocspHosts)) + ocspResponses := make([]*ocsp.Response, len(ocspHosts)) + errors := make([]error, len(ocspHosts)) - // Parse the paths and grab the KV version - if mountFlagSyntax { - // In this case, this arg is the secret path (e.g. "foo"). - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) + for i, ocspHost := range ocspHosts { + u, err := url.Parse(ocspHost) if err != nil { - c.UI.Error(err.Error()) - return 2 + return nil, err } - if v2 { - partialPath = path.Join(mountPath, partialPath) + hostname := u.Hostname() + + headers := make(map[string]string) + headers[httpHeaderContentType] = ocspRequestContentType + headers[httpHeaderAccept] = ocspResponseContentType + headers[httpHeaderContentLength] = strconv.Itoa(len(ocspReq)) + headers[httpHeaderHost] = hostname + timeout := defaultOCSPResponderTimeout + + ocspClient := retryablehttp.NewClient() + ocspClient.HTTPClient.Timeout = timeout + ocspClient.HTTPClient.Transport = newInsecureOcspTransport(conf.ExtraCas) + + doRequest := func() error { + if conf.QueryAllServers { + defer wg.Done() + } + ocspRes, _, ocspS, err := c.retryOCSP( + ctx, ocspClient, retryablehttp.NewRequest, u, headers, ocspReq, issuer) + ocspResponses[i] = ocspRes + if err != nil { + errors[i] = err + return err + } + if ocspS.code != ocspSuccess { + ocspStatuses[i] = ocspS + return nil + } + + ret, err := validateOCSP(ocspRes) + if err != nil { + errors[i] = err + return err + } + if isValidOCSPStatus(ret.code) { + ocspStatuses[i] = ret + } else if ret.err != nil { + // This check needs to occur after the isValidOCSPStatus as the unknown + // status also sets an err value within ret. 
+ errors[i] = ret.err + return ret.err + } + return nil + } + if conf.QueryAllServers { + wg.Add(1) + go doRequest() + } else { + err = doRequest() + if err == nil { + break + } + } + } + if conf.QueryAllServers { + wg.Wait() + } + // Good by default + var ret *ocspStatus + ocspRes := ocspResponses[0] + var firstError error + for i := range ocspHosts { + if errors[i] != nil { + if firstError == nil { + firstError = errors[i] + } + } else if ocspStatuses[i] != nil { + switch ocspStatuses[i].code { + case ocspStatusRevoked: + ret = ocspStatuses[i] + ocspRes = ocspResponses[i] + break + case ocspStatusGood: + // Use this response only if we don't have a status already, or if what we have was unknown + if ret == nil || ret.code == ocspStatusUnknown { + ret = ocspStatuses[i] + ocspRes = ocspResponses[i] + } + case ocspStatusUnknown: + if ret == nil { + // We may want to use this as the overall result + ret = ocspStatuses[i] + ocspRes = ocspResponses[i] + } + } } + } + + // If no server reported the cert revoked, but we did have an error, report it + if (ret == nil || ret.code == ocspStatusUnknown) && firstError != nil { + return nil, firstError + } + // An extra safety in case ret and firstError are both nil + if ret == nil { + return nil, fmt.Errorf("failed to extract a known response code or error from the OCSP server") + } + + // otherwise ret should contain a response for the overall request + if !isValidOCSPStatus(ret.code) { + return ret, nil + } + v := ocspCachedResponse{ + status: ret.code, + time: float64(time.Now().UTC().Unix()), + producedAt: float64(ocspRes.ProducedAt.UTC().Unix()), + thisUpdate: float64(ocspRes.ThisUpdate.UTC().Unix()), + nextUpdate: float64(ocspRes.NextUpdate.UTC().Unix()), + } + + c.ocspResponseCacheLock.Lock() + c.ocspResponseCache.Add(*encodedCertID, &v) + c.cacheUpdated = true + c.ocspResponseCacheLock.Unlock() + return ret, nil +} + +func isValidOCSPStatus(status ocspStatusCode) bool { + return status == ocspStatusGood || status == 
ocspStatusRevoked || status == ocspStatusUnknown +} + +type VerifyConfig struct { + OcspEnabled bool + ExtraCas []*x509.Certificate + OcspServersOverride []string + OcspFailureMode FailOpenMode + QueryAllServers bool +} + +// VerifyLeafCertificate verifies just the subject against it's direct issuer +func (c *Client) VerifyLeafCertificate(ctx context.Context, subject, issuer *x509.Certificate, conf *VerifyConfig) error { + results, err := c.GetRevocationStatus(ctx, subject, issuer, conf) + if err != nil { + return err + } + if results.code == ocspStatusGood { + return nil } else { - // In this case, this arg is a path-like combination of mountPath/secretPath. - // (e.g. "secret/foo") - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(partialPath, client) + serial := issuer.SerialNumber + serialHex := strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":")) + if results.code == ocspStatusRevoked { + return fmt.Errorf("certificate with serial number %s has been revoked", serialHex) + } else if conf.OcspFailureMode == FailOpenFalse { + return fmt.Errorf("unknown OCSP status for cert with serial number %s", strings.TrimSpace(certutil.GetHexFormatted(serial.Bytes(), ":"))) + } else { + c.Logger().Warn("could not validate OCSP status for cert, but continuing in fail open mode", "serial", serialHex) + } + } + return nil +} + +// VerifyPeerCertificate verifies all of certificate revocation status +func (c *Client) VerifyPeerCertificate(ctx context.Context, verifiedChains [][]*x509.Certificate, conf *VerifyConfig) error { + for i := 0; i < len(verifiedChains); i++ { + // Certificate signed by Root CA. 
This should be one before the last in the Certificate Chain + numberOfNoneRootCerts := len(verifiedChains[i]) - 1 + if !verifiedChains[i][numberOfNoneRootCerts].IsCA || string(verifiedChains[i][numberOfNoneRootCerts].RawIssuer) != string(verifiedChains[i][numberOfNoneRootCerts].RawSubject) { + // Check if the last Non Root Cert is also a CA or is self signed. + // if the last certificate is not, add it to the list + rca := c.caRoot[string(verifiedChains[i][numberOfNoneRootCerts].RawIssuer)] + if rca == nil { + return fmt.Errorf("failed to find root CA. pkix.name: %v", verifiedChains[i][numberOfNoneRootCerts].Issuer) + } + verifiedChains[i] = append(verifiedChains[i], rca) + numberOfNoneRootCerts++ + } + results, err := c.GetAllRevocationStatus(ctx, verifiedChains[i], conf) if err != nil { - c.UI.Error(err.Error()) - return 2 + return err + } + if r := c.canEarlyExitForOCSP(results, numberOfNoneRootCerts, conf); r != nil { + return r.err } } - var secret *api.Secret - var fullPath string - if v2 { - secret, err = c.deleteV2(partialPath, mountPath, client) - fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) + return nil +} + +func (c *Client) canEarlyExitForOCSP(results []*ocspStatus, chainSize int, conf *VerifyConfig) *ocspStatus { + msg := "" + if conf.OcspFailureMode == FailOpenFalse { + // Fail closed. any error is returned to stop connection + for _, r := range results { + if r.err != nil { + return r + } + } } else { - // v1 - if mountFlagSyntax { - fullPath = path.Join(mountPath, partialPath) - } else { - fullPath = partialPath + // Fail open and all results are valid. 
+ allValid := len(results) == chainSize + for _, r := range results { + if !isValidOCSPStatus(r.code) { + allValid = false + break + } } - secret, err = client.Logical().Delete(fullPath) + for _, r := range results { + if allValid && r.code == ocspStatusRevoked { + return r + } + if r != nil && r.code != ocspStatusGood && r.err != nil { + msg += "" + r.err.Error() + } + } + } + if len(msg) > 0 { + c.Logger().Warn( + "OCSP is set to fail-open, and could not retrieve OCSP based revocation checking but proceeding.", "detail", msg) } + return nil +} +func (c *Client) validateWithCacheForAllCertificates(verifiedChains []*x509.Certificate) (bool, error) { + n := len(verifiedChains) - 1 + for j := 0; j < n; j++ { + subject := verifiedChains[j] + issuer := verifiedChains[j+1] + status, _, _, err := c.validateWithCache(subject, issuer) + if err != nil { + return false, err + } + if !isValidOCSPStatus(status.code) { + return false, nil + } + } + return true, nil +} + +func (c *Client) validateWithCache(subject, issuer *x509.Certificate) (*ocspStatus, []byte, *certIDKey, error) { + ocspReq, err := ocsp.CreateRequest(subject, issuer, &ocsp.RequestOptions{}) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to create OCSP request from the certificates: %v", err) + } + encodedCertID, ocspS := extractCertIDKeyFromRequest(ocspReq) + if ocspS.code != ocspSuccess { + return nil, nil, nil, fmt.Errorf("failed to extract CertID from OCSP Request: %v", err) + } + status, err := c.checkOCSPResponseCache(encodedCertID, subject, issuer) + if err != nil { + return nil, nil, nil, err + } + return status, ocspReq, encodedCertID, nil +} + +func (c *Client) GetAllRevocationStatus(ctx context.Context, verifiedChains []*x509.Certificate, conf *VerifyConfig) ([]*ocspStatus, error) { + _, err := c.validateWithCacheForAllCertificates(verifiedChains) if err != nil { - c.UI.Error(fmt.Sprintf("Error deleting %s: %s", fullPath, err)) - if secret != nil { - OutputSecret(c.UI, secret) + return 
nil, err + } + n := len(verifiedChains) - 1 + results := make([]*ocspStatus, n) + for j := 0; j < n; j++ { + results[j], err = c.GetRevocationStatus(ctx, verifiedChains[j], verifiedChains[j+1], conf) + if err != nil { + return nil, err } - return 2 + if !isValidOCSPStatus(results[j].code) { + return results, nil + } + } + return results, nil +} + +// verifyPeerCertificateSerial verifies the certificate revocation status in serial. +func (c *Client) verifyPeerCertificateSerial(conf *VerifyConfig) func(_ [][]byte, verifiedChains [][]*x509.Certificate) (err error) { + return func(_ [][]byte, verifiedChains [][]*x509.Certificate) error { + return c.VerifyPeerCertificate(context.TODO(), verifiedChains, conf) + } +} + +func (c *Client) extractOCSPCacheResponseValueWithoutSubject(cacheValue ocspCachedResponse) (*ocspStatus, error) { + return c.extractOCSPCacheResponseValue(&cacheValue, nil, nil) +} + +func (c *Client) extractOCSPCacheResponseValue(cacheValue *ocspCachedResponse, subject, issuer *x509.Certificate) (*ocspStatus, error) { + subjectName := "Unknown" + if subject != nil { + subjectName = subject.Subject.CommonName + } + + curTime := time.Now() + if cacheValue == nil { + return &ocspStatus{ + code: ocspMissedCache, + err: fmt.Errorf("miss cache data. subject: %v", subjectName), + }, nil + } + currentTime := float64(curTime.UTC().Unix()) + if currentTime-cacheValue.time >= cacheExpire { + return &ocspStatus{ + code: ocspCacheExpired, + err: fmt.Errorf("cache expired. 
current: %v, cache: %v", + time.Unix(int64(currentTime), 0).UTC(), time.Unix(int64(cacheValue.time), 0).UTC()), + }, nil + } + + return validateOCSP(&ocsp.Response{ + ProducedAt: time.Unix(int64(cacheValue.producedAt), 0).UTC(), + ThisUpdate: time.Unix(int64(cacheValue.thisUpdate), 0).UTC(), + NextUpdate: time.Unix(int64(cacheValue.nextUpdate), 0).UTC(), + Status: int(cacheValue.status), + }) +} + +/* +// writeOCSPCache writes a OCSP Response cache +func (c *Client) writeOCSPCache(ctx context.Context, storage logical.Storage) error { + c.Logger().Debug("writing OCSP Response cache") + t := time.Now() + m := make(map[string][]interface{}) + keys := c.ocspResponseCache.Keys() + if len(keys) > persistedCacheSize { + keys = keys[:persistedCacheSize] + } + for _, k := range keys { + e, ok := c.ocspResponseCache.Get(k) + if ok { + entry := e.(*ocspCachedResponse) + // Don't store if expired + if isInValidityRange(t, time.Unix(int64(entry.thisUpdate), 0), time.Unix(int64(entry.nextUpdate), 0)) { + key := k.(certIDKey) + cacheKeyInBase64, err := decodeCertIDKey(&key) + if err != nil { + return err + } + m[cacheKeyInBase64] = []interface{}{entry.status, entry.time, entry.producedAt, entry.thisUpdate, entry.nextUpdate} + } + } + } + + v, err := jsonutil.EncodeJSONAndCompress(m, nil) + if err != nil { + return err + } + entry := logical.StorageEntry{ + Key: ocspCacheKey, + Value: v, + } + return storage.Put(ctx, &entry) +} + +// readOCSPCache reads a OCSP Response cache from storage +func (c *Client) readOCSPCache(ctx context.Context, storage logical.Storage) error { + c.Logger().Debug("reading OCSP Response cache") + + entry, err := storage.Get(ctx, ocspCacheKey) + if err != nil { + return err + } + if entry == nil { + return nil + } + var untypedCache map[string][]interface{} + + err = jsonutil.DecodeJSON(entry.Value, &untypedCache) + if err != nil { + return errors.New("failed to unmarshal OCSP cache") } - if secret == nil { - // Don't output anything unless using the 
"table" format - if Format(c.UI) == "table" { - c.UI.Info(fmt.Sprintf("Success! Data deleted (if it existed) at: %s", fullPath)) + for k, v := range untypedCache { + key, err := c.encodeCertIDKey(k) + if err != nil { + return err + } + var times [4]float64 + for i, t := range v[1:] { + if jn, ok := t.(json.Number); ok { + times[i], err = jn.Float64() + if err != nil { + return err + } + } else { + times[i] = t.(float64) + } + } + var status int + if jn, ok := v[0].(json.Number); ok { + s, err := jn.Int64() + if err != nil { + return err + } + status = int(s) + } else { + status = v[0].(int) } - return 0 + + c.ocspResponseCache.Add(*key, &ocspCachedResponse{ + status: ocspStatusCode(status), + time: times[0], + producedAt: times[1], + thisUpdate: times[2], + nextUpdate: times[3], + }) } - if c.flagField != "" { - return PrintRawField(c.UI, secret, c.flagField) + return nil +} +*/ + +func New(logFactory func() hclog.Logger, cacheSize int) *Client { + if cacheSize < 100 { + cacheSize = 100 } + cache, _ := lru.New2Q(cacheSize) + c := Client{ + caRoot: make(map[string]*x509.Certificate), + ocspResponseCache: cache, + logFactory: logFactory, + } + + return &c +} - return OutputSecret(c.UI, secret) +func (c *Client) Logger() hclog.Logger { + return c.logFactory() } -func (c *KVDeleteCommand) deleteV2(path, mountPath string, client *api.Client) (*api.Secret, error) { - var err error - var secret *api.Secret - switch { - case len(c.flagVersions) > 0: - path = addPrefixToKVPath(path, mountPath, "delete", false) - data := map[string]interface{}{ - "versions": kvParseVersionsFlags(c.flagVersions), +// insecureOcspTransport is the transport object that doesn't do certificate revocation check. 
+func newInsecureOcspTransport(extraCas []*x509.Certificate) *http.Transport { + // Get the SystemCertPool, continue with an empty pool on error + rootCAs, _ := x509.SystemCertPool() + if rootCAs == nil { + rootCAs = x509.NewCertPool() + } + for _, c := range extraCas { + rootCAs.AddCert(c) + } + config := &tls.Config{ + RootCAs: rootCAs, + } + return &http.Transport{ + MaxIdleConns: 10, + IdleConnTimeout: 30 * time.Minute, + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + TLSClientConfig: config, + } +} + +// NewTransport includes the certificate revocation check with OCSP in sequential. +func (c *Client) NewTransport(conf *VerifyConfig) *http.Transport { + rootCAs := c.certPool + if rootCAs == nil { + rootCAs, _ = x509.SystemCertPool() + } + if rootCAs == nil { + rootCAs = x509.NewCertPool() + } + for _, c := range conf.ExtraCas { + rootCAs.AddCert(c) + } + return &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: rootCAs, + VerifyPeerCertificate: c.verifyPeerCertificateSerial(conf), + }, + MaxIdleConns: 10, + IdleConnTimeout: 30 * time.Minute, + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + } +} + +/* +func (c *Client) WriteCache(ctx context.Context, storage logical.Storage) error { + c.ocspResponseCacheLock.Lock() + defer c.ocspResponseCacheLock.Unlock() + if c.cacheUpdated { + err := c.writeOCSPCache(ctx, storage) + if err == nil { + c.cacheUpdated = false } - secret, err = client.Logical().Write(path, data) - default: - path = addPrefixToKVPath(path, mountPath, "data", false) - secret, err = client.Logical().Delete(path) + return err } + return nil +} - return secret, err +func (c *Client) ReadCache(ctx context.Context, storage logical.Storage) error { + c.ocspResponseCacheLock.Lock() + defer c.ocspResponseCacheLock.Unlock() + return c.readOCSPCache(ctx, 
storage) } +*/ +/* + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2017-2022 Snowflake Computing Inc. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ diff --git a/command/kv_destroy.go b/command/kv_destroy.go index bc73021fff82..f2a9659efdf4 100644 --- a/command/kv_destroy.go +++ b/command/kv_destroy.go @@ -1,185 +1,100 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "fmt" - "path" - "strings" - - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*KVDestroyCommand)(nil) - _ cli.CommandAutocomplete = (*KVDestroyCommand)(nil) -) - -type KVDestroyCommand struct { - *BaseCommand - - flagVersions []string - flagMount string -} - -func (c *KVDestroyCommand) Synopsis() string { - return "Permanently removes one or more versions in the KV store" -} - -func (c *KVDestroyCommand) Help() string { - helpText := ` -Usage: vault kv destroy [options] KEY - - Permanently removes the specified versions' data from the key-value store. If - no key exists at the path, no action is taken. - - To destroy version 3 of key foo: - - $ vault kv destroy -mount=secret -versions=3 foo - - The deprecated path-like syntax can also be used, but this should be avoided - for KV v2, as the fact that it is not actually the full API path to - the secret (secret/data/foo) can cause confusion: - - $ vault kv destroy -versions=3 secret/foo - - Additional flags and more advanced use cases are detailed below. - -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *KVDestroyCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) - - // Common Options - f := set.NewFlagSet("Common Options") - - f.StringSliceVar(&StringSliceVar{ - Name: "versions", - Target: &c.flagVersions, - Default: nil, - Usage: `Specifies the version numbers to destroy.`, - }) - - f.StringVar(&StringVar{ - Name: "mount", - Target: &c.flagMount, - Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value - Usage: `Specifies the path where the KV backend is mounted. If specified, - the next argument will be interpreted as the secret path. 
If this flag is - not specified, the next argument will be interpreted as the combined mount - path and secret path, with /data/ automatically appended between KV - v2 secrets.`, - }) - - return set -} - -func (c *KVDestroyCommand) AutocompleteArgs() complete.Predictor { - return nil -} - -func (c *KVDestroyCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *KVDestroyCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) - return 1 - } - - if len(c.flagVersions) == 0 { - c.UI.Error("No versions provided, use the \"-versions\" flag to specify the version to destroy.") - return 1 - } - - var err error - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - // If true, we're working with "-mount=secret foo" syntax. - // If false, we're using "secret/foo" syntax. - mountFlagSyntax := c.flagMount != "" - - var ( - mountPath string - partialPath string - v2 bool - ) - - // Parse the paths and grab the KV version - if mountFlagSyntax { - // In this case, this arg is the secret path (e.g. "foo"). - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - if v2 { - partialPath = path.Join(mountPath, partialPath) - } - } else { - // In this case, this arg is a path-like combination of mountPath/secretPath. - // (e.g. 
"secret/foo") - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(partialPath, client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - } - - if !v2 { - c.UI.Error("Destroy not supported on KV Version 1") - return 1 - } - destroyPath := addPrefixToKVPath(partialPath, mountPath, "destroy", false) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - data := map[string]interface{}{ - "versions": kvParseVersionsFlags(c.flagVersions), - } - - secret, err := client.Logical().Write(destroyPath, data) - if err != nil { - c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", destroyPath, err)) - if secret != nil { - OutputSecret(c.UI, secret) - } - return 2 - } - if secret == nil { - // Don't output anything unless using the "table" format - if Format(c.UI) == "table" { - c.UI.Info(fmt.Sprintf("Success! Data written to: %s", destroyPath)) - } - return 0 - } - - return OutputSecret(c.UI, secret) -} +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import { stubFeaturesAndPermissions } from 'vault/tests/helpers/components/sidebar-nav'; + +const renderComponent = () => { + return render(hbs` + + + + `); +}; + +module('Integration | Component | sidebar-nav-cluster', function (hooks) { + setupRenderingTest(hooks); + + test('it should render nav headings', async function (assert) { + const headings = ['Vault', 'Monitoring']; + stubFeaturesAndPermissions(this.owner, true, true); + await renderComponent(); + + assert + .dom('[data-test-sidebar-nav-heading]') + .exists({ count: headings.length }, 'Correct number of headings render'); + headings.forEach((heading) => { + assert + .dom(`[data-test-sidebar-nav-heading="${heading}"]`) + .hasText(heading, `${heading} heading renders`); + }); + }); + + test('it should hide links and 
headings user does not have access too', async function (assert) { + await renderComponent(); + assert + .dom('[data-test-sidebar-nav-link]') + .exists({ count: 3 }, 'Nav links are hidden other than secrets, secrets sync and dashboard'); + assert + .dom('[data-test-sidebar-nav-heading]') + .exists({ count: 1 }, 'Headings are hidden other than Vault'); + }); + + test('it should render nav links', async function (assert) { + const links = [ + 'Dashboard', + 'Secrets Engines', + 'Secrets Sync', + 'Access', + 'Policies', + 'Tools', + 'Replication', + 'Raft Storage', + 'Client Count', + 'License', + 'Seal Vault', + ]; + stubFeaturesAndPermissions(this.owner, true, true); + await renderComponent(); + + assert + .dom('[data-test-sidebar-nav-link]') + .exists({ count: links.length }, 'Correct number of links render'); + links.forEach((link) => { + assert.dom(`[data-test-sidebar-nav-link="${link}"]`).hasText(link, `${link} link renders`); + }); + }); + + test('it should hide enterprise related links in child namespace', async function (assert) { + const links = [ + 'Disaster Recovery', + 'Performance', + 'Replication', + 'Raft Storage', + 'License', + 'Seal Vault', + ]; + this.owner.lookup('service:namespace').set('path', 'foo'); + const stubs = stubFeaturesAndPermissions(this.owner, true, true); + stubs.hasNavPermission.callsFake((route) => route !== 'clients'); + + await renderComponent(); + + assert + .dom('[data-test-sidebar-nav-heading="Monitoring"]') + .doesNotExist( + 'Monitoring heading is hidden in child namespace when user does not have access to Client Count' + ); + + links.forEach((link) => { + assert + .dom(`[data-test-sidebar-nav-link="${link}"]`) + .doesNotExist(`${link} is hidden in child namespace`); + }); + }); +}); diff --git a/command/kv_enable_versioning.go b/command/kv_enable_versioning.go index 1b522269034b..2b0ecff88ddf 100644 --- a/command/kv_enable_versioning.go +++ b/command/kv_enable_versioning.go @@ -1,94 +1,110 @@ -// Copyright (c) HashiCorp, 
Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "fmt" - "strings" - - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*KVEnableVersioningCommand)(nil) - _ cli.CommandAutocomplete = (*KVEnableVersioningCommand)(nil) -) - -type KVEnableVersioningCommand struct { - *BaseCommand -} - -func (c *KVEnableVersioningCommand) Synopsis() string { - return "Turns on versioning for a KV store" -} - -func (c *KVEnableVersioningCommand) Help() string { - helpText := ` -Usage: vault kv enable-versioning [options] KEY - - This command turns on versioning for the backend at the provided path. - - $ vault kv enable-versioning secret - - Additional flags and more advanced use cases are detailed below. - -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *KVEnableVersioningCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) - - return set -} - -func (c *KVEnableVersioningCommand) AutocompleteArgs() complete.Predictor { - return nil -} - -func (c *KVEnableVersioningCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *KVEnableVersioningCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) - return 1 - } - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - // Append a trailing slash to indicate it's a path in output - mountPath := ensureTrailingSlash(sanitizePath(args[0])) - - if err := client.Sys().TuneMount(mountPath, api.MountConfigInput{ - Options: map[string]string{ - "version": "2", - }, - }); err != nil { - 
c.UI.Error(fmt.Sprintf("Error tuning secrets engine %s: %s", mountPath, err)) - return 2 - } - - c.UI.Output(fmt.Sprintf("Success! Tuned the secrets engine at: %s", mountPath)) - return 0 -} +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + + + Vault + + + + + {{#if (has-permission "access")}} + + {{/if}} + {{#if (has-permission "policies")}} + + {{/if}} + {{#if (has-permission "tools")}} + + {{/if}} + + {{#if + (or + (and + this.namespace.inRootNamespace (has-permission "status" routeParams=(array "replication" "raft" "license" "seal")) + ) + (has-permission "clients" routeParams="activity") + ) + }} + Monitoring + {{/if}} + {{#if + (and + this.version.isEnterprise + this.namespace.inRootNamespace + (not this.cluster.replicationRedacted) + (has-permission "status" routeParams="replication") + ) + }} + + {{/if}} + {{#if (and this.cluster.usingRaft this.namespace.inRootNamespace (has-permission "status" routeParams="raft"))}} + + {{/if}} + {{#if (and (has-permission "clients" routeParams="activity") (not this.cluster.dr.isSecondary))}} + + {{/if}} + {{#if + (and + this.version.features + this.namespace.inRootNamespace + (has-permission "status" routeParams="license") + (not this.cluster.dr.isSecondary) + ) + }} + + {{/if}} + {{#if (and this.namespace.inRootNamespace (has-permission "status" routeParams="seal") (not this.cluster.dr.isSecondary))}} + + {{/if}} + \ No newline at end of file diff --git a/command/kv_get.go b/command/kv_get.go index d852a722b48e..7d55d33b90ad 100644 --- a/command/kv_get.go +++ b/command/kv_get.go @@ -1,243 +1,145 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "fmt" - "path" - "strings" - - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*KVGetCommand)(nil) - _ cli.CommandAutocomplete = (*KVGetCommand)(nil) -) - -type KVGetCommand struct { - *BaseCommand - - flagVersion int - flagMount string -} - -func (c *KVGetCommand) Synopsis() string { - return "Retrieves data from the KV store" -} - -func (c *KVGetCommand) Help() string { - helpText := ` -Usage: vault kv get [options] KEY - - Retrieves the value from Vault's key-value store at the given key name. If no - key exists with that name, an error is returned. If a key exists with that - name but has no data, nothing is returned. - - $ vault kv get -mount=secret foo - - The deprecated path-like syntax can also be used, but this should be avoided - for KV v2, as the fact that it is not actually the full API path to - the secret (secret/data/foo) can cause confusion: - - $ vault kv get secret/foo - - To view the given key name at a specific version in time, specify the "-version" - flag: - - $ vault kv get -mount=secret -version=1 foo - - Additional flags and more advanced use cases are detailed below. - -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *KVGetCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) - - // Common Options - f := set.NewFlagSet("Common Options") - - f.IntVar(&IntVar{ - Name: "version", - Target: &c.flagVersion, - Default: 0, - Usage: `If passed, the value at the version number will be returned.`, - }) - - f.StringVar(&StringVar{ - Name: "mount", - Target: &c.flagMount, - Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value - Usage: `Specifies the path where the KV backend is mounted. If specified, - the next argument will be interpreted as the secret path. 
If this flag is - not specified, the next argument will be interpreted as the combined mount - path and secret path, with /data/ automatically appended between KV - v2 secrets.`, - }) - - return set -} - -func (c *KVGetCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultFiles() -} - -func (c *KVGetCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *KVGetCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) - return 1 - } - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - // If true, we're working with "-mount=secret foo" syntax. - // If false, we're using "secret/foo" syntax. - mountFlagSyntax := c.flagMount != "" - - var ( - mountPath string - v2 bool - ) - - // Ignore leading slash - partialPath := strings.TrimPrefix(args[0], "/") - - // Parse the paths and grab the KV version - if mountFlagSyntax { - // In this case, this arg is the secret path (e.g. "foo"). - mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - if v2 { - partialPath = path.Join(mountPath, partialPath) - } - } else { - // In this case, this arg is a path-like combination of mountPath/secretPath. - // (e.g. 
"secret/foo") - mountPath, v2, err = isKVv2(partialPath, client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - } - - var versionParam map[string]string - var fullPath string - // Add /data to v2 paths only - if v2 { - fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) - - if c.flagVersion > 0 { - versionParam = map[string]string{ - "version": fmt.Sprintf("%d", c.flagVersion), - } - } - } else { - // v1 - if mountFlagSyntax { - fullPath = path.Join(mountPath, partialPath) - } else { - fullPath = partialPath - } - } - - secret, err := kvReadRequest(client, fullPath, versionParam) - if err != nil { - c.UI.Error(fmt.Sprintf("Error reading %s: %s", fullPath, err)) - if secret != nil { - OutputSecret(c.UI, secret) - } - return 2 - } - if secret == nil { - c.UI.Error(fmt.Sprintf("No value found at %s", fullPath)) - return 2 - } - - if c.flagField != "" { - if v2 { - // This is a v2, pass in the data field - if data, ok := secret.Data["data"]; ok && data != nil { - // If they requested a literal "data" see if they meant actual - // value or the data block itself - if c.flagField == "data" { - if dataMap, ok := data.(map[string]interface{}); ok { - if _, ok := dataMap["data"]; ok { - return PrintRawField(c.UI, dataMap, c.flagField) - } - } - return PrintRawField(c.UI, secret, c.flagField) - } - return PrintRawField(c.UI, data, c.flagField) - } else { - c.UI.Error(fmt.Sprintf("No data found at %s", fullPath)) - return 2 - } - } else { - return PrintRawField(c.UI, secret, c.flagField) - } - } - - // If we have wrap info print the secret normally. 
- if secret.WrapInfo != nil || c.flagFormat != "table" { - return OutputSecret(c.UI, secret) - } - - if len(secret.Warnings) > 0 { - tf := TableFormatter{} - tf.printWarnings(c.UI, secret) - } - - if v2 { - outputPath(c.UI, fullPath, "Secret Path") - } - - if metadata, ok := secret.Data["metadata"]; ok && metadata != nil { - c.UI.Info(getHeaderForMap("Metadata", metadata.(map[string]interface{}))) - OutputData(c.UI, metadata) - c.UI.Info("") - } - - data := secret.Data - if v2 && data != nil { - data = nil - dataRaw := secret.Data["data"] - if dataRaw != nil { - data = dataRaw.(map[string]interface{}) - } - } - - if data != nil { - c.UI.Info(getHeaderForMap("Data", data)) - OutputData(c.UI, data) - } - - return 0 -} +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { inject as service } from '@ember/service'; +import { computed } from '@ember/object'; +import { reject } from 'rsvp'; +import Route from '@ember/routing/route'; +import { task, timeout } from 'ember-concurrency'; +import Ember from 'ember'; +import getStorage from '../../lib/token-storage'; +import localStorage from 'vault/lib/local-storage'; +import ClusterRoute from 'vault/mixins/cluster-route'; +import ModelBoundaryRoute from 'vault/mixins/model-boundary-route'; +import { assert } from '@ember/debug'; + +const POLL_INTERVAL_MS = 10000; + +export const getManagedNamespace = (nsParam, root) => { + if (!nsParam || nsParam.replaceAll('/', '') === root) return root; + // Check if param starts with root and / + if (nsParam.startsWith(`${root}/`)) { + return nsParam; + } + // Otherwise prepend the given param with the root + return `${root}/${nsParam}`; +}; + +export default Route.extend(ModelBoundaryRoute, ClusterRoute, { + namespaceService: service('namespace'), + version: service(), + permissions: service(), + store: service(), + auth: service(), + featureFlagService: service('featureFlag'), + currentCluster: service(), + modelTypes: computed(function () { + 
return ['node', 'secret', 'secret-engine']; + }), + + queryParams: { + namespaceQueryParam: { + refreshModel: true, + }, + }, + + getClusterId(params) { + const { cluster_name } = params; + const cluster = this.modelFor('vault').findBy('name', cluster_name); + return cluster ? cluster.get('id') : null; + }, + + async beforeModel() { + const params = this.paramsFor(this.routeName); + let namespace = params.namespaceQueryParam; + const currentTokenName = this.auth.get('currentTokenName'); + const managedRoot = this.featureFlagService.managedNamespaceRoot; + assert( + 'Cannot use VAULT_CLOUD_ADMIN_NAMESPACE flag with non-enterprise Vault version', + !(managedRoot && this.version.isCommunity) + ); + if (!namespace && currentTokenName && !Ember.testing) { + // if no namespace queryParam and user authenticated, + // use user's root namespace to redirect to properly param'd url + const storage = getStorage().getItem(currentTokenName); + namespace = storage?.userRootNamespace; + // only redirect if something other than nothing + if (namespace) { + this.transitionTo({ queryParams: { namespace } }); + } + } else if (managedRoot !== null) { + const managed = getManagedNamespace(namespace, managedRoot); + if (managed !== namespace) { + this.transitionTo({ queryParams: { namespace: managed } }); + } + } + this.namespaceService.setNamespace(namespace); + const id = this.getClusterId(params); + if (id) { + this.auth.setCluster(id); + if (this.auth.currentToken) { + this.version.fetchVersion(); + await this.permissions.getPaths.perform(); + } + return this.version.fetchFeatures(); + } else { + return reject({ httpStatus: 404, message: 'not found', path: params.cluster_name }); + } + }, + + model(params) { + // if a user's browser settings block localStorage they will be unable to use Vault. The method will throw the error and the rest of the application will not load. 
+ localStorage.isLocalStorageSupported(); + + const id = this.getClusterId(params); + return this.store.findRecord('cluster', id); + }, + + poll: task(function* () { + while (true) { + // when testing, the polling loop causes promises to never settle so acceptance tests hang + // to get around that, we just disable the poll in tests + if (Ember.testing) { + return; + } + yield timeout(POLL_INTERVAL_MS); + try { + /* eslint-disable-next-line ember/no-controller-access-in-routes */ + yield this.controller.model.reload(); + yield this.transitionToTargetRoute(); + } catch (e) { + // we want to keep polling here + } + } + }) + .cancelOn('deactivate') + .keepLatest(), + + afterModel(model, transition) { + this._super(...arguments); + this.currentCluster.setCluster(model); + + // Check that namespaces is enabled and if not, + // clear the namespace by transition to this route w/o it + if (this.namespaceService.path && !this.version.hasNamespaces) { + return this.transitionTo(this.routeName, { queryParams: { namespace: '' } }); + } + return this.transitionToTargetRoute(transition); + }, + + setupController() { + this._super(...arguments); + this.poll.perform(); + }, + + actions: { + error(e) { + if (e.httpStatus === 503 && e.errors[0] === 'Vault is sealed') { + this.refresh(); + } + return true; + }, + }, +}); diff --git a/command/kv_helpers.go b/command/kv_helpers.go index 844af21848d2..c5be6d878c33 100644 --- a/command/kv_helpers.go +++ b/command/kv_helpers.go @@ -1,269 +1,20 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "context" - "errors" - "fmt" - "io" - paths "path" - "sort" - "strings" - - "github.com/hashicorp/go-secure-stdlib/strutil" - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" -) - -func kvReadRequest(client *api.Client, path string, params map[string]string) (*api.Secret, error) { - r := client.NewRequest("GET", "/v1/"+path) - for k, v := range params { - r.Params.Set(k, v) - } - resp, err := client.RawRequest(r) - if resp != nil { - defer resp.Body.Close() - } - if resp != nil && resp.StatusCode == 404 { - secret, parseErr := api.ParseSecret(resp.Body) - switch parseErr { - case nil: - case io.EOF: - return nil, nil - default: - return nil, err - } - if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { - return secret, nil - } - return nil, nil - } - if err != nil { - return nil, err - } - - return api.ParseSecret(resp.Body) -} - -func kvPreflightVersionRequest(client *api.Client, path string) (string, int, error) { - // We don't want to use a wrapping call here so save any custom value and - // restore after - currentWrappingLookupFunc := client.CurrentWrappingLookupFunc() - client.SetWrappingLookupFunc(nil) - defer client.SetWrappingLookupFunc(currentWrappingLookupFunc) - currentOutputCurlString := client.OutputCurlString() - client.SetOutputCurlString(false) - defer client.SetOutputCurlString(currentOutputCurlString) - currentOutputPolicy := client.OutputPolicy() - client.SetOutputPolicy(false) - defer client.SetOutputPolicy(currentOutputPolicy) - - r := client.NewRequest("GET", "/v1/sys/internal/ui/mounts/"+path) - resp, err := client.RawRequest(r) - if resp != nil { - defer resp.Body.Close() - } - if err != nil { - // If we get a 404 we are using an older version of vault, default to - // version 1 - if resp != nil { - if resp.StatusCode == 404 { - return "", 1, nil - } - - // if the original request had the -output-curl-string or -output-policy flag, - if 
(currentOutputCurlString || currentOutputPolicy) && resp.StatusCode == 403 { - // we provide a more helpful error for the user, - // who may not understand why the flag isn't working. - err = fmt.Errorf( - `This output flag requires the success of a preflight request -to determine the version of a KV secrets engine. Please -re-run this command with a token with read access to %s. -Note that if the path you are trying to reach is a KV v2 path, your token's policy must -allow read access to that path in the format 'mount-path/data/foo', not just 'mount-path/foo'.`, path) - } - } - - return "", 0, err - } - - secret, err := api.ParseSecret(resp.Body) - if err != nil { - return "", 0, err - } - if secret == nil { - return "", 0, errors.New("nil response from pre-flight request") - } - var mountPath string - if mountPathRaw, ok := secret.Data["path"]; ok { - mountPath = mountPathRaw.(string) - } - options := secret.Data["options"] - if options == nil { - return mountPath, 1, nil - } - versionRaw := options.(map[string]interface{})["version"] - if versionRaw == nil { - return mountPath, 1, nil - } - version := versionRaw.(string) - switch version { - case "", "1": - return mountPath, 1, nil - case "2": - return mountPath, 2, nil - } - - return mountPath, 1, nil -} - -func isKVv2(path string, client *api.Client) (string, bool, error) { - mountPath, version, err := kvPreflightVersionRequest(client, path) - if err != nil { - return "", false, err - } - - return mountPath, version == 2, nil -} - -func addPrefixToKVPath(path, mountPath, apiPrefix string, skipIfExists bool) string { - if path == mountPath || path == strings.TrimSuffix(mountPath, "/") { - return paths.Join(mountPath, apiPrefix) - } - - pathSuffix := strings.TrimPrefix(path, mountPath) - for { - // If the entire mountPath is included in the path, we are done - if pathSuffix != path { - break - } - // Trim the parts of the mountPath that are not included in the - // path, for example, in cases where the mountPath 
contains - // namespaces which are not included in the path. - partialMountPath := strings.SplitN(mountPath, "/", 2) - if len(partialMountPath) <= 1 || partialMountPath[1] == "" { - break - } - mountPath = strings.TrimSuffix(partialMountPath[1], "/") - pathSuffix = strings.TrimPrefix(pathSuffix, mountPath) - } - - if skipIfExists { - if strings.HasPrefix(pathSuffix, apiPrefix) || strings.HasPrefix(pathSuffix, "/"+apiPrefix) { - return paths.Join(mountPath, pathSuffix) - } - } - - return paths.Join(mountPath, apiPrefix, pathSuffix) -} - -func getHeaderForMap(header string, data map[string]interface{}) string { - maxKey := 0 - for k := range data { - if len(k) > maxKey { - maxKey = len(k) - } - } - - // 4 for the column spaces and 5 for the len("value") - totalLen := maxKey + 4 + 5 - - return padEqualSigns(header, totalLen) -} - -func kvParseVersionsFlags(versions []string) []string { - versionsOut := make([]string, 0, len(versions)) - for _, v := range versions { - versionsOut = append(versionsOut, strutil.ParseStringSlice(v, ",")...) - } - - return versionsOut -} - -func outputPath(ui cli.Ui, path string, title string) { - ui.Info(padEqualSigns(title, len(path))) - ui.Info(path) - ui.Info("") -} - -// Pad the table header with equal signs on each side -func padEqualSigns(header string, totalLen int) string { - equalSigns := totalLen - (len(header) + 2) - - // If we have zero or fewer equal signs bump it back up to two on either - // side of the header. - if equalSigns <= 0 { - equalSigns = 4 - } - - // If the number of equal signs is not divisible by two add a sign. - if equalSigns%2 != 0 { - equalSigns = equalSigns + 1 - } - - return fmt.Sprintf("%s %s %s", strings.Repeat("=", equalSigns/2), header, strings.Repeat("=", equalSigns/2)) -} - -// walkSecretsTree dfs-traverses the secrets tree rooted at the given path -// and calls the `visit` functor for each of the directory and leaf paths. 
-// Note: for kv-v2, a "metadata" path is expected and "metadata" paths will be -// returned in the visit functor. -func walkSecretsTree(ctx context.Context, client *api.Client, path string, visit func(path string, directory bool) error) error { - resp, err := client.Logical().ListWithContext(ctx, path) - if err != nil { - return fmt.Errorf("could not list %q path: %w", path, err) - } - - if resp == nil || resp.Data == nil { - return fmt.Errorf("no value found at %q: %w", path, err) - } - - keysRaw, ok := resp.Data["keys"] - if !ok { - return fmt.Errorf("unexpected list response at %q", path) - } - - keysRawSlice, ok := keysRaw.([]interface{}) - if !ok { - return fmt.Errorf("unexpected list response type %T at %q", keysRaw, path) - } - - keys := make([]string, 0, len(keysRawSlice)) - - for _, keyRaw := range keysRawSlice { - key, ok := keyRaw.(string) - if !ok { - return fmt.Errorf("unexpected key type %T at %q", keyRaw, path) - } - keys = append(keys, key) - } - - // sort the keys for a deterministic output - sort.Strings(keys) - - for _, key := range keys { - // the keys are relative to the current path: combine them - child := paths.Join(path, key) - - if strings.HasSuffix(key, "/") { - // visit the directory - if err := visit(child, true); err != nil { - return err - } - - // this is not a leaf node: we need to go deeper... - if err := walkSecretsTree(ctx, client, child, visit); err != nil { - return err - } - } else { - // this is a leaf node: add it to the list - if err := visit(child, false); err != nil { - return err - } - } - } - - return nil -} +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + +
+ {{@codeBlock}} + +
\ No newline at end of file diff --git a/command/kv_list.go b/command/kv_list.go index d4733a137f59..d1de67e4def6 100644 --- a/command/kv_list.go +++ b/command/kv_list.go @@ -1,179 +1,33 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 +//go:build !enterprise + package command -import ( - "fmt" - "path" - "strings" +//go:generate go run github.com/hashicorp/vault/tools/stubmaker - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*KVListCommand)(nil) - _ cli.CommandAutocomplete = (*KVListCommand)(nil) +import ( + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/vault" ) -type KVListCommand struct { - *BaseCommand - flagMount string +func entInitCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions, commands map[string]cli.CommandFactory) { } -func (c *KVListCommand) Synopsis() string { - return "List data or secrets" +func entEnableFourClusterDev(c *ServerCommand, base *vault.CoreConfig, info map[string]string, infoKeys []string, tempDir string) int { + c.logger.Error("-dev-four-cluster only supported in enterprise Vault") + return 1 } -func (c *KVListCommand) Help() string { - helpText := ` - -Usage: vault kv list [options] PATH - - Lists data from Vault's key-value store at the given path. - - List values under the "my-app" folder of the key-value store: - - $ vault kv list secret/my-app/ - - Additional flags and more advanced use cases are detailed below. - -` + c.Flags().Help() - - return strings.TrimSpace(helpText) -} - -func (c *KVListCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) - - // Common Options - f := set.NewFlagSet("Common Options") - - f.StringVar(&StringVar{ - Name: "mount", - Target: &c.flagMount, - Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value - Usage: `Specifies the path where the KV backend is mounted. 
If specified, - the next argument will be interpreted as the secret path. If this flag is - not specified, the next argument will be interpreted as the combined mount - path and secret path, with /data/ automatically appended between KV - v2 secrets.`, - }) - - return set +func entAdjustCoreConfig(config *server.Config, coreConfig *vault.CoreConfig) { } -func (c *KVListCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultFolders() +func entCheckStorageType(coreConfig *vault.CoreConfig) bool { + return true } -func (c *KVListCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *KVListCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - switch { - case len(args) < 1: - if c.flagMount == "" { - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - } - args = []string{""} - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) - return 1 - } - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - // If true, we're working with "-mount=secret foo" syntax. - // If false, we're using "secret/foo" syntax. - mountFlagSyntax := c.flagMount != "" - - var ( - mountPath string - partialPath string - v2 bool - ) - - // Parse the paths and grab the KV version - if mountFlagSyntax { - // In this case, this arg is the secret path (e.g. "foo"). - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - if v2 { - partialPath = path.Join(mountPath, partialPath) - } - } else { - // In this case, this arg is a path-like combination of mountPath/secretPath. - // (e.g. 
"secret/foo") - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(partialPath, client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - } - - // Add /metadata to v2 paths only - var fullPath string - if v2 { - fullPath = addPrefixToKVPath(partialPath, mountPath, "metadata", false) - } else { - // v1 - if mountFlagSyntax { - fullPath = path.Join(mountPath, partialPath) - } else { - fullPath = partialPath - } - } - - secret, err := client.Logical().List(fullPath) - if err != nil { - c.UI.Error(fmt.Sprintf("Error listing %s: %s", fullPath, err)) - return 2 - } - - // If the secret is wrapped, return the wrapped response. - if secret != nil && secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 { - return OutputSecret(c.UI, secret) - } - - _, ok := extractListData(secret) - if Format(c.UI) != "table" { - if secret == nil || secret.Data == nil || !ok { - OutputData(c.UI, map[string]interface{}{}) - return 2 - } - } - - if secret == nil || secret.Data == nil { - c.UI.Error(fmt.Sprintf("No value found at %s", fullPath)) - return 2 - } - - if !ok { - c.UI.Error(fmt.Sprintf("No entries found at %s", fullPath)) - return 2 - } - - return OutputList(c.UI, secret) +func entGetFIPSInfoKey() string { + return "" } diff --git a/command/kv_metadata.go b/command/kv_metadata.go index 8c0d2ca59274..5ef880b78742 100644 --- a/command/kv_metadata.go +++ b/command/kv_metadata.go @@ -4,54 +4,331 @@ package command import ( + "context" + "encoding/base64" + "net" + "net/http" "strings" + "testing" + "time" - "github.com/mitchellh/cli" + "github.com/hashicorp/cli" + log "github.com/hashicorp/go-hclog" + kv "github.com/hashicorp/vault-plugin-secrets-kv" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/builtin/logical/pki" + "github.com/hashicorp/vault/builtin/logical/ssh" + "github.com/hashicorp/vault/builtin/logical/transit" + "github.com/hashicorp/vault/helper/benchhelpers" + 
"github.com/hashicorp/vault/helper/builtinplugins" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical/inmem" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/seal" + + auditFile "github.com/hashicorp/vault/builtin/audit/file" + credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" + vaulthttp "github.com/hashicorp/vault/http" ) -var _ cli.Command = (*KVMetadataCommand)(nil) +var ( + defaultVaultLogger = log.NewNullLogger() + + defaultVaultCredentialBackends = map[string]logical.Factory{ + "userpass": credUserpass.Factory, + } + + defaultVaultAuditBackends = map[string]audit.Factory{ + "file": auditFile.Factory, + } + + defaultVaultLogicalBackends = map[string]logical.Factory{ + "generic-leased": vault.LeasedPassthroughBackendFactory, + "pki": pki.Factory, + "ssh": ssh.Factory, + "transit": transit.Factory, + "kv": kv.Factory, + } +) + +// assertNoTabs asserts the CLI help has no tab characters. +func assertNoTabs(tb testing.TB, c cli.Command) { + tb.Helper() + + if strings.ContainsRune(c.Help(), '\t') { + tb.Errorf("%#v help output contains tabs", c) + } +} + +// testVaultServer creates a test vault cluster and returns a configured API +// client and closer function. 
+func testVaultServer(tb testing.TB) (*api.Client, func()) { + tb.Helper() + + client, _, closer := testVaultServerUnseal(tb) + return client, closer +} + +func testVaultServerWithSecrets(ctx context.Context, tb testing.TB) (*api.Client, func()) { + tb.Helper() + + client, _, closer := testVaultServerUnseal(tb) + + // enable kv-v1 backend + if err := client.Sys().Mount("kv-v1/", &api.MountInput{ + Type: "kv-v1", + }); err != nil { + tb.Fatal(err) + } + + // enable kv-v2 backend + if err := client.Sys().Mount("kv-v2/", &api.MountInput{ + Type: "kv-v2", + }); err != nil { + tb.Fatal(err) + } + + // populate dummy secrets + for _, path := range []string{ + "foo", + "app-1/foo", + "app-1/bar", + "app-1/nested/baz", + } { + if err := client.KVv1("kv-v1").Put(ctx, path, map[string]interface{}{ + "user": "test", + "password": "Hashi123", + }); err != nil { + tb.Fatal(err) + } + + if _, err := client.KVv2("kv-v2").Put(ctx, path, map[string]interface{}{ + "user": "test", + "password": "Hashi123", + }); err != nil { + tb.Fatal(err) + } + } + + return client, closer +} + +func testVaultServerWithKVVersion(tb testing.TB, kvVersion string) (*api.Client, func()) { + tb.Helper() + + client, _, closer := testVaultServerUnsealWithKVVersionWithSeal(tb, kvVersion, nil) + return client, closer +} + +func testVaultServerAllBackends(tb testing.TB) (*api.Client, func()) { + tb.Helper() + + client, _, closer := testVaultServerCoreConfig(tb, &vault.CoreConfig{ + CredentialBackends: credentialBackends, + AuditBackends: auditBackends, + LogicalBackends: logicalBackends, + BuiltinRegistry: builtinplugins.Registry, + }) + return client, closer +} + +// testVaultServerAutoUnseal creates a test vault cluster and sets it up with auto unseal +// the function returns a client, the recovery keys, and a closer function +func testVaultServerAutoUnseal(tb testing.TB) (*api.Client, []string, func()) { + testSeal, _ := seal.NewTestSeal(nil) + autoSeal := vault.NewAutoSeal(testSeal) + return 
testVaultServerUnsealWithKVVersionWithSeal(tb, "1", autoSeal) +} -type KVMetadataCommand struct { - *BaseCommand +// testVaultServerUnseal creates a test vault cluster and returns a configured +// API client, list of unseal keys (as strings), and a closer function. +func testVaultServerUnseal(tb testing.TB) (*api.Client, []string, func()) { + return testVaultServerUnsealWithKVVersionWithSeal(tb, "1", nil) } -func (c *KVMetadataCommand) Synopsis() string { - return "Interact with Vault's Key-Value storage" +func testVaultServerUnsealWithKVVersionWithSeal(tb testing.TB, kvVersion string, seal vault.Seal) (*api.Client, []string, func()) { + tb.Helper() + + return testVaultServerCoreConfigWithOpts(tb, &vault.CoreConfig{ + CredentialBackends: defaultVaultCredentialBackends, + AuditBackends: defaultVaultAuditBackends, + LogicalBackends: defaultVaultLogicalBackends, + BuiltinRegistry: builtinplugins.Registry, + Seal: seal, + }, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: 1, + KVVersion: kvVersion, + }) } -func (c *KVMetadataCommand) Help() string { - helpText := ` -Usage: vault kv metadata [options] [args] +// testVaultServerUnseal creates a test vault cluster and returns a configured +// API client, list of unseal keys (as strings), and a closer function +// configured with the given plugin directory. 
+func testVaultServerPluginDir(tb testing.TB, pluginDir string) (*api.Client, []string, func()) { + tb.Helper() + + return testVaultServerCoreConfig(tb, &vault.CoreConfig{ + CredentialBackends: defaultVaultCredentialBackends, + AuditBackends: defaultVaultAuditBackends, + LogicalBackends: defaultVaultLogicalBackends, + PluginDirectory: pluginDir, + BuiltinRegistry: builtinplugins.Registry, + }) +} + +func testVaultServerCoreConfig(tb testing.TB, coreConfig *vault.CoreConfig) (*api.Client, []string, func()) { + return testVaultServerCoreConfigWithOpts(tb, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: 1, // Default is 3, but we don't need that many + }) +} - This command has subcommands for interacting with the metadata endpoint in - Vault's key-value store. Here are some simple examples, and more detailed - examples are available in the subcommands or the documentation. +// testVaultServerCoreConfig creates a new vault cluster with the given core +// configuration. This is a lower-level test helper. If the seal config supports recovery keys, then +// recovery keys are returned. Otherwise, unseal keys are returned +func testVaultServerCoreConfigWithOpts(tb testing.TB, coreConfig *vault.CoreConfig, opts *vault.TestClusterOptions) (*api.Client, []string, func()) { + tb.Helper() - Create or update a metadata entry for a key: + cluster := vault.NewTestCluster(benchhelpers.TBtoT(tb), coreConfig, opts) + cluster.Start() - $ vault kv metadata put -mount=secret -max-versions=5 -delete-version-after=3h25m19s foo + // Make it easy to get access to the active + core := cluster.Cores[0].Core + vault.TestWaitActive(benchhelpers.TBtoT(tb), core) + + // Get the client already setup for us! 
+ client := cluster.Cores[0].Client + client.SetToken(cluster.RootToken) + + var keys [][]byte + if coreConfig.Seal != nil && coreConfig.Seal.RecoveryKeySupported() { + keys = cluster.RecoveryKeys + } else { + keys = cluster.BarrierKeys + } + + return client, encodeKeys(keys), cluster.Cleanup +} + +// Convert the unseal keys to base64 encoded, since these are how the user +// will get them. +func encodeKeys(rawKeys [][]byte) []string { + keys := make([]string, len(rawKeys)) + for i := range rawKeys { + keys[i] = base64.StdEncoding.EncodeToString(rawKeys[i]) + } + return keys +} - Get the metadata for a key, this provides information about each existing - version: +// testVaultServerUninit creates an uninitialized server. +func testVaultServerUninit(tb testing.TB) (*api.Client, func()) { + tb.Helper() - $ vault kv metadata get -mount=secret foo + inm, err := inmem.NewInmem(nil, defaultVaultLogger) + if err != nil { + tb.Fatal(err) + } - Delete a key and all existing versions: + core, err := vault.NewCore(&vault.CoreConfig{ + DisableMlock: true, + Physical: inm, + CredentialBackends: defaultVaultCredentialBackends, + AuditBackends: defaultVaultAuditBackends, + LogicalBackends: defaultVaultLogicalBackends, + BuiltinRegistry: builtinplugins.Registry, + }) + if err != nil { + tb.Fatal(err) + } - $ vault kv metadata delete -mount=secret foo + ln, addr := vaulthttp.TestServer(tb, core) - The deprecated path-like syntax can also be used, but this should be avoided - for KV v2, as the fact that it is not actually the full API path to - the secret (secret/metadata/foo) can cause confusion: - - $ vault kv metadata get secret/foo + client, err := api.NewClient(&api.Config{ + Address: addr, + }) + if err != nil { + tb.Fatal(err) + } - Please see the individual subcommand help for detailed usage information. 
-` + closer := func() { + core.Shutdown() + ln.Close() + } - return strings.TrimSpace(helpText) + return client, closer } -func (c *KVMetadataCommand) Run(args []string) int { - return cli.RunResultHelp +// testVaultServerBad creates an http server that returns a 500 on each request +// to simulate failures. +func testVaultServerBad(tb testing.TB) (*api.Client, func()) { + tb.Helper() + + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + tb.Fatal(err) + } + + server := &http.Server{ + Addr: "127.0.0.1:0", + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "500 internal server error", http.StatusInternalServerError) + }), + ReadTimeout: 1 * time.Second, + ReadHeaderTimeout: 1 * time.Second, + WriteTimeout: 1 * time.Second, + IdleTimeout: 1 * time.Second, + } + + go func() { + if err := server.Serve(listener); err != nil && err != http.ErrServerClosed { + tb.Fatal(err) + } + }() + + client, err := api.NewClient(&api.Config{ + Address: "http://" + listener.Addr().String(), + }) + if err != nil { + tb.Fatal(err) + } + + return client, func() { + ctx, done := context.WithTimeout(context.Background(), 5*time.Second) + defer done() + + server.Shutdown(ctx) + } +} + +// testTokenAndAccessor creates a new authentication token capable of being renewed with +// the default policy attached. It returns the token and it's accessor. 
+func testTokenAndAccessor(tb testing.TB, client *api.Client) (string, string) { + tb.Helper() + + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"default"}, + TTL: "30m", + }) + if err != nil { + tb.Fatal(err) + } + if secret == nil || secret.Auth == nil || secret.Auth.ClientToken == "" { + tb.Fatalf("missing auth data: %#v", secret) + } + return secret.Auth.ClientToken, secret.Auth.Accessor +} + +func testClient(tb testing.TB, addr string, token string) *api.Client { + tb.Helper() + config := api.DefaultConfig() + config.Address = addr + client, err := api.NewClient(config) + if err != nil { + tb.Fatal(err) + } + client.SetToken(token) + + return client } diff --git a/command/kv_metadata_delete.go b/command/kv_metadata_delete.go index 8163cc6bde8e..aac2d57bc73f 100644 --- a/command/kv_metadata_delete.go +++ b/command/kv_metadata_delete.go @@ -4,149 +4,946 @@ package command import ( - "fmt" - "path" - "strings" + "os" + "os/signal" + "syscall" - "github.com/mitchellh/cli" - "github.com/posener/complete" -) + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/audit" + "github.com/hashicorp/vault/builtin/plugin" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + "github.com/hashicorp/vault/version" -var ( - _ cli.Command = (*KVMetadataDeleteCommand)(nil) - _ cli.CommandAutocomplete = (*KVMetadataDeleteCommand)(nil) -) + /* + The builtinplugins package is initialized here because it, in turn, + initializes the database plugins. + They register multiple database drivers for the "database/sql" package. 
+ */ + _ "github.com/hashicorp/vault/helper/builtinplugins" -type KVMetadataDeleteCommand struct { - *BaseCommand - flagMount string -} + auditFile "github.com/hashicorp/vault/builtin/audit/file" + auditSocket "github.com/hashicorp/vault/builtin/audit/socket" + auditSyslog "github.com/hashicorp/vault/builtin/audit/syslog" -func (c *KVMetadataDeleteCommand) Synopsis() string { - return "Deletes all versions and metadata for a key in the KV store" -} + credAliCloud "github.com/hashicorp/vault-plugin-auth-alicloud" + credCentrify "github.com/hashicorp/vault-plugin-auth-centrify" + credCF "github.com/hashicorp/vault-plugin-auth-cf" + credGcp "github.com/hashicorp/vault-plugin-auth-gcp/plugin" + credOIDC "github.com/hashicorp/vault-plugin-auth-jwt" + credKerb "github.com/hashicorp/vault-plugin-auth-kerberos" + credOCI "github.com/hashicorp/vault-plugin-auth-oci" + credAws "github.com/hashicorp/vault/builtin/credential/aws" + credCert "github.com/hashicorp/vault/builtin/credential/cert" + credGitHub "github.com/hashicorp/vault/builtin/credential/github" + credLdap "github.com/hashicorp/vault/builtin/credential/ldap" + credOkta "github.com/hashicorp/vault/builtin/credential/okta" + credToken "github.com/hashicorp/vault/builtin/credential/token" + credUserpass "github.com/hashicorp/vault/builtin/credential/userpass" -func (c *KVMetadataDeleteCommand) Help() string { - helpText := ` -Usage: vault kv metadata delete [options] PATH + logicalKv "github.com/hashicorp/vault-plugin-secrets-kv" + logicalDb "github.com/hashicorp/vault/builtin/logical/database" - Deletes all versions and metadata for the provided key. 
+ physAerospike "github.com/hashicorp/vault/physical/aerospike" + physAliCloudOSS "github.com/hashicorp/vault/physical/alicloudoss" + physAzure "github.com/hashicorp/vault/physical/azure" + physCassandra "github.com/hashicorp/vault/physical/cassandra" + physCockroachDB "github.com/hashicorp/vault/physical/cockroachdb" + physConsul "github.com/hashicorp/vault/physical/consul" + physCouchDB "github.com/hashicorp/vault/physical/couchdb" + physDynamoDB "github.com/hashicorp/vault/physical/dynamodb" + physEtcd "github.com/hashicorp/vault/physical/etcd" + physFoundationDB "github.com/hashicorp/vault/physical/foundationdb" + physGCS "github.com/hashicorp/vault/physical/gcs" + physManta "github.com/hashicorp/vault/physical/manta" + physMSSQL "github.com/hashicorp/vault/physical/mssql" + physMySQL "github.com/hashicorp/vault/physical/mysql" + physOCI "github.com/hashicorp/vault/physical/oci" + physPostgreSQL "github.com/hashicorp/vault/physical/postgresql" + physRaft "github.com/hashicorp/vault/physical/raft" + physS3 "github.com/hashicorp/vault/physical/s3" + physSpanner "github.com/hashicorp/vault/physical/spanner" + physSwift "github.com/hashicorp/vault/physical/swift" + physZooKeeper "github.com/hashicorp/vault/physical/zookeeper" + physFile "github.com/hashicorp/vault/sdk/physical/file" + physInmem "github.com/hashicorp/vault/sdk/physical/inmem" - $ vault kv metadata delete -mount=secret foo - - The deprecated path-like syntax can also be used, but this should be avoided - for KV v2, as the fact that it is not actually the full API path to - the secret (secret/metadata/foo) can cause confusion: - - $ vault kv metadata delete secret/foo + sr "github.com/hashicorp/vault/serviceregistration" + csr "github.com/hashicorp/vault/serviceregistration/consul" + ksr "github.com/hashicorp/vault/serviceregistration/kubernetes" +) - Additional flags and more advanced use cases are detailed below. +const ( + // EnvVaultCLINoColor is an env var that toggles colored UI output. 
+ EnvVaultCLINoColor = `VAULT_CLI_NO_COLOR` + // EnvVaultFormat is the output format + EnvVaultFormat = `VAULT_FORMAT` + // EnvVaultLicense is an env var used in Vault Enterprise to provide a license blob + EnvVaultLicense = "VAULT_LICENSE" + // EnvVaultLicensePath is an env var used in Vault Enterprise to provide a + // path to a license file on disk + EnvVaultLicensePath = "VAULT_LICENSE_PATH" + // EnvVaultDetailed is to output detailed information (e.g., ListResponseWithInfo). + EnvVaultDetailed = `VAULT_DETAILED` + // EnvVaultLogFormat is used to specify the log format. Supported values are "standard" and "json" + EnvVaultLogFormat = "VAULT_LOG_FORMAT" + // EnvVaultLogLevel is used to specify the log level applied to logging + // Supported log levels: Trace, Debug, Error, Warn, Info + EnvVaultLogLevel = "VAULT_LOG_LEVEL" + // EnvVaultExperiments defines the experiments to enable for a server as a + // comma separated list. See experiments.ValidExperiments() for the list of + // valid experiments. Not mutable or persisted in storage, only read and + // logged at startup _per node_. This was initially introduced for the events + // system being developed over multiple release cycles. + EnvVaultExperiments = "VAULT_EXPERIMENTS" -` + c.Flags().Help() + // flagNameAddress is the flag used in the base command to read in the + // address of the Vault server. + flagNameAddress = "address" + // flagnameCACert is the flag used in the base command to read in the CA + // cert. + flagNameCACert = "ca-cert" + // flagnameCAPath is the flag used in the base command to read in the CA + // cert path. 
+ flagNameCAPath = "ca-path" + // flagNameClientCert is the flag used in the base command to read in the + // client key + flagNameClientKey = "client-key" + // flagNameClientCert is the flag used in the base command to read in the + // client cert + flagNameClientCert = "client-cert" + // flagNameTLSSkipVerify is the flag used in the base command to read in + // the option to ignore TLS certificate verification. + flagNameTLSSkipVerify = "tls-skip-verify" + // flagTLSServerName is the flag used in the base command to read in + // the TLS server name. + flagTLSServerName = "tls-server-name" + // flagNameAuditNonHMACRequestKeys is the flag name used for auth/secrets enable + flagNameAuditNonHMACRequestKeys = "audit-non-hmac-request-keys" + // flagNameAuditNonHMACResponseKeys is the flag name used for auth/secrets enable + flagNameAuditNonHMACResponseKeys = "audit-non-hmac-response-keys" + // flagNameDescription is the flag name used for tuning the secret and auth mount description parameter + flagNameDescription = "description" + // flagListingVisibility is the flag to toggle whether to show the mount in the UI-specific listing endpoint + flagNameListingVisibility = "listing-visibility" + // flagNamePassthroughRequestHeaders is the flag name used to set passthrough request headers to the backend + flagNamePassthroughRequestHeaders = "passthrough-request-headers" + // flagNameAllowedResponseHeaders is used to set allowed response headers from a plugin + flagNameAllowedResponseHeaders = "allowed-response-headers" + // flagNameTokenType is the flag name used to force a specific token type + flagNameTokenType = "token-type" + // flagNameAllowedManagedKeys is the flag name used for auth/secrets enable + flagNameAllowedManagedKeys = "allowed-managed-keys" + // flagNamePluginVersion selects what version of a plugin should be used. 
+ flagNamePluginVersion = "plugin-version" + // flagNameUserLockoutThreshold is the flag name used for tuning the auth mount lockout threshold parameter + flagNameUserLockoutThreshold = "user-lockout-threshold" + // flagNameUserLockoutDuration is the flag name used for tuning the auth mount lockout duration parameter + flagNameUserLockoutDuration = "user-lockout-duration" + // flagNameUserLockoutCounterResetDuration is the flag name used for tuning the auth mount lockout counter reset parameter + flagNameUserLockoutCounterResetDuration = "user-lockout-counter-reset-duration" + // flagNameUserLockoutDisable is the flag name used for tuning the auth mount disable lockout parameter + flagNameUserLockoutDisable = "user-lockout-disable" + // flagNameDisableRedirects is used to prevent the client from honoring a single redirect as a response to a request + flagNameDisableRedirects = "disable-redirects" + // flagNameCombineLogs is used to specify whether log output should be combined and sent to stdout + flagNameCombineLogs = "combine-logs" + // flagNameLogFile is used to specify the path to the log file that Vault should use for logging + flagNameLogFile = "log-file" + // flagNameLogRotateBytes is the flag used to specify the number of bytes a log file should be before it is rotated. + flagNameLogRotateBytes = "log-rotate-bytes" + // flagNameLogRotateDuration is the flag used to specify the duration after which a log file should be rotated. + flagNameLogRotateDuration = "log-rotate-duration" + // flagNameLogRotateMaxFiles is the flag used to specify the maximum number of older/archived log files to keep. + flagNameLogRotateMaxFiles = "log-rotate-max-files" + // flagNameLogFormat is the flag used to specify the log format. 
Supported values are "standard" and "json" + flagNameLogFormat = "log-format" + // flagNameLogLevel is used to specify the log level applied to logging + // Supported log levels: Trace, Debug, Error, Warn, Info + flagNameLogLevel = "log-level" + // flagNameDelegatedAuthAccessors allows operators to specify the allowed mount accessors a backend can delegate + // authentication + flagNameDelegatedAuthAccessors = "delegated-auth-accessors" +) - return strings.TrimSpace(helpText) -} +var ( + auditBackends = map[string]audit.Factory{ + "file": auditFile.Factory, + "socket": auditSocket.Factory, + "syslog": auditSyslog.Factory, + } -func (c *KVMetadataDeleteCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP) + credentialBackends = map[string]logical.Factory{ + "plugin": plugin.Factory, + } - // Common Options - f := set.NewFlagSet("Common Options") + logicalBackends = map[string]logical.Factory{ + "plugin": plugin.Factory, + "database": logicalDb.Factory, + // This is also available in the plugin catalog, but is here due to the need to + // automatically mount it. + "kv": logicalKv.Factory, + } - f.StringVar(&StringVar{ - Name: "mount", - Target: &c.flagMount, - Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value - Usage: `Specifies the path where the KV backend is mounted. If specified, - the next argument will be interpreted as the secret path. 
If this flag is - not specified, the next argument will be interpreted as the combined mount - path and secret path, with /metadata/ automatically appended between KV - v2 secrets.`, - }) + physicalBackends = map[string]physical.Factory{ + "aerospike": physAerospike.NewAerospikeBackend, + "alicloudoss": physAliCloudOSS.NewAliCloudOSSBackend, + "azure": physAzure.NewAzureBackend, + "cassandra": physCassandra.NewCassandraBackend, + "cockroachdb": physCockroachDB.NewCockroachDBBackend, + "consul": physConsul.NewConsulBackend, + "couchdb_transactional": physCouchDB.NewTransactionalCouchDBBackend, + "couchdb": physCouchDB.NewCouchDBBackend, + "dynamodb": physDynamoDB.NewDynamoDBBackend, + "etcd": physEtcd.NewEtcdBackend, + "file_transactional": physFile.NewTransactionalFileBackend, + "file": physFile.NewFileBackend, + "foundationdb": physFoundationDB.NewFDBBackend, + "gcs": physGCS.NewBackend, + "inmem_ha": physInmem.NewInmemHA, + "inmem_transactional_ha": physInmem.NewTransactionalInmemHA, + "inmem_transactional": physInmem.NewTransactionalInmem, + "inmem": physInmem.NewInmem, + "manta": physManta.NewMantaBackend, + "mssql": physMSSQL.NewMSSQLBackend, + "mysql": physMySQL.NewMySQLBackend, + "oci": physOCI.NewBackend, + "postgresql": physPostgreSQL.NewPostgreSQLBackend, + "s3": physS3.NewS3Backend, + "spanner": physSpanner.NewBackend, + "swift": physSwift.NewSwiftBackend, + "raft": physRaft.NewRaftBackend, + "zookeeper": physZooKeeper.NewZooKeeperBackend, + } - return set -} + serviceRegistrations = map[string]sr.Factory{ + "consul": csr.NewServiceRegistration, + "kubernetes": ksr.NewServiceRegistration, + } -func (c *KVMetadataDeleteCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultFiles() -} + loginHandlers = map[string]LoginHandler{ + "alicloud": &credAliCloud.CLIHandler{}, + "aws": &credAws.CLIHandler{}, + "centrify": &credCentrify.CLIHandler{}, + "cert": &credCert.CLIHandler{}, + "cf": &credCF.CLIHandler{}, + "gcp": &credGcp.CLIHandler{}, + 
"github": &credGitHub.CLIHandler{}, + "kerberos": &credKerb.CLIHandler{}, + "ldap": &credLdap.CLIHandler{}, + "oci": &credOCI.CLIHandler{}, + "oidc": &credOIDC.CLIHandler{}, + "okta": &credOkta.CLIHandler{}, + "pcf": &credCF.CLIHandler{}, // Deprecated. + "radius": &credUserpass.CLIHandler{ + DefaultMount: "radius", + }, + "token": &credToken.CLIHandler{}, + "userpass": &credUserpass.CLIHandler{ + DefaultMount: "userpass", + }, + } +) -func (c *KVMetadataDeleteCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} +func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.CommandFactory { + getBaseCommand := func() *BaseCommand { + return &BaseCommand{ + UI: ui, + tokenHelper: runOpts.TokenHelper, + flagAddress: runOpts.Address, + client: runOpts.Client, + } + } -func (c *KVMetadataDeleteCommand) Run(args []string) int { - f := c.Flags() + commands := map[string]cli.CommandFactory{ + "agent": func() (cli.Command, error) { + return &AgentCommand{ + BaseCommand: &BaseCommand{ + UI: serverCmdUi, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + }, nil + }, + "agent generate-config": func() (cli.Command, error) { + return &AgentGenerateConfigCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "audit": func() (cli.Command, error) { + return &AuditCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "audit disable": func() (cli.Command, error) { + return &AuditDisableCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "audit enable": func() (cli.Command, error) { + return &AuditEnableCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "audit list": func() (cli.Command, error) { + return &AuditListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "auth tune": func() (cli.Command, error) { + return &AuthTuneCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "auth": func() (cli.Command, error) { + return &AuthCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "auth 
disable": func() (cli.Command, error) { + return &AuthDisableCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "auth enable": func() (cli.Command, error) { + return &AuthEnableCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "auth help": func() (cli.Command, error) { + return &AuthHelpCommand{ + BaseCommand: getBaseCommand(), + Handlers: loginHandlers, + }, nil + }, + "auth list": func() (cli.Command, error) { + return &AuthListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "auth move": func() (cli.Command, error) { + return &AuthMoveCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "debug": func() (cli.Command, error) { + return &DebugCommand{ + BaseCommand: getBaseCommand(), + ShutdownCh: MakeShutdownCh(), + }, nil + }, + "delete": func() (cli.Command, error) { + return &DeleteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "events subscribe": func() (cli.Command, error) { + return &EventsSubscribeCommands{ + BaseCommand: getBaseCommand(), + }, nil + }, + "lease": func() (cli.Command, error) { + return &LeaseCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "lease renew": func() (cli.Command, error) { + return &LeaseRenewCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "lease lookup": func() (cli.Command, error) { + return &LeaseLookupCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "lease revoke": func() (cli.Command, error) { + return &LeaseRevokeCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "list": func() (cli.Command, error) { + return &ListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "login": func() (cli.Command, error) { + return &LoginCommand{ + BaseCommand: getBaseCommand(), + Handlers: loginHandlers, + }, nil + }, + "namespace": func() (cli.Command, error) { + return &NamespaceCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "namespace list": func() (cli.Command, error) { + return &NamespaceListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, 
+ "namespace lookup": func() (cli.Command, error) { + return &NamespaceLookupCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "namespace create": func() (cli.Command, error) { + return &NamespaceCreateCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "namespace patch": func() (cli.Command, error) { + return &NamespacePatchCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "namespace delete": func() (cli.Command, error) { + return &NamespaceDeleteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "namespace lock": func() (cli.Command, error) { + return &NamespaceAPILockCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "namespace unlock": func() (cli.Command, error) { + return &NamespaceAPIUnlockCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator": func() (cli.Command, error) { + return &OperatorCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator diagnose": func() (cli.Command, error) { + return &OperatorDiagnoseCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator generate-root": func() (cli.Command, error) { + return &OperatorGenerateRootCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator init": func() (cli.Command, error) { + return &OperatorInitCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator key-status": func() (cli.Command, error) { + return &OperatorKeyStatusCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator migrate": func() (cli.Command, error) { + return &OperatorMigrateCommand{ + BaseCommand: getBaseCommand(), + PhysicalBackends: physicalBackends, + ShutdownCh: MakeShutdownCh(), + }, nil + }, + "operator raft": func() (cli.Command, error) { + return &OperatorRaftCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft autopilot get-config": func() (cli.Command, error) { + return &OperatorRaftAutopilotGetConfigCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft autopilot set-config": 
func() (cli.Command, error) { + return &OperatorRaftAutopilotSetConfigCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft autopilot state": func() (cli.Command, error) { + return &OperatorRaftAutopilotStateCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft list-peers": func() (cli.Command, error) { + return &OperatorRaftListPeersCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft join": func() (cli.Command, error) { + return &OperatorRaftJoinCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft remove-peer": func() (cli.Command, error) { + return &OperatorRaftRemovePeerCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft snapshot": func() (cli.Command, error) { + return &OperatorRaftSnapshotCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft snapshot inspect": func() (cli.Command, error) { + return &OperatorRaftSnapshotInspectCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft snapshot restore": func() (cli.Command, error) { + return &OperatorRaftSnapshotRestoreCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator raft snapshot save": func() (cli.Command, error) { + return &OperatorRaftSnapshotSaveCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator rekey": func() (cli.Command, error) { + return &OperatorRekeyCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator rotate": func() (cli.Command, error) { + return &OperatorRotateCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator seal": func() (cli.Command, error) { + return &OperatorSealCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator step-down": func() (cli.Command, error) { + return &OperatorStepDownCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator usage": func() (cli.Command, error) { + return &OperatorUsageCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator 
unseal": func() (cli.Command, error) { + return &OperatorUnsealCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "operator members": func() (cli.Command, error) { + return &OperatorMembersCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "patch": func() (cli.Command, error) { + return &PatchCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "path-help": func() (cli.Command, error) { + return &PathHelpCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki": func() (cli.Command, error) { + return &PKICommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki health-check": func() (cli.Command, error) { + return &PKIHealthCheckCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki issue": func() (cli.Command, error) { + return &PKIIssueCACommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki list-intermediates": func() (cli.Command, error) { + return &PKIListIntermediateCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki reissue": func() (cli.Command, error) { + return &PKIReIssueCACommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "pki verify-sign": func() (cli.Command, error) { + return &PKIVerifySignCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin": func() (cli.Command, error) { + return &PluginCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin deregister": func() (cli.Command, error) { + return &PluginDeregisterCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin info": func() (cli.Command, error) { + return &PluginInfoCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin list": func() (cli.Command, error) { + return &PluginListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin register": func() (cli.Command, error) { + return &PluginRegisterCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin reload": func() (cli.Command, error) { + return &PluginReloadCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + 
"plugin reload-status": func() (cli.Command, error) { + return &PluginReloadStatusCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime": func() (cli.Command, error) { + return &PluginRuntimeCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime register": func() (cli.Command, error) { + return &PluginRuntimeRegisterCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime deregister": func() (cli.Command, error) { + return &PluginRuntimeDeregisterCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime info": func() (cli.Command, error) { + return &PluginRuntimeInfoCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "plugin runtime list": func() (cli.Command, error) { + return &PluginRuntimeListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "proxy": func() (cli.Command, error) { + return &ProxyCommand{ + BaseCommand: &BaseCommand{ + UI: serverCmdUi, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + }, nil + }, + "policy": func() (cli.Command, error) { + return &PolicyCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "policy delete": func() (cli.Command, error) { + return &PolicyDeleteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "policy fmt": func() (cli.Command, error) { + return &PolicyFmtCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "policy list": func() (cli.Command, error) { + return &PolicyListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "policy read": func() (cli.Command, error) { + return &PolicyReadCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "policy write": func() (cli.Command, error) { + return &PolicyWriteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "print": func() (cli.Command, error) { + return &PrintCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "print token": func() (cli.Command, error) { + return &PrintTokenCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, 
+ "read": func() (cli.Command, error) { + return &ReadCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "secrets": func() (cli.Command, error) { + return &SecretsCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "secrets disable": func() (cli.Command, error) { + return &SecretsDisableCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "secrets enable": func() (cli.Command, error) { + return &SecretsEnableCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "secrets list": func() (cli.Command, error) { + return &SecretsListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "secrets move": func() (cli.Command, error) { + return &SecretsMoveCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "secrets tune": func() (cli.Command, error) { + return &SecretsTuneCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "server": func() (cli.Command, error) { + return &ServerCommand{ + BaseCommand: &BaseCommand{ + UI: serverCmdUi, + tokenHelper: runOpts.TokenHelper, + flagAddress: runOpts.Address, + }, + AuditBackends: auditBackends, + CredentialBackends: credentialBackends, + LogicalBackends: logicalBackends, + PhysicalBackends: physicalBackends, - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } + ServiceRegistrations: serviceRegistrations, - args = f.Args() - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) - return 1 + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + SigUSR2Ch: MakeSigUSR2Ch(), + }, nil + }, + "ssh": func() (cli.Command, error) { + return &SSHCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "status": func() (cli.Command, error) { + return &StatusCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transform": func() (cli.Command, error) { + return &TransformCommand{ + BaseCommand: 
getBaseCommand(), + }, nil + }, + "transform import": func() (cli.Command, error) { + return &TransformImportCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transform import-version": func() (cli.Command, error) { + return &TransformImportVersionCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transit": func() (cli.Command, error) { + return &TransitCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transit import": func() (cli.Command, error) { + return &TransitImportCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "transit import-version": func() (cli.Command, error) { + return &TransitImportVersionCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "token": func() (cli.Command, error) { + return &TokenCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "token create": func() (cli.Command, error) { + return &TokenCreateCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "token capabilities": func() (cli.Command, error) { + return &TokenCapabilitiesCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "token lookup": func() (cli.Command, error) { + return &TokenLookupCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "token renew": func() (cli.Command, error) { + return &TokenRenewCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "token revoke": func() (cli.Command, error) { + return &TokenRevokeCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "unwrap": func() (cli.Command, error) { + return &UnwrapCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "version": func() (cli.Command, error) { + return &VersionCommand{ + VersionInfo: version.GetVersion(), + BaseCommand: getBaseCommand(), + }, nil + }, + "version-history": func() (cli.Command, error) { + return &VersionHistoryCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "write": func() (cli.Command, error) { + return &WriteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv": func() (cli.Command, error) { + 
return &KVCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv put": func() (cli.Command, error) { + return &KVPutCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv patch": func() (cli.Command, error) { + return &KVPatchCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv rollback": func() (cli.Command, error) { + return &KVRollbackCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv get": func() (cli.Command, error) { + return &KVGetCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv delete": func() (cli.Command, error) { + return &KVDeleteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv list": func() (cli.Command, error) { + return &KVListCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv destroy": func() (cli.Command, error) { + return &KVDestroyCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv undelete": func() (cli.Command, error) { + return &KVUndeleteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv enable-versioning": func() (cli.Command, error) { + return &KVEnableVersioningCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv metadata": func() (cli.Command, error) { + return &KVMetadataCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv metadata put": func() (cli.Command, error) { + return &KVMetadataPutCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv metadata patch": func() (cli.Command, error) { + return &KVMetadataPatchCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv metadata get": func() (cli.Command, error) { + return &KVMetadataGetCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "kv metadata delete": func() (cli.Command, error) { + return &KVMetadataDeleteCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, + "monitor": func() (cli.Command, error) { + return &MonitorCommand{ + BaseCommand: getBaseCommand(), + ShutdownCh: MakeShutdownCh(), + }, nil + }, } - client, err := c.Client() - if err != nil { 
- c.UI.Error(err.Error()) - return 2 - } + entInitCommands(ui, serverCmdUi, runOpts, commands) + return commands +} - // If true, we're working with "-mount=secret foo" syntax. - // If false, we're using "secret/foo" syntax. - mountFlagSyntax := c.flagMount != "" - - var ( - mountPath string - partialPath string - v2 bool - ) - - // Parse the paths and grab the KV version - if mountFlagSyntax { - // In this case, this arg is the secret path (e.g. "foo"). - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } +// MakeShutdownCh returns a channel that can be used for shutdown +// notifications for commands. This channel will send a message for every +// SIGINT or SIGTERM received. +func MakeShutdownCh() chan struct{} { + resultCh := make(chan struct{}) - if v2 { - partialPath = path.Join(mountPath, partialPath) - } - } else { - // In this case, this arg is a path-like combination of mountPath/secretPath. - // (e.g. "secret/foo") - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(partialPath, client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - } + shutdownCh := make(chan os.Signal, 4) + signal.Notify(shutdownCh, os.Interrupt, syscall.SIGTERM) + go func() { + <-shutdownCh + close(resultCh) + }() + return resultCh +} - if !v2 { - c.UI.Error("Metadata not supported on KV Version 1") - return 1 - } +// MakeSighupCh returns a channel that can be used for SIGHUP +// reloading. This channel will send a message for every +// SIGHUP received. 
+func MakeSighupCh() chan struct{} { + resultCh := make(chan struct{}) - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) - if secret, err := client.Logical().Delete(fullPath); err != nil { - c.UI.Error(fmt.Sprintf("Error deleting %s: %s", fullPath, err)) - if secret != nil { - OutputSecret(c.UI, secret) + signalCh := make(chan os.Signal, 4) + signal.Notify(signalCh, syscall.SIGHUP) + go func() { + for { + <-signalCh + resultCh <- struct{}{} } - return 2 - } - - c.UI.Info(fmt.Sprintf("Success! Data deleted (if it existed) at: %s", fullPath)) - return 0 + }() + return resultCh } diff --git a/command/kv_metadata_get.go b/command/kv_metadata_get.go index 54d73c0b3a22..1858da83be36 100644 --- a/command/kv_metadata_get.go +++ b/command/kv_metadata_get.go @@ -1,197 +1,19 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "fmt" - "path" - "sort" - "strconv" - "strings" - - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*KVMetadataGetCommand)(nil) - _ cli.CommandAutocomplete = (*KVMetadataGetCommand)(nil) -) - -type KVMetadataGetCommand struct { - *BaseCommand - flagMount string -} - -func (c *KVMetadataGetCommand) Synopsis() string { - return "Retrieves key metadata from the KV store" -} - -func (c *KVMetadataGetCommand) Help() string { - helpText := ` -Usage: vault kv metadata get [options] KEY - - Retrieves the metadata from Vault's key-value store at the given key name. If no - key exists with that name, an error is returned. - - $ vault kv metadata get -mount=secret foo - - The deprecated path-like syntax can also be used, but this should be avoided - for KV v2, as the fact that it is not actually the full API path to - the secret (secret/metadata/foo) can cause confusion: - - $ vault kv metadata get secret/foo - - Additional flags and more advanced use cases are detailed below. 
- -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *KVMetadataGetCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) - - // Common Options - f := set.NewFlagSet("Common Options") - - f.StringVar(&StringVar{ - Name: "mount", - Target: &c.flagMount, - Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value - Usage: `Specifies the path where the KV backend is mounted. If specified, - the next argument will be interpreted as the secret path. If this flag is - not specified, the next argument will be interpreted as the combined mount - path and secret path, with /metadata/ automatically appended between KV - v2 secrets.`, - }) - - return set -} - -func (c *KVMetadataGetCommand) AutocompleteArgs() complete.Predictor { - return nil -} - -func (c *KVMetadataGetCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *KVMetadataGetCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) - return 1 - } - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - // If true, we're working with "-mount=secret foo" syntax. - // If false, we're using "secret/foo" syntax. - mountFlagSyntax := c.flagMount != "" - - var ( - mountPath string - partialPath string - v2 bool - ) - - // Parse the paths and grab the KV version - if mountFlagSyntax { - // In this case, this arg is the secret path (e.g. "foo"). 
- partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - if v2 { - partialPath = path.Join(mountPath, partialPath) - } - } else { - // In this case, this arg is a path-like combination of mountPath/secretPath. - // (e.g. "secret/foo") - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(partialPath, client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - } - - if !v2 { - c.UI.Error("Metadata not supported on KV Version 1") - return 1 - } - - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) - secret, err := client.Logical().Read(fullPath) - if err != nil { - c.UI.Error(fmt.Sprintf("Error reading %s: %s", fullPath, err)) - return 2 - } - if secret == nil { - c.UI.Error(fmt.Sprintf("No value found at %s", fullPath)) - return 2 - } - - if c.flagField != "" { - return PrintRawField(c.UI, secret, c.flagField) - } - - // If we have wrap info print the secret normally. 
- if secret.WrapInfo != nil || c.flagFormat != "table" { - return OutputSecret(c.UI, secret) - } - - versionsRaw, ok := secret.Data["versions"] - if !ok || versionsRaw == nil { - c.UI.Error(fmt.Sprintf("No value found at %s", fullPath)) - OutputSecret(c.UI, secret) - return 2 - } - versions := versionsRaw.(map[string]interface{}) - - delete(secret.Data, "versions") - - outputPath(c.UI, fullPath, "Metadata Path") - - c.UI.Info(getHeaderForMap("Metadata", secret.Data)) - OutputSecret(c.UI, secret) - - versionKeys := []int{} - for k := range versions { - i, err := strconv.Atoi(k) - if err != nil { - c.UI.Error(fmt.Sprintf("Error parsing version %s", k)) - return 2 - } - - versionKeys = append(versionKeys, i) - } - - sort.Ints(versionKeys) - - for _, v := range versionKeys { - c.UI.Info("\n" + getHeaderForMap(fmt.Sprintf("Version %d", v), versions[strconv.Itoa(v)].(map[string]interface{}))) - OutputData(c.UI, versions[strconv.Itoa(v)]) - } - - return 0 -} +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + +
+

+ Create a path filter config for + {{this.model.config.id}} +

+
+
+ +
+ + + + + \ No newline at end of file diff --git a/command/kv_metadata_patch.go b/command/kv_metadata_patch.go index 74c46476d0de..03b5dbd18de0 100644 --- a/command/kv_metadata_patch.go +++ b/command/kv_metadata_patch.go @@ -1,262 +1,20 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "context" - "fmt" - "io" - "path" - "strings" - "time" - - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*KVMetadataPutCommand)(nil) - _ cli.CommandAutocomplete = (*KVMetadataPutCommand)(nil) -) - -type KVMetadataPatchCommand struct { - *BaseCommand - - flagMaxVersions int - flagCASRequired BoolPtr - flagDeleteVersionAfter time.Duration - flagCustomMetadata map[string]string - flagRemoveCustomMetadata []string - flagMount string - testStdin io.Reader // for tests -} - -func (c *KVMetadataPatchCommand) Synopsis() string { - return "Patches key settings in the KV store" -} - -func (c *KVMetadataPatchCommand) Help() string { - helpText := ` -Usage: vault kv metadata patch [options] KEY - - This command can be used to create a blank key in the key-value store or to - update key configuration for a specified key. 
- - Create a key in the key-value store with no data: - - $ vault kv metadata patch -mount=secret foo - - The deprecated path-like syntax can also be used, but this should be avoided - for KV v2, as the fact that it is not actually the full API path to - the secret (secret/metadata/foo) can cause confusion: - - $ vault kv metadata patch secret/foo - - Set a max versions setting on the key: - - $ vault kv metadata patch -mount=secret -max-versions=5 foo - - Set delete-version-after on the key: - - $ vault kv metadata patch -mount=secret -delete-version-after=3h25m19s foo - - Require Check-and-Set for this key: - - $ vault kv metadata patch -mount=secret -cas-required foo - - Set custom metadata on the key: - - $ vault kv metadata patch -mount=secret -custom-metadata=foo=abc -custom-metadata=bar=123 foo - - To remove custom meta data from the corresponding path in the key-value store, kv metadata patch can be used. - - $ vault kv metadata patch -mount=secret -remove-custom-metadata=bar foo - - Additional flags and more advanced use cases are detailed below. - -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *KVMetadataPatchCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) - - // Common Options - f := set.NewFlagSet("Common Options") - - f.IntVar(&IntVar{ - Name: "max-versions", - Target: &c.flagMaxVersions, - Default: -1, - Usage: `The number of versions to keep. If not set, the backend’s configured max version is used.`, - }) - - f.BoolPtrVar(&BoolPtrVar{ - Name: "cas-required", - Target: &c.flagCASRequired, - Usage: `If true the key will require the cas parameter to be set on all write requests. If false, the backend’s configuration will be used.`, - }) - - f.DurationVar(&DurationVar{ - Name: "delete-version-after", - Target: &c.flagDeleteVersionAfter, - Default: -1, - EnvVar: "", - Completion: complete.PredictAnything, - Usage: `Specifies the length of time before a version is deleted. 
- If not set, the backend's configured delete-version-after is used. Cannot be - greater than the backend's delete-version-after. The delete-version-after is - specified as a numeric string with a suffix like "30s" or - "3h25m19s".`, - }) - - f.StringMapVar(&StringMapVar{ - Name: "custom-metadata", - Target: &c.flagCustomMetadata, - Default: map[string]string{}, - Usage: `Specifies arbitrary version-agnostic key=value metadata meant to describe a secret. - This can be specified multiple times to add multiple pieces of metadata.`, - }) - - f.StringSliceVar(&StringSliceVar{ - Name: "remove-custom-metadata", - Target: &c.flagRemoveCustomMetadata, - Default: []string{}, - Usage: "Key to remove from custom metadata. To specify multiple values, specify this flag multiple times.", - }) - - f.StringVar(&StringVar{ - Name: "mount", - Target: &c.flagMount, - Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value - Usage: `Specifies the path where the KV backend is mounted. If specified, - the next argument will be interpreted as the secret path. 
If this flag is - not specified, the next argument will be interpreted as the combined mount - path and secret path, with /metadata/ automatically appended between KV - v2 secrets.`, - }) - - return set -} - -func (c *KVMetadataPatchCommand) AutocompleteArgs() complete.Predictor { - return nil -} - -func (c *KVMetadataPatchCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *KVMetadataPatchCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) - return 1 - } - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - // If true, we're working with "-mount=secret foo" syntax. - // If false, we're using "secret/foo" syntax. - mountFlagSyntax := c.flagMount != "" - - var ( - mountPath string - partialPath string - v2 bool - ) - - // Parse the paths and grab the KV version - if mountFlagSyntax { - // In this case, this arg is the secret path (e.g. "foo"). - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - if v2 { - partialPath = path.Join(mountPath, partialPath) - } - } else { - // In this case, this arg is a path-like combination of mountPath/secretPath. - // (e.g. 
"secret/foo") - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(partialPath, client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - } - if !v2 { - c.UI.Error("Metadata not supported on KV Version 1") - return 1 - } - - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) - - data := make(map[string]interface{}, 0) - - if c.flagMaxVersions >= 0 { - data["max_versions"] = c.flagMaxVersions - } - - if c.flagCASRequired.IsSet() { - data["cas_required"] = c.flagCASRequired.Get() - } - - if c.flagDeleteVersionAfter >= 0 { - data["delete_version_after"] = c.flagDeleteVersionAfter.String() - } - - customMetadata := make(map[string]interface{}) - - for key, value := range c.flagCustomMetadata { - customMetadata[key] = value - } - - for _, key := range c.flagRemoveCustomMetadata { - // A null in a JSON merge patch payload will remove the associated key - customMetadata[key] = nil - } - - data["custom_metadata"] = customMetadata - - secret, err := client.Logical().JSONMergePatch(context.Background(), fullPath, data) - if err != nil { - c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", fullPath, err)) - - if secret != nil { - OutputSecret(c.UI, secret) - } - return 2 - } - - if secret == nil { - // Don't output anything unless using the "table" format - if Format(c.UI) == "table" { - c.UI.Info(fmt.Sprintf("Success! Data written to: %s", fullPath)) - } - return 0 - } - - return OutputSecret(c.UI, secret) -} +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + + +
+

+ Edit path filter config for + {{this.model.config.id}} +

+
+
+ +
+ + + + + \ No newline at end of file diff --git a/command/kv_metadata_patch_test.go b/command/kv_metadata_patch_test.go index a8dc583780c5..f674c7df3625 100644 --- a/command/kv_metadata_patch_test.go +++ b/command/kv_metadata_patch_test.go @@ -1,299 +1,17 @@ // Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 +// SPDX-License-Identifier: MPL-2.0 -package command +package pluginruntimeutil -import ( - "encoding/json" - "io" - "strings" - "testing" +import "github.com/hashicorp/vault/sdk/helper/consts" - "github.com/go-test/deep" - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" -) - -func testKVMetadataPatchCommand(tb testing.TB) (*cli.MockUi, *KVMetadataPatchCommand) { - tb.Helper() - - ui := cli.NewMockUi() - return ui, &KVMetadataPatchCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - } -} - -func kvMetadataPatchWithRetry(t *testing.T, client *api.Client, args []string, stdin *io.PipeReader) (int, string) { - t.Helper() - - return retryKVCommand(t, func() (int, string) { - ui, cmd := testKVMetadataPatchCommand(t) - cmd.client = client - - if stdin != nil { - cmd.testStdin = stdin - } - - code := cmd.Run(args) - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - - return code, combined - }) -} - -func kvMetadataPutWithRetry(t *testing.T, client *api.Client, args []string, stdin *io.PipeReader) (int, string) { - t.Helper() - - return retryKVCommand(t, func() (int, string) { - ui, cmd := testKVMetadataPutCommand(t) - cmd.client = client - - if stdin != nil { - cmd.testStdin = stdin - } - - code := cmd.Run(args) - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - - return code, combined - }) -} - -func TestKvMetadataPatchCommand_EmptyArgs(t *testing.T) { - client, closer := testVaultServer(t) - defer closer() - - if err := client.Sys().Mount("kv/", &api.MountInput{ - Type: "kv-v2", - }); err != nil { - t.Fatalf("kv-v2 mount error: %#v", err) - } - - args := make([]string, 0) - code, combined := 
kvMetadataPatchWithRetry(t, client, args, nil) - - expectedCode := 1 - expectedOutput := "Not enough arguments" - - if code != expectedCode { - t.Fatalf("expected code to be %d but was %d for patch cmd with args %#v", expectedCode, code, args) - } - - if !strings.Contains(combined, expectedOutput) { - t.Fatalf("expected output to be %q but was %q for patch cmd with args %#v", expectedOutput, combined, args) - } -} - -func TestKvMetadataPatchCommand_Flags(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - args []string - out string - code int - expectedUpdates map[string]interface{} - }{ - { - "cas_required_success", - []string{"-cas-required=true"}, - "Success!", - 0, - map[string]interface{}{ - "cas_required": true, - }, - }, - { - "cas_required_invalid", - []string{"-cas-required=12345"}, - "invalid boolean value", - 1, - map[string]interface{}{}, - }, - { - "custom_metadata_success", - []string{"-custom-metadata=baz=ghi"}, - "Success!", - 0, - map[string]interface{}{ - "custom_metadata": map[string]interface{}{ - "foo": "abc", - "bar": "def", - "baz": "ghi", - }, - }, - }, - { - "remove-custom_metadata", - []string{"-custom-metadata=baz=ghi", "-remove-custom-metadata=foo"}, - "Success!", - 0, - map[string]interface{}{ - "custom_metadata": map[string]interface{}{ - "bar": "def", - "baz": "ghi", - }, - }, - }, - { - "remove-custom_metadata-multiple", - []string{"-custom-metadata=baz=ghi", "-remove-custom-metadata=foo", "-remove-custom-metadata=bar"}, - "Success!", - 0, - map[string]interface{}{ - "custom_metadata": map[string]interface{}{ - "baz": "ghi", - }, - }, - }, - { - "delete_version_after_success", - []string{"-delete-version-after=5s"}, - "Success!", - 0, - map[string]interface{}{ - "delete_version_after": "5s", - }, - }, - { - "delete_version_after_invalid", - []string{"-delete-version-after=false"}, - "invalid duration", - 1, - map[string]interface{}{}, - }, - { - "max_versions_success", - []string{"-max-versions=10"}, - "Success!", 
- 0, - map[string]interface{}{ - "max_versions": json.Number("10"), - }, - }, - { - "max_versions_invalid", - []string{"-max-versions=false"}, - "invalid syntax", - 1, - map[string]interface{}{}, - }, - { - "multiple_flags_success", - []string{"-max-versions=20", "-custom-metadata=baz=123"}, - "Success!", - 0, - map[string]interface{}{ - "max_versions": json.Number("20"), - "custom_metadata": map[string]interface{}{ - "foo": "abc", - "bar": "def", - "baz": "123", - }, - }, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - client, closer := testVaultServer(t) - defer closer() - - basePath := t.Name() + "/" - secretPath := basePath + "my-secret" - metadataPath := basePath + "metadata/" + "my-secret" - - if err := client.Sys().Mount(basePath, &api.MountInput{ - Type: "kv-v2", - }); err != nil { - t.Fatalf("kv-v2 mount error: %#v", err) - } - - putArgs := []string{"-cas-required=true", "-custom-metadata=foo=abc", "-custom-metadata=bar=def", secretPath} - code, combined := kvMetadataPutWithRetry(t, client, putArgs, nil) - - if code != 0 { - t.Fatalf("initial metadata put failed, code: %d, output: %s", code, combined) - } - - initialMetadata, err := client.Logical().Read(metadataPath) - if err != nil { - t.Fatalf("metadata read failed, err: %#v", err) - } - - patchArgs := append(tc.args, secretPath) - - code, combined = kvMetadataPatchWithRetry(t, client, patchArgs, nil) - - if !strings.Contains(combined, tc.out) { - t.Fatalf("expected output to be %q but was %q for patch cmd with args %#v", tc.out, combined, patchArgs) - } - if code != tc.code { - t.Fatalf("expected code to be %d but was %d for patch cmd with args %#v", tc.code, code, patchArgs) - } - - patchedMetadata, err := client.Logical().Read(metadataPath) - if err != nil { - t.Fatalf("metadata read failed, err: %#v", err) - } - - for k, v := range patchedMetadata.Data { - var expectedVal interface{} - - if inputVal, ok := tc.expectedUpdates[k]; ok { - expectedVal = inputVal - } else 
{ - expectedVal = initialMetadata.Data[k] - } - - if diff := deep.Equal(expectedVal, v); len(diff) > 0 { - t.Fatalf("patched %q mismatch, diff: %#v", k, diff) - } - } - }) - } -} - -func TestKvMetadataPatchCommand_CasWarning(t *testing.T) { - client, closer := testVaultServer(t) - defer closer() - - basePath := "kv/" - if err := client.Sys().Mount(basePath, &api.MountInput{ - Type: "kv-v2", - }); err != nil { - t.Fatalf("kv-v2 mount error: %#v", err) - } - - secretPath := basePath + "my-secret" - - args := []string{"-cas-required=true", secretPath} - code, combined := kvMetadataPutWithRetry(t, client, args, nil) - - if code != 0 { - t.Fatalf("metadata put failed, code: %d, output: %s", code, combined) - } - - casConfig := map[string]interface{}{ - "cas_required": true, - } - - _, err := client.Logical().Write(basePath+"config", casConfig) - if err != nil { - t.Fatalf("config write failed, err: #%v", err) - } - - args = []string{"-cas-required=false", secretPath} - code, combined = kvMetadataPatchWithRetry(t, client, args, nil) - - if code != 0 { - t.Fatalf("expected code to be 0 but was %d for patch cmd with args %#v", code, args) - } - - expectedOutput := "\"cas_required\" set to false, but is mandated by backend config" - if !strings.Contains(combined, expectedOutput) { - t.Fatalf("expected output to be %q but was %q for patch cmd with args %#v", expectedOutput, combined, args) - } +// PluginRuntimeConfig defines the metadata needed to run a plugin runtime +type PluginRuntimeConfig struct { + Name string `json:"name" structs:"name"` + Type consts.PluginRuntimeType `json:"type" structs:"type"` + OCIRuntime string `json:"oci_runtime" structs:"oci_runtime"` + CgroupParent string `json:"cgroup_parent" structs:"cgroup_parent"` + CPU int64 `json:"cpu" structs:"cpu"` + Memory int64 `json:"memory" structs:"memory"` + Rootless bool `json:"rootless" structs:"rootlesss"` } diff --git a/command/kv_metadata_put.go b/command/kv_metadata_put.go index 1cd8375320a1..a479656adc4d 
100644 --- a/command/kv_metadata_put.go +++ b/command/kv_metadata_put.go @@ -1,238 +1,147 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "fmt" - "io" - "path" - "strings" - "time" - - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*KVMetadataPutCommand)(nil) - _ cli.CommandAutocomplete = (*KVMetadataPutCommand)(nil) -) - -type KVMetadataPutCommand struct { - *BaseCommand - - flagMaxVersions int - flagCASRequired BoolPtr - flagDeleteVersionAfter time.Duration - flagCustomMetadata map[string]string - flagMount string - testStdin io.Reader // for tests -} - -func (c *KVMetadataPutCommand) Synopsis() string { - return "Sets or updates key settings in the KV store" -} - -func (c *KVMetadataPutCommand) Help() string { - helpText := ` -Usage: vault kv metadata put [options] KEY - - This command can be used to create a blank key in the key-value store or to - update key configuration for a specified key. - - Create a key in the key-value store with no data: - - $ vault kv metadata put -mount=secret foo - - The deprecated path-like syntax can also be used, but this should be avoided - for KV v2, as the fact that it is not actually the full API path to - the secret (secret/metadata/foo) can cause confusion: - - $ vault kv metadata put secret/foo - - Set a max versions setting on the key: - - $ vault kv metadata put -mount=secret -max-versions=5 foo - - Set delete-version-after on the key: - - $ vault kv metadata put -mount=secret -delete-version-after=3h25m19s foo - - Require Check-and-Set for this key: - - $ vault kv metadata put -mount=secret -cas-required foo - - Set custom metadata on the key: - - $ vault kv metadata put -mount=secret -custom-metadata=foo=abc -custom-metadata=bar=123 foo - - Additional flags and more advanced use cases are detailed below. 
- -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *KVMetadataPutCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) - - // Common Options - f := set.NewFlagSet("Common Options") - - f.IntVar(&IntVar{ - Name: "max-versions", - Target: &c.flagMaxVersions, - Default: -1, - Usage: `The number of versions to keep. If not set, the backend’s configured max version is used.`, - }) - - f.BoolPtrVar(&BoolPtrVar{ - Name: "cas-required", - Target: &c.flagCASRequired, - Usage: `If true the key will require the cas parameter to be set on all write requests. If false, the backend’s configuration will be used.`, - }) - - f.DurationVar(&DurationVar{ - Name: "delete-version-after", - Target: &c.flagDeleteVersionAfter, - Default: -1, - EnvVar: "", - Completion: complete.PredictAnything, - Usage: `Specifies the length of time before a version is deleted. - If not set, the backend's configured delete-version-after is used. Cannot be - greater than the backend's delete-version-after. The delete-version-after is - specified as a numeric string with a suffix like "30s" or - "3h25m19s".`, - }) - - f.StringMapVar(&StringMapVar{ - Name: "custom-metadata", - Target: &c.flagCustomMetadata, - Default: map[string]string{}, - Usage: "Specifies arbitrary version-agnostic key=value metadata meant to describe a secret." + - "This can be specified multiple times to add multiple pieces of metadata.", - }) - - f.StringVar(&StringVar{ - Name: "mount", - Target: &c.flagMount, - Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value - Usage: `Specifies the path where the KV backend is mounted. If specified, - the next argument will be interpreted as the secret path. 
If this flag is - not specified, the next argument will be interpreted as the combined mount - path and secret path, with /metadata/ automatically appended between KV - v2 secrets.`, - }) - - return set -} - -func (c *KVMetadataPutCommand) AutocompleteArgs() complete.Predictor { - return nil -} - -func (c *KVMetadataPutCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *KVMetadataPutCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) - return 1 - case len(args) > 1: - c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) - return 1 - } - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - // If true, we're working with "-mount=secret foo" syntax. - // If false, we're using "secret/foo" syntax. - mountFlagSyntax := c.flagMount != "" - - var ( - mountPath string - partialPath string - v2 bool - ) - - // Parse the paths and grab the KV version - if mountFlagSyntax { - // In this case, this arg is the secret path (e.g. "foo"). - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - if v2 { - partialPath = path.Join(mountPath, partialPath) - } - } else { - // In this case, this arg is a path-like combination of mountPath/secretPath. - // (e.g. 
"secret/foo") - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(partialPath, client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - } - - if !v2 { - c.UI.Error("Metadata not supported on KV Version 1") - return 1 - } - - fullPath := addPrefixToKVPath(partialPath, mountPath, "metadata", false) - data := map[string]interface{}{} - - if c.flagMaxVersions >= 0 { - data["max_versions"] = c.flagMaxVersions - } - - if c.flagDeleteVersionAfter >= 0 { - data["delete_version_after"] = c.flagDeleteVersionAfter.String() - } - - if c.flagCASRequired.IsSet() { - data["cas_required"] = c.flagCASRequired.Get() - } - - if len(c.flagCustomMetadata) > 0 { - data["custom_metadata"] = c.flagCustomMetadata - } - - secret, err := client.Logical().Write(fullPath, data) - if err != nil { - c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", fullPath, err)) - if secret != nil { - OutputSecret(c.UI, secret) - } - return 2 - } - if secret == nil { - // Don't output anything unless using the "table" format - if Format(c.UI) == "table" { - c.UI.Info(fmt.Sprintf("Success! Data written to: %s", fullPath)) - } - return 0 - } - - return OutputSecret(c.UI, secret) -} +/** + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { setupEngine } from 'ember-engines/test-support'; +import { setupMirage } from 'ember-cli-mirage/test-support'; +import { render, click, fillIn } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import { Response } from 'miragejs'; +import sinon from 'sinon'; +import { generateBreadcrumbs } from 'vault/tests/helpers/ldap'; + +const selectors = { + radioCard: '[data-test-radio-card="OpenLDAP"]', + save: '[data-test-config-save]', + binddn: '[data-test-field="binddn"] input', + bindpass: '[data-test-field="bindpass"] input', +}; + +module('Integration | Component | ldap | Page::Configure', function (hooks) { + setupRenderingTest(hooks); + setupEngine(hooks, 'ldap'); + setupMirage(hooks); + + const fillAndSubmit = async (rotate) => { + await click(selectors.radioCard); + await fillIn(selectors.binddn, 'foo'); + await fillIn(selectors.bindpass, 'bar'); + await click(selectors.save); + await click(`[data-test-save-${rotate}-rotate]`); + }; + + hooks.beforeEach(function () { + this.store = this.owner.lookup('service:store'); + this.newModel = this.store.createRecord('ldap/config', { backend: 'ldap-new' }); + this.existingConfig = { + schema: 'openldap', + binddn: 'cn=vault,ou=Users,dc=hashicorp,dc=com', + bindpass: 'foobar', + }; + this.store.pushPayload('ldap/config', { + modelName: 'ldap/config', + backend: 'ldap-edit', + ...this.existingConfig, + }); + this.editModel = this.store.peekRecord('ldap/config', 'ldap-edit'); + this.breadcrumbs = generateBreadcrumbs('ldap', 'configure'); + this.model = this.newModel; // most of the tests use newModel but set this to editModel when needed + this.renderComponent = () => { + return render(hbs``, { + owner: this.engine, + }); + }; + this.transitionStub = sinon.stub(this.owner.lookup('service:router'), 'transitionTo'); + }); + + test('it should render empty state 
when schema is not selected', async function (assert) { + await this.renderComponent(); + + assert.dom('[data-test-empty-state-title]').hasText('Choose an option', 'Empty state title renders'); + assert + .dom('[data-test-empty-state-message]') + .hasText('Pick an option above to see available configuration options', 'Empty state title renders'); + assert.dom(selectors.save).isDisabled('Save button is disabled when schema is not selected'); + + await click(selectors.radioCard); + assert + .dom('[data-test-component="empty-state"]') + .doesNotExist('Empty state is hidden when schema is selected'); + }); + + test('it should render validation messages for invalid form', async function (assert) { + await this.renderComponent(); + + await click(selectors.radioCard); + await click(selectors.save); + + assert + .dom('[data-test-field="binddn"] [data-test-inline-error-message]') + .hasText('Administrator distinguished name is required.', 'Validation message renders for binddn'); + assert + .dom('[data-test-field="bindpass"] [data-test-inline-error-message]') + .hasText('Administrator password is required.', 'Validation message renders for bindpass'); + assert + .dom('[data-test-invalid-form-message]') + .hasText('There are 2 errors with this form.', 'Invalid form message renders'); + }); + + test('it should save new configuration without rotating root password', async function (assert) { + assert.expect(2); + + this.server.post('/ldap-new/config', () => { + assert.ok(true, 'POST request made to save config'); + return new Response(204, {}); + }); + + await this.renderComponent(); + await fillAndSubmit('without'); + + assert.ok( + this.transitionStub.calledWith('vault.cluster.secrets.backend.ldap.configuration'), + 'Transitions to configuration route on save success' + ); + }); + + test('it should save new configuration and rotate root password', async function (assert) { + assert.expect(3); + + this.server.post('/ldap-new/config', () => { + assert.ok(true, 'POST request 
made to save config'); + return new Response(204, {}); + }); + this.server.post('/ldap-new/rotate-root', () => { + assert.ok(true, 'POST request made to rotate root password'); + return new Response(204, {}); + }); + + await this.renderComponent(); + await fillAndSubmit('with'); + + assert.ok( + this.transitionStub.calledWith('vault.cluster.secrets.backend.ldap.configuration'), + 'Transitions to configuration route on save success' + ); + }); + + test('it should populate fields when editing form', async function (assert) { + this.model = this.editModel; + + await this.renderComponent(); + + assert.dom(selectors.radioCard).isChecked('Correct radio card is checked for schema value'); + assert.dom(selectors.binddn).hasValue(this.existingConfig.binddn, 'binddn value renders'); + + await fillIn(selectors.binddn, 'foobar'); + await click('[data-test-config-cancel]'); + + assert.strictEqual(this.model.binddn, this.existingConfig.binddn, 'Model is rolled back on cancel'); + assert.ok( + this.transitionStub.calledWith('vault.cluster.secrets.backend.ldap.configuration'), + 'Transitions to configuration route on save success' + ); + }); +}); diff --git a/command/kv_metadata_put_test.go b/command/kv_metadata_put_test.go index a4068e23c886..393b8eb51e99 100644 --- a/command/kv_metadata_put_test.go +++ b/command/kv_metadata_put_test.go @@ -1,204 +1,109 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "encoding/json" - "strings" - "testing" - - "github.com/go-test/deep" - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" -) - -func testKVMetadataPutCommand(tb testing.TB) (*cli.MockUi, *KVMetadataPutCommand) { - tb.Helper() - - ui := cli.NewMockUi() - return ui, &KVMetadataPutCommand{ - BaseCommand: &BaseCommand{ - UI: ui, - }, - } -} - -func TestKvMetadataPutCommand_DeleteVersionAfter(t *testing.T) { - client, closer := testVaultServer(t) - defer closer() - - basePath := t.Name() + "/" - if err := client.Sys().Mount(basePath, &api.MountInput{ - Type: "kv-v2", - }); err != nil { - t.Fatal(err) - } - - ui, cmd := testKVMetadataPutCommand(t) - cmd.client = client - - // Set a limit of 1s first. - code := cmd.Run([]string{"-delete-version-after=1s", basePath + "secret/my-secret"}) - if code != 0 { - t.Fatalf("expected %d but received %d", 0, code) - } - - metaFullPath := basePath + "metadata/secret/my-secret" - combined := ui.OutputWriter.String() + ui.ErrorWriter.String() - success := "Success! Data written to: " + metaFullPath - if !strings.Contains(combined, success) { - t.Fatalf("expected %q but received %q", success, combined) - } - - secret, err := client.Logical().Read(metaFullPath) - if err != nil { - t.Fatal(err) - } - if secret.Data["delete_version_after"] != "1s" { - t.Fatalf("expected 1s but received %q", secret.Data["delete_version_after"]) - } - - // Now verify that we can return it to 0s. - ui, cmd = testKVMetadataPutCommand(t) - cmd.client = client - - // Set a limit of 1s first. 
- code = cmd.Run([]string{"-delete-version-after=0", basePath + "secret/my-secret"}) - if code != 0 { - t.Errorf("expected %d but received %d", 0, code) - } - - combined = ui.OutputWriter.String() + ui.ErrorWriter.String() - if !strings.Contains(combined, success) { - t.Errorf("expected %q but received %q", success, combined) - } - - secret, err = client.Logical().Read(metaFullPath) - if err != nil { - t.Fatal(err) - } - if secret.Data["delete_version_after"] != "0s" { - t.Fatalf("expected 0s but received %q", secret.Data["delete_version_after"]) - } -} - -func TestKvMetadataPutCommand_CustomMetadata(t *testing.T) { - client, closer := testVaultServer(t) - defer closer() - - basePath := t.Name() + "/" - secretPath := basePath + "secret/my-secret" - - if err := client.Sys().Mount(basePath, &api.MountInput{ - Type: "kv-v2", - }); err != nil { - t.Fatalf("kv-v2 mount error: %#v", err) - } - - ui, cmd := testKVMetadataPutCommand(t) - cmd.client = client - - exitStatus := cmd.Run([]string{"-custom-metadata=foo=abc", "-custom-metadata=bar=123", secretPath}) - - if exitStatus != 0 { - t.Fatalf("Expected 0 exit status but received %d", exitStatus) - } - - metaFullPath := basePath + "metadata/secret/my-secret" - commandOutput := ui.OutputWriter.String() + ui.ErrorWriter.String() - expectedOutput := "Success! 
Data written to: " + metaFullPath - - if !strings.Contains(commandOutput, expectedOutput) { - t.Fatalf("Expected command output %q but received %q", expectedOutput, commandOutput) - } - - metadata, err := client.Logical().Read(metaFullPath) - if err != nil { - t.Fatalf("Metadata read error: %#v", err) - } - - // JSON output from read decoded into map[string]interface{} - expectedCustomMetadata := map[string]interface{}{ - "foo": "abc", - "bar": "123", - } - - if diff := deep.Equal(metadata.Data["custom_metadata"], expectedCustomMetadata); len(diff) > 0 { - t.Fatal(diff) - } - - ui, cmd = testKVMetadataPutCommand(t) - cmd.client = client - - // Overwrite entire custom metadata with a single key - exitStatus = cmd.Run([]string{"-custom-metadata=baz=abc123", secretPath}) - - if exitStatus != 0 { - t.Fatalf("Expected 0 exit status but received %d", exitStatus) - } - - commandOutput = ui.OutputWriter.String() + ui.ErrorWriter.String() - - if !strings.Contains(commandOutput, expectedOutput) { - t.Fatalf("Expected command output %q but received %q", expectedOutput, commandOutput) - } - - metadata, err = client.Logical().Read(metaFullPath) - - if err != nil { - t.Fatalf("Metadata read error: %#v", err) - } - - expectedCustomMetadata = map[string]interface{}{ - "baz": "abc123", - } - - if diff := deep.Equal(metadata.Data["custom_metadata"], expectedCustomMetadata); len(diff) > 0 { - t.Fatal(diff) - } -} - -func TestKvMetadataPutCommand_UnprovidedFlags(t *testing.T) { - client, closer := testVaultServer(t) - defer closer() - - basePath := t.Name() + "/" - secretPath := basePath + "my-secret" - - if err := client.Sys().Mount(basePath, &api.MountInput{ - Type: "kv-v2", - }); err != nil { - t.Fatalf("kv-v2 mount error: %#v", err) - } - - _, cmd := testKVMetadataPutCommand(t) - cmd.client = client - - args := []string{"-cas-required=true", "-max-versions=10", secretPath} - code, _ := kvMetadataPutWithRetry(t, client, args, nil) - - if code != 0 { - t.Fatalf("expected 0 exit 
status but received %d", code) - } - - args = []string{"-custom-metadata=foo=bar", secretPath} - code, _ = kvMetadataPutWithRetry(t, client, args, nil) - - if code != 0 { - t.Fatalf("expected 0 exit status but received %d", code) - } - - secret, err := client.Logical().Read(basePath + "metadata/" + "my-secret") - if err != nil { - t.Fatal(err) - } - - if secret.Data["cas_required"] != true { - t.Fatalf("expected cas_required to be true but received %#v", secret.Data["cas_required"]) - } - - if secret.Data["max_versions"] != json.Number("10") { - t.Fatalf("expected max_versions to be 10 but received %#v", secret.Data["max_versions"]) - } -} +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + + + + + + +

Configure LDAP

+
+
+ +
+ +
+ + {{#each this.schemaOptions as |option|}} + + + {{option.title}} + {{option.description}} + + {{/each}} + + +
+ + +

Schema Options

+
+ + {{#if @model.schema}} +
+ +
+ {{else}} + + {{/if}} +
+ +
+ +
+ + + {{#if this.invalidFormMessage}} + + {{/if}} +
+
+ +{{#if this.showRotatePrompt}} + + + Rotate your root password? + + +

+ It’s best practice to rotate the administrator (root) password immediately after the initial configuration of the + LDAP engine. The rotation will update the password both in Vault and your directory server. Once rotated, + only Vault knows the new root password. +

+
+

+ Would you like to rotate your new credentials? You can also do this later. +

+
+ + + + + + +
+{{/if}} \ No newline at end of file diff --git a/command/kv_patch.go b/command/kv_patch.go index da96088d112f..139bc100162b 100644 --- a/command/kv_patch.go +++ b/command/kv_patch.go @@ -1,409 +1,159 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "context" - "fmt" - "io" - "os" - "path" - "strings" - - "github.com/hashicorp/vault/api" - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*KVPatchCommand)(nil) - _ cli.CommandAutocomplete = (*KVPatchCommand)(nil) -) - -type KVPatchCommand struct { - *BaseCommand - - flagCAS int - flagMethod string - flagMount string - testStdin io.Reader // for tests - flagRemoveData []string -} - -func (c *KVPatchCommand) Synopsis() string { - return "Sets or updates data in the KV store without overwriting" -} - -func (c *KVPatchCommand) Help() string { - helpText := ` -Usage: vault kv patch [options] KEY [DATA] - - *NOTE*: This is only supported for KV v2 engine mounts. - - Writes the data to the corresponding path in the key-value store. The data can be of - any type. - - $ vault kv patch -mount=secret foo bar=baz - - The deprecated path-like syntax can also be used, but this should be avoided, - as the fact that it is not actually the full API path to - the secret (secret/data/foo) can cause confusion: - - $ vault kv patch secret/foo bar=baz - - The data can also be consumed from a file on disk by prefixing with the "@" - symbol. For example: - - $ vault kv patch -mount=secret foo @data.json - - Or it can be read from stdin using the "-" symbol: - - $ echo "abcd1234" | vault kv patch -mount=secret foo bar=- - - To perform a Check-And-Set operation, specify the -cas flag with the - appropriate version number corresponding to the key you want to perform - the CAS operation on: - - $ vault kv patch -mount=secret -cas=1 foo bar=baz - - By default, this operation will attempt an HTTP PATCH operation. 
If your - policy does not allow that, it will fall back to a read/local update/write approach. - If you wish to specify which method this command should use, you may do so - with the -method flag. When -method=patch is specified, only an HTTP PATCH - operation will be tried. If it fails, the entire command will fail. - - $ vault kv patch -mount=secret -method=patch foo bar=baz - - When -method=rw is specified, only a read/local update/write approach will be tried. - This was the default behavior previous to Vault 1.9. - - $ vault kv patch -mount=secret -method=rw foo bar=baz - - To remove data from the corresponding path in the key-value store, kv patch can be used. - - $ vault kv patch -mount=secret -remove-data=bar foo - - Additional flags and more advanced use cases are detailed below. - -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *KVPatchCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) - - // Patch specific options - f := set.NewFlagSet("Common Options") - - f.IntVar(&IntVar{ - Name: "cas", - Target: &c.flagCAS, - Default: 0, - Usage: `Specifies to use a Check-And-Set operation. If set to 0 or not - set, the patch will be allowed. If the index is non-zero the patch will - only be allowed if the key’s current version matches the version - specified in the cas parameter.`, - }) - - f.StringVar(&StringVar{ - Name: "method", - Target: &c.flagMethod, - Usage: `Specifies which method of patching to use. If set to "patch", then - an HTTP PATCH request will be issued. If set to "rw", then a read will be - performed, then a local update, followed by a remote update.`, - }) - - f.StringVar(&StringVar{ - Name: "mount", - Target: &c.flagMount, - Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value - Usage: `Specifies the path where the KV backend is mounted. If specified, - the next argument will be interpreted as the secret path. 
If this flag is - not specified, the next argument will be interpreted as the combined mount - path and secret path, with /data/ automatically appended between KV - v2 secrets.`, - }) - - f.StringSliceVar(&StringSliceVar{ - Name: "remove-data", - Target: &c.flagRemoveData, - Default: []string{}, - Usage: "Key to remove from data. To specify multiple values, specify this flag multiple times.", - }) - - return set -} - -func (c *KVPatchCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultFiles() -} - -func (c *KVPatchCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *KVPatchCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - // Pull our fake stdin if needed - stdin := (io.Reader)(os.Stdin) - if c.testStdin != nil { - stdin = c.testStdin - } - - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected >1, got %d)", len(args))) - return 1 - case len(c.flagRemoveData) == 0 && len(args) == 1: - c.UI.Error("Must supply data") - return 1 - } - - var err error - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - newData, err := parseArgsData(stdin, args[1:]) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) - return 1 - } - - // If true, we're working with "-mount=secret foo" syntax. - // If false, we're using "secret/foo" syntax. - mountFlagSyntax := c.flagMount != "" - - var ( - mountPath string - partialPath string - v2 bool - ) - - // Parse the paths and grab the KV version - if mountFlagSyntax { - // In this case, this arg is the secret path (e.g. "foo"). 
- partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - if v2 { - partialPath = path.Join(mountPath, partialPath) - } - } else { - // In this case, this arg is a path-like combination of mountPath/secretPath. - // (e.g. "secret/foo") - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(partialPath, client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - } - - if !v2 { - c.UI.Error("K/V engine mount must be version 2 for patch support") - return 2 - } - - fullPath := addPrefixToKVPath(partialPath, mountPath, "data", false) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - // collecting data to be removed - if newData == nil { - newData = make(map[string]interface{}) - } - - for _, key := range c.flagRemoveData { - // A null in a JSON merge patch payload will remove the associated key - newData[key] = nil - } - - // Check the method and behave accordingly - var secret *api.Secret - var code int - - switch c.flagMethod { - case "rw": - secret, code = c.readThenWrite(client, fullPath, newData) - case "patch": - secret, code = c.mergePatch(client, fullPath, newData, false) - case "": - secret, code = c.mergePatch(client, fullPath, newData, true) - default: - c.UI.Error(fmt.Sprintf("Unsupported method provided to -method flag: %s", c.flagMethod)) - return 2 - } - - if code != 0 { - return code - } - if secret == nil { - // Don't output anything if there's no secret - return 0 - } - - if c.flagField != "" { - return PrintRawField(c.UI, secret, c.flagField) - } - - // If the secret is wrapped, return the wrapped response. 
- if secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 { - return OutputSecret(c.UI, secret) - } - - if Format(c.UI) == "table" { - outputPath(c.UI, fullPath, "Secret Path") - metadata := secret.Data - c.UI.Info(getHeaderForMap("Metadata", metadata)) - return OutputData(c.UI, metadata) - } - - return OutputSecret(c.UI, secret) -} - -func (c *KVPatchCommand) readThenWrite(client *api.Client, path string, newData map[string]interface{}) (*api.Secret, int) { - // First, do a read. - // Note that we don't want to see curl output for the read request. - curOutputCurl := client.OutputCurlString() - client.SetOutputCurlString(false) - outputPolicy := client.OutputPolicy() - client.SetOutputPolicy(false) - secret, err := kvReadRequest(client, path, nil) - if err != nil { - c.UI.Error(fmt.Sprintf("Error doing pre-read at %s: %s", path, err)) - return nil, 2 - } - client.SetOutputCurlString(curOutputCurl) - client.SetOutputPolicy(outputPolicy) - - // Make sure a value already exists - if secret == nil || secret.Data == nil { - c.UI.Error(fmt.Sprintf("No value found at %s", path)) - return nil, 2 - } - - // Verify metadata found - rawMeta, ok := secret.Data["metadata"] - if !ok || rawMeta == nil { - c.UI.Error(fmt.Sprintf("No metadata found at %s; patch only works on existing data", path)) - return nil, 2 - } - meta, ok := rawMeta.(map[string]interface{}) - if !ok { - c.UI.Error(fmt.Sprintf("Metadata found at %s is not the expected type (JSON object)", path)) - return nil, 2 - } - if meta == nil { - c.UI.Error(fmt.Sprintf("No metadata found at %s; patch only works on existing data", path)) - return nil, 2 - } - - // Verify old data found - rawData, ok := secret.Data["data"] - if !ok || rawData == nil { - c.UI.Error(fmt.Sprintf("No data found at %s; patch only works on existing data", path)) - return nil, 2 - } - data, ok := rawData.(map[string]interface{}) - if !ok { - c.UI.Error(fmt.Sprintf("Data found at %s is not the expected type (JSON object)", path)) - return nil, 2 - 
} - if data == nil { - c.UI.Error(fmt.Sprintf("No data found at %s; patch only works on existing data", path)) - return nil, 2 - } - - // Copy new data over - for k, v := range newData { - data[k] = v - } - - secret, err = client.Logical().Write(path, map[string]interface{}{ - "data": data, - "options": map[string]interface{}{ - "cas": meta["version"], - }, - }) - if err != nil { - c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", path, err)) - return nil, 2 - } - - if secret == nil { - // Don't output anything unless using the "table" format - if Format(c.UI) == "table" { - c.UI.Info(fmt.Sprintf("Success! Data written to: %s", path)) - } - return nil, 0 - } - - if c.flagField != "" { - return nil, PrintRawField(c.UI, secret, c.flagField) - } - - return secret, 0 -} - -func (c *KVPatchCommand) mergePatch(client *api.Client, path string, newData map[string]interface{}, rwFallback bool) (*api.Secret, int) { - data := map[string]interface{}{ - "data": newData, - "options": map[string]interface{}{}, - } - - if c.flagCAS > 0 { - data["options"].(map[string]interface{})["cas"] = c.flagCAS - } - - secret, err := client.Logical().JSONMergePatch(context.Background(), path, data) - if err != nil { - // If it's a 405, that probably means the server is running a pre-1.9 - // Vault version that doesn't support the HTTP PATCH method. - // Fall back to the old way of doing it if the user didn't specify a -method. - // If they did, and it was "patch", then just error. - if re, ok := err.(*api.ResponseError); ok && re.StatusCode == 405 && rwFallback { - return c.readThenWrite(client, path, newData) - } - // If it's a 403, that probably means they don't have the patch capability in their policy. Fall back to - // the old way of doing it if the user didn't specify a -method. If they did, and it was "patch", then just error. 
- if re, ok := err.(*api.ResponseError); ok && re.StatusCode == 403 && rwFallback { - c.UI.Warn(fmt.Sprintf("Data was written to %s but we recommend that you add the \"patch\" capability to your ACL policy in order to use HTTP PATCH in the future.", path)) - return c.readThenWrite(client, path, newData) - } - - c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", path, err)) - return nil, 2 - } - - if secret == nil { - // Don't output anything unless using the "table" format - if Format(c.UI) == "table" { - c.UI.Info(fmt.Sprintf("Success! Data written to: %s", path)) - } - return nil, 0 - } - - if c.flagField != "" { - return nil, PrintRawField(c.UI, secret, c.flagField) - } - - return secret, 0 -} +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'ember-qunit'; +import { render, click, find } from '@ember/test-helpers'; +import hbs from 'htmlbars-inline-precompile'; +import sinon from 'sinon'; + +const SELECTORS = { + modalToggle: '[data-test-confirm-action-trigger]', + title: '[data-test-confirm-action-title]', + message: '[data-test-confirm-action-message]', + confirm: '[data-test-confirm-button]', + cancel: '[data-test-confirm-cancel-button]', +}; +module('Integration | Component | confirm-action', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + this.onConfirm = sinon.spy(); + }); + + test('it renders defaults and calls onConfirmAction', async function (assert) { + await render(hbs` + + `); + + assert.dom(SELECTORS.modalToggle).hasText('DELETE', 'renders button text'); + await click(SELECTORS.modalToggle); + // hasClass assertion wasn't working so this is the workaround + assert.strictEqual( + find('#confirm-action-modal').className, + 'hds-modal hds-modal--size-small hds-modal--color-critical has-text-left', + 'renders critical modal color by default' + ); + assert.strictEqual( + find(SELECTORS.confirm).className, 
+ 'hds-button hds-button--size-medium hds-button--color-critical', + 'renders critical confirm button' + ); + assert.dom(SELECTORS.title).hasText('Are you sure?', 'renders default title'); + assert + .dom(SELECTORS.message) + .hasText('You will not be able to recover it later.', 'renders default body text'); + await click(SELECTORS.cancel); + assert.false(this.onConfirm.called, 'does not call the action when Cancel is clicked'); + await click(SELECTORS.modalToggle); + await click(SELECTORS.confirm); + assert.true(this.onConfirm.called, 'calls the action when Confirm is clicked'); + assert.dom(SELECTORS.title).doesNotExist('modal closes after confirm is clicked'); + }); + + test('it renders isInDropdown defaults and calls onConfirmAction', async function (assert) { + await render(hbs` + + `); + + assert.dom(`li ${SELECTORS.modalToggle}`).exists('element renders inside
  • '); + assert.dom(SELECTORS.modalToggle).hasClass('hds-confirm-action-critical', 'button has dropdown styling'); + await click(SELECTORS.modalToggle); + assert.dom(SELECTORS.title).hasText('Are you sure?', 'renders default title'); + assert + .dom(SELECTORS.message) + .hasText('You will not be able to recover it later.', 'renders default body text'); + await click('[data-test-confirm-cancel-button]'); + assert.false(this.onConfirm.called, 'does not call the action when Cancel is clicked'); + await click(SELECTORS.modalToggle); + await click(SELECTORS.confirm); + assert.true(this.onConfirm.called, 'calls the action when Confirm is clicked'); + assert.dom(SELECTORS.title).doesNotExist('modal closes after confirm is clicked'); + }); + + test('it renders loading state', async function (assert) { + await render(hbs` + + `); + + await click(SELECTORS.modalToggle); + + assert.dom(SELECTORS.confirm).isDisabled('disables confirm button when loading'); + assert.dom('[data-test-confirm-button] [data-test-icon="loading"]').exists('it renders loading icon'); + }); + + test('it renders disabledMessage modal', async function (assert) { + this.condition = true; + await render(hbs` + + `); + + await click(SELECTORS.modalToggle); + assert.strictEqual( + find('#confirm-action-modal').className, + 'hds-modal hds-modal--size-small hds-modal--color-neutral has-text-left', + 'renders critical modal color by default' + ); + assert.dom(SELECTORS.title).hasText('Not allowed', 'renders disabled title'); + assert + .dom(SELECTORS.message) + .hasText('This is the reason you cannot do the thing', 'renders disabled message as body text'); + assert.dom(SELECTORS.confirm).doesNotExist('does not render confirm action button'); + assert.dom(SELECTORS.cancel).hasText('Close'); + }); + + test('it renders passed args', async function (assert) { + this.condition = false; + await render(hbs` + + `); + + // hasClass assertion wasn't working so this is the workaround + assert.strictEqual( + 
find(SELECTORS.modalToggle).className, + 'hds-button hds-button--size-medium hds-button--color-secondary', + 'renders @buttonColor classes' + ); + await click(SELECTORS.modalToggle); + assert.strictEqual( + find('#confirm-action-modal').className, + 'hds-modal hds-modal--size-small hds-modal--color-warning has-text-left', + 'renders warning modal' + ); + assert.strictEqual( + find(SELECTORS.confirm).className, + 'hds-button hds-button--size-medium hds-button--color-primary', + 'renders primary confirm button' + ); + assert.dom(SELECTORS.title).hasText('Do this?', 'renders passed title'); + assert.dom(SELECTORS.message).hasText('Are you really, really sure?', 'renders passed body text'); + assert.dom(SELECTORS.confirm).hasText('Confirm'); + }); +}); diff --git a/command/kv_put.go b/command/kv_put.go index d450b4415ccc..2c22eedd8ea8 100644 --- a/command/kv_put.go +++ b/command/kv_put.go @@ -1,235 +1,68 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "fmt" - "io" - "os" - "path" - "strings" - - "github.com/mitchellh/cli" - "github.com/posener/complete" -) - -var ( - _ cli.Command = (*KVPutCommand)(nil) - _ cli.CommandAutocomplete = (*KVPutCommand)(nil) -) - -type KVPutCommand struct { - *BaseCommand - - flagCAS int - flagMount string - testStdin io.Reader // for tests -} - -func (c *KVPutCommand) Synopsis() string { - return "Sets or updates data in the KV store" -} - -func (c *KVPutCommand) Help() string { - helpText := ` -Usage: vault kv put [options] KEY [DATA] - - Writes the data to the given path in the key-value store. The data can be of - any type. 
- - $ vault kv put -mount=secret foo bar=baz - - The deprecated path-like syntax can also be used, but this should be avoided - for KV v2, as the fact that it is not actually the full API path to - the secret (secret/data/foo) can cause confusion: - - $ vault kv put secret/foo bar=baz - - The data can also be consumed from a file on disk by prefixing with the "@" - symbol. For example: - - $ vault kv put -mount=secret foo @data.json - - Or it can be read from stdin using the "-" symbol: - - $ echo "abcd1234" | vault kv put -mount=secret foo bar=- - - To perform a Check-And-Set operation, specify the -cas flag with the - appropriate version number corresponding to the key you want to perform - the CAS operation on: - - $ vault kv put -mount=secret -cas=1 foo bar=baz - - Additional flags and more advanced use cases are detailed below. - -` + c.Flags().Help() - return strings.TrimSpace(helpText) -} - -func (c *KVPutCommand) Flags() *FlagSets { - set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) - - // Common Options - f := set.NewFlagSet("Common Options") - - f.IntVar(&IntVar{ - Name: "cas", - Target: &c.flagCAS, - Default: -1, - Usage: `Specifies to use a Check-And-Set operation. If not set the write - will be allowed. If set to 0 a write will only be allowed if the key - doesn’t exist. If the index is non-zero the write will only be allowed - if the key’s current version matches the version specified in the cas - parameter.`, - }) - - f.StringVar(&StringVar{ - Name: "mount", - Target: &c.flagMount, - Default: "", // no default, because the handling of the next arg is determined by whether this flag has a value - Usage: `Specifies the path where the KV backend is mounted. If specified, - the next argument will be interpreted as the secret path. 
If this flag is - not specified, the next argument will be interpreted as the combined mount - path and secret path, with /data/ automatically appended between KV - v2 secrets.`, - }) - - return set -} - -func (c *KVPutCommand) AutocompleteArgs() complete.Predictor { - return c.PredictVaultFolders() -} - -func (c *KVPutCommand) AutocompleteFlags() complete.Flags { - return c.Flags().Completions() -} - -func (c *KVPutCommand) Run(args []string) int { - f := c.Flags() - - if err := f.Parse(args); err != nil { - c.UI.Error(err.Error()) - return 1 - } - - args = f.Args() - // Pull our fake stdin if needed - stdin := (io.Reader)(os.Stdin) - if c.testStdin != nil { - stdin = c.testStdin - } - - switch { - case len(args) < 1: - c.UI.Error(fmt.Sprintf("Not enough arguments (expected >1, got %d)", len(args))) - return 1 - case len(args) == 1: - c.UI.Error("Must supply data") - return 1 - } - - var err error - - client, err := c.Client() - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - data, err := parseArgsData(stdin, args[1:]) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) - return 1 - } - - // If true, we're working with "-mount=secret foo" syntax. - // If false, we're using "secret/foo" syntax. - mountFlagSyntax := c.flagMount != "" - - var ( - mountPath string - partialPath string - v2 bool - ) - - // Parse the paths and grab the KV version - if mountFlagSyntax { - // In this case, this arg is the secret path (e.g. "foo"). - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(sanitizePath(c.flagMount), client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - - if v2 { - partialPath = path.Join(mountPath, partialPath) - } - } else { - // In this case, this arg is a path-like combination of mountPath/secretPath. - // (e.g. 
"secret/foo") - partialPath = sanitizePath(args[0]) - mountPath, v2, err = isKVv2(partialPath, client) - if err != nil { - c.UI.Error(err.Error()) - return 2 - } - } - - // Add /data to v2 paths only - var fullPath string - if v2 { - fullPath = addPrefixToKVPath(partialPath, mountPath, "data", false) - data = map[string]interface{}{ - "data": data, - "options": map[string]interface{}{}, - } - - if c.flagCAS > -1 { - data["options"].(map[string]interface{})["cas"] = c.flagCAS - } - } else { - // v1 - if mountFlagSyntax { - fullPath = path.Join(mountPath, partialPath) - } else { - fullPath = partialPath - } - } - - secret, err := client.Logical().Write(fullPath, data) - if err != nil { - c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", fullPath, err)) - if secret != nil { - OutputSecret(c.UI, secret) - } - return 2 - } - if secret == nil { - // Don't output anything unless using the "table" format - if Format(c.UI) == "table" { - c.UI.Info(fmt.Sprintf("Success! Data written to: %s", fullPath)) - } - return 0 - } - - if c.flagField != "" { - return PrintRawField(c.UI, secret, c.flagField) - } - - // If the secret is wrapped, return the wrapped response. - if secret.WrapInfo != nil && secret.WrapInfo.TTL != 0 { - return OutputSecret(c.UI, secret) - } - - if Format(c.UI) == "table" { - outputPath(c.UI, fullPath, "Secret Path") - metadata := secret.Data - c.UI.Info(getHeaderForMap("Metadata", metadata)) - return OutputData(c.UI, metadata) - } - - return OutputSecret(c.UI, secret) -} +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + +{{#if @isInDropdown}} + {{! Hds component renders
  • and - -
    - - Cancel - -
    - - \ No newline at end of file +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "fmt" + "strings" + "time" + + "github.com/hashicorp/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*SecretsMoveCommand)(nil) + _ cli.CommandAutocomplete = (*SecretsMoveCommand)(nil) +) + +const ( + MountMigrationStatusSuccess = "success" + MountMigrationStatusFailure = "failure" +) + +type SecretsMoveCommand struct { + *BaseCommand +} + +func (c *SecretsMoveCommand) Synopsis() string { + return "Move a secrets engine to a new path" +} + +func (c *SecretsMoveCommand) Help() string { + helpText := ` +Usage: vault secrets move [options] SOURCE DESTINATION + + Moves an existing secrets engine to a new path. Any leases from the old + secrets engine are revoked, but all configuration associated with the engine + is preserved. It initiates the migration and intermittently polls its status, + exiting if a final state is reached. + + This command works within or across namespaces, both source and destination paths + can be prefixed with a namespace heirarchy relative to the current namespace. + + WARNING! Moving a secrets engine will revoke any leases from the + old engine. 
+ + Move the secrets engine at secret/ to generic/: + + $ vault secrets move secret/ generic/ + + Move the secrets engine at ns1/secret/ across namespaces to ns2/generic/, + where ns1 and ns2 are child namespaces of the current namespace: + + $ vault secrets move ns1/secret/ ns2/generic/ + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *SecretsMoveCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP) +} + +func (c *SecretsMoveCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultMounts() +} + +func (c *SecretsMoveCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *SecretsMoveCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 2: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 2, got %d)", len(args))) + return 1 + case len(args) > 2: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 2, got %d)", len(args))) + return 1 + } + + // Grab the source and destination + source := ensureTrailingSlash(args[0]) + destination := ensureTrailingSlash(args[1]) + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + remountResp, err := client.Sys().StartRemount(source, destination) + if err != nil { + c.UI.Error(fmt.Sprintf("Error moving secrets engine %s to %s: %s", source, destination, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Started moving secrets engine %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) + + // Poll the status endpoint with the returned migration ID + // Exit if a terminal status is reached, else wait and retry + for { + remountStatusResp, err := client.Sys().RemountStatus(remountResp.MigrationID) + if err != nil { + c.UI.Error(fmt.Sprintf("Error checking migration status of secrets engine %s to %s: %s", source, destination, err)) + return 2 + } + if 
remountStatusResp.MigrationInfo.MigrationStatus == MountMigrationStatusSuccess { + c.UI.Output(fmt.Sprintf("Success! Finished moving secrets engine %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) + return 0 + } + if remountStatusResp.MigrationInfo.MigrationStatus == MountMigrationStatusFailure { + c.UI.Error(fmt.Sprintf("Failure! Error encountered moving secrets engine %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) + return 0 + } + c.UI.Output(fmt.Sprintf("Waiting for terminal status in migration of secrets engine %s to %s, with migration ID %s", source, destination, remountResp.MigrationID)) + time.Sleep(10 * time.Second) + } + + return 0 +} diff --git a/ui/lib/replication/addon/templates/mode/secondaries/config-edit.hbs b/ui/lib/replication/addon/templates/mode/secondaries/config-edit.hbs index 77a4b57f33d4..ed7a5a5c629c 100644 --- a/ui/lib/replication/addon/templates/mode/secondaries/config-edit.hbs +++ b/ui/lib/replication/addon/templates/mode/secondaries/config-edit.hbs @@ -1,29 +1,142 @@ -{{! - Copyright (c) HashiCorp, Inc. - SPDX-License-Identifier: BUSL-1.1 -~}} - - -
    -

    - Edit path filter config for - {{this.model.config.id}} -

    -
    -
    - -
    -
    -
    - -
    -
    - - Cancel - -
    -
    -
    - \ No newline at end of file +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/cli" +) + +func testSecretsMoveCommand(tb testing.TB) (*cli.MockUi, *SecretsMoveCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &SecretsMoveCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestSecretsMoveCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + out string + code int + }{ + { + "not_enough_args", + []string{}, + "Not enough arguments", + 1, + }, + { + "too_many_args", + []string{"foo", "bar", "baz"}, + "Too many arguments", + 1, + }, + { + "non_existent", + []string{"not_real", "over_here"}, + "Error moving secrets engine not_real/ to over_here/", + 2, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testSecretsMoveCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("integration", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + ui, cmd := testSecretsMoveCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "secret/", "generic/", + }) + if exp := 0; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Success! 
Finished moving secrets engine secret/ to generic/" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + + mounts, err := client.Sys().ListMounts() + if err != nil { + t.Fatal(err) + } + + if _, ok := mounts["generic/"]; !ok { + t.Errorf("expected mount at generic/: %#v", mounts) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testSecretsMoveCommand(t) + cmd.client = client + + code := cmd.Run([]string{ + "secret/", "generic/", + }) + if exp := 2; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error moving secrets engine secret/ to generic/:" + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testSecretsMoveCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git a/ui/lib/replication/addon/templates/mode/secondaries/revoke.hbs b/ui/lib/replication/addon/templates/mode/secondaries/revoke.hbs index fa52a6becde6..7dcf3feb9293 100644 --- a/ui/lib/replication/addon/templates/mode/secondaries/revoke.hbs +++ b/ui/lib/replication/addon/templates/mode/secondaries/revoke.hbs @@ -1,40 +1,267 @@ -{{! - Copyright (c) HashiCorp, Inc. - SPDX-License-Identifier: BUSL-1.1 -~}} - -
    -

    - Revoke a secondary token -

    -
    - -
    - -
    - -
    -

    - The secondary id to revoke; given initially to generate a secondary token. -

    -
    -
    -
    - -
    -
    - {{#unless this.isRevoking}} - - Cancel - - {{/unless}} -
    -
    \ No newline at end of file +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "flag" + "fmt" + "strconv" + "strings" + "time" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*SecretsTuneCommand)(nil) + _ cli.CommandAutocomplete = (*SecretsTuneCommand)(nil) +) + +type SecretsTuneCommand struct { + *BaseCommand + + flagAuditNonHMACRequestKeys []string + flagAuditNonHMACResponseKeys []string + flagDefaultLeaseTTL time.Duration + flagDescription string + flagListingVisibility string + flagMaxLeaseTTL time.Duration + flagPassthroughRequestHeaders []string + flagAllowedResponseHeaders []string + flagOptions map[string]string + flagVersion int + flagPluginVersion string + flagAllowedManagedKeys []string + flagDelegatedAuthAccessors []string +} + +func (c *SecretsTuneCommand) Synopsis() string { + return "Tune a secrets engine configuration" +} + +func (c *SecretsTuneCommand) Help() string { + helpText := ` +Usage: vault secrets tune [options] PATH + + Tunes the configuration options for the secrets engine at the given PATH. + The argument corresponds to the PATH where the secrets engine is enabled, + not the TYPE! + + Tune the default lease for the PKI secrets engine: + + $ vault secrets tune -default-lease-ttl=72h pki/ + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *SecretsTuneCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAuditNonHMACRequestKeys, + Target: &c.flagAuditNonHMACRequestKeys, + Usage: "Key that will not be HMAC'd by audit devices in the request data " + + "object. 
To specify multiple values, specify this flag multiple times.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAuditNonHMACResponseKeys, + Target: &c.flagAuditNonHMACResponseKeys, + Usage: "Key that will not be HMAC'd by audit devices in the response data " + + "object. To specify multiple values, specify this flag multiple times.", + }) + + f.DurationVar(&DurationVar{ + Name: "default-lease-ttl", + Target: &c.flagDefaultLeaseTTL, + Default: 0, + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "The default lease TTL for this secrets engine. If unspecified, " + + "this defaults to the Vault server's globally configured default lease " + + "TTL, or a previously configured value for the secrets engine.", + }) + + f.StringVar(&StringVar{ + Name: flagNameDescription, + Target: &c.flagDescription, + Usage: "Human-friendly description of this secret engine. This overrides the " + + "current stored value, if any.", + }) + + f.StringVar(&StringVar{ + Name: flagNameListingVisibility, + Target: &c.flagListingVisibility, + Usage: "Determines the visibility of the mount in the UI-specific listing " + + "endpoint.", + }) + + f.DurationVar(&DurationVar{ + Name: "max-lease-ttl", + Target: &c.flagMaxLeaseTTL, + Default: 0, + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "The maximum lease TTL for this secrets engine. If unspecified, " + + "this defaults to the Vault server's globally configured maximum lease " + + "TTL, or a previously configured value for the secrets engine.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNamePassthroughRequestHeaders, + Target: &c.flagPassthroughRequestHeaders, + Usage: "Request header value that will be sent to the plugin. To specify " + + "multiple values, specify this flag multiple times.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAllowedResponseHeaders, + Target: &c.flagAllowedResponseHeaders, + Usage: "Response header value that plugins will be allowed to set. 
To " + + "specify multiple values, specify this flag multiple times.", + }) + + f.StringMapVar(&StringMapVar{ + Name: "options", + Target: &c.flagOptions, + Completion: complete.PredictAnything, + Usage: "Key-value pair provided as key=value for the mount options. " + + "This can be specified multiple times.", + }) + + f.IntVar(&IntVar{ + Name: "version", + Target: &c.flagVersion, + Default: 0, + Usage: "Select the version of the engine to run. Not supported by all engines.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameAllowedManagedKeys, + Target: &c.flagAllowedManagedKeys, + Usage: "Managed key name(s) that the mount in question is allowed to access. " + + "Note that multiple keys may be specified by providing this option multiple times, " + + "each time with 1 key.", + }) + + f.StringVar(&StringVar{ + Name: flagNamePluginVersion, + Target: &c.flagPluginVersion, + Default: "", + Usage: "Select the semantic version of the plugin to run. The new version must be registered in " + + "the plugin catalog, and will not start running until the plugin is reloaded.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: flagNameDelegatedAuthAccessors, + Target: &c.flagDelegatedAuthAccessors, + Usage: "A list of permitted authentication accessors this backend can delegate authentication to. 
" + + "Note that multiple values may be specified by providing this option multiple times, " + + "each time with 1 accessor.", + }) + + return set +} + +func (c *SecretsTuneCommand) AutocompleteArgs() complete.Predictor { + return c.PredictVaultMounts() +} + +func (c *SecretsTuneCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *SecretsTuneCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + switch { + case len(args) < 1: + c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1, got %d)", len(args))) + return 1 + case len(args) > 1: + c.UI.Error(fmt.Sprintf("Too many arguments (expected 1, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 2 + } + + if c.flagVersion > 0 { + if c.flagOptions == nil { + c.flagOptions = make(map[string]string) + } + c.flagOptions["version"] = strconv.Itoa(c.flagVersion) + } + + // Append a trailing slash to indicate it's a path in output + mountPath := ensureTrailingSlash(sanitizePath(args[0])) + + mountConfigInput := api.MountConfigInput{ + DefaultLeaseTTL: ttlToAPI(c.flagDefaultLeaseTTL), + MaxLeaseTTL: ttlToAPI(c.flagMaxLeaseTTL), + Options: c.flagOptions, + } + + // Set these values only if they are provided in the CLI + f.Visit(func(fl *flag.Flag) { + if fl.Name == flagNameAuditNonHMACRequestKeys { + mountConfigInput.AuditNonHMACRequestKeys = c.flagAuditNonHMACRequestKeys + } + + if fl.Name == flagNameAuditNonHMACResponseKeys { + mountConfigInput.AuditNonHMACResponseKeys = c.flagAuditNonHMACResponseKeys + } + + if fl.Name == flagNameDescription { + mountConfigInput.Description = &c.flagDescription + } + + if fl.Name == flagNameListingVisibility { + mountConfigInput.ListingVisibility = c.flagListingVisibility + } + + if fl.Name == flagNamePassthroughRequestHeaders { + mountConfigInput.PassthroughRequestHeaders = 
c.flagPassthroughRequestHeaders + } + + if fl.Name == flagNameAllowedResponseHeaders { + mountConfigInput.AllowedResponseHeaders = c.flagAllowedResponseHeaders + } + + if fl.Name == flagNameAllowedManagedKeys { + mountConfigInput.AllowedManagedKeys = c.flagAllowedManagedKeys + } + + if fl.Name == flagNamePluginVersion { + mountConfigInput.PluginVersion = c.flagPluginVersion + } + + if fl.Name == flagNameDelegatedAuthAccessors { + mountConfigInput.DelegatedAuthAccessors = c.flagDelegatedAuthAccessors + } + }) + + if err := client.Sys().TuneMount(mountPath, mountConfigInput); err != nil { + c.UI.Error(fmt.Sprintf("Error tuning secrets engine %s: %s", mountPath, err)) + return 2 + } + + c.UI.Output(fmt.Sprintf("Success! Tuned the secrets engine at: %s", mountPath)) + return 0 +} diff --git a/ui/mirage/handlers/base.js b/ui/mirage/handlers/base.js index 2693f4a7592b..5c1670db92d7 100644 --- a/ui/mirage/handlers/base.js +++ b/ui/mirage/handlers/base.js @@ -1,87 +1,370 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: BUSL-1.1 - */ - -// base handlers used in mirage config when a specific handler is not specified -const EXPIRY_DATE = '2021-05-12T23:20:50.52Z'; - -export default function (server) { - server.get('/sys/internal/ui/feature-flags', (db) => { - const featuresResponse = db.features.first(); - return { - data: { - feature_flags: featuresResponse ? 
featuresResponse.feature_flags : null, - }, - }; - }); - - server.get('/sys/health', function () { - return { - initialized: true, - sealed: false, - standby: false, - license: { - expiry: '2021-05-12T23:20:50.52Z', - state: 'stored', - }, - performance_standby: false, - replication_performance_mode: 'disabled', - replication_dr_mode: 'disabled', - server_time_utc: 1622562585, - version: '1.9.0+ent', - cluster_name: 'vault-cluster-e779cd7c', - cluster_id: '5f20f5ab-acea-0481-787e-71ec2ff5a60b', - last_wal: 121, - }; - }); - - server.get('/sys/license/status', function () { - return { - data: { - autoloading_used: false, - persisted_autoload: { - expiration_time: EXPIRY_DATE, - features: ['DR Replication', 'Namespaces', 'Lease Count Quotas', 'Automated Snapshots'], - license_id: '0eca7ef8-ebc0-f875-315e-3cc94a7870cf', - performance_standby_count: 0, - start_time: '2020-04-28T00:00:00Z', - }, - autoloaded: { - expiration_time: EXPIRY_DATE, - features: ['DR Replication', 'Namespaces', 'Lease Count Quotas', 'Automated Snapshots'], - license_id: '0eca7ef8-ebc0-f875-315e-3cc94a7870cf', - performance_standby_count: 0, - start_time: '2020-04-28T00:00:00Z', - }, - }, - }; - }); - - server.get('sys/namespaces', function () { - return { - data: { - keys: [ - 'ns1/', - 'ns2/', - 'ns3/', - 'ns4/', - 'ns5/', - 'ns6/', - 'ns7/', - 'ns8/', - 'ns9/', - 'ns10/', - 'ns11/', - 'ns12/', - 'ns13/', - 'ns14/', - 'ns15/', - 'ns16/', - 'ns17/', - 'ns18/', - ], - }, - }; - }); +// Copyright (c) HashiCorp, Inc. 
// testSecretsTuneCommand returns a mock UI and a SecretsTuneCommand wired to
// it, for exercising the command in tests without a real terminal.
func testSecretsTuneCommand(tb testing.TB) (*cli.MockUi, *SecretsTuneCommand) {
	tb.Helper()

	ui := cli.NewMockUi()
	return ui, &SecretsTuneCommand{
		BaseCommand: &BaseCommand{
			UI: ui,
		},
	}
}

// TestSecretsTuneCommand_Run exercises "vault secrets tune" end-to-end
// against in-process test Vault servers: argument validation, KV version
// downgrade protection, tuning every supported flag, description semantics,
// and client-failure handling.
func TestSecretsTuneCommand_Run(t *testing.T) {
	t.Parallel()

	// Table of pure argument-validation cases: expected substring of the
	// combined output and expected exit code.
	cases := []struct {
		name string
		args []string
		out  string
		code int
	}{
		{
			"not_enough_args",
			[]string{},
			"Not enough arguments",
			1,
		},
		{
			"too_many_args",
			[]string{"foo", "bar"},
			"Too many arguments",
			1,
		},
	}

	t.Run("validations", func(t *testing.T) {
		t.Parallel()

		for _, tc := range cases {
			tc := tc

			t.Run(tc.name, func(t *testing.T) {
				t.Parallel()

				client, closer := testVaultServer(t)
				defer closer()

				ui, cmd := testSecretsTuneCommand(t)
				cmd.client = client

				code := cmd.Run(tc.args)
				if code != tc.code {
					t.Errorf("expected %d to be %d", code, tc.code)
				}

				combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
				if !strings.Contains(combined, tc.out) {
					t.Errorf("expected %q to contain %q", combined, tc.out)
				}
			})
		}
	})

	// Tuning options on a KV v2 mount without passing -version must not
	// downgrade the engine back to v1.
	t.Run("protect_downgrade", func(t *testing.T) {
		t.Parallel()
		client, closer := testVaultServer(t)
		defer closer()

		ui, cmd := testSecretsTuneCommand(t)
		cmd.client = client

		// Mount
		if err := client.Sys().Mount("kv", &api.MountInput{
			Type: "kv",
			Options: map[string]string{
				"version": "2",
			},
		}); err != nil {
			t.Fatal(err)
		}

		// confirm default max_versions
		mounts, err := client.Sys().ListMounts()
		if err != nil {
			t.Fatal(err)
		}

		mountInfo, ok := mounts["kv/"]
		if !ok {
			t.Fatalf("expected mount to exist")
		}
		if exp := "kv"; mountInfo.Type != exp {
			t.Errorf("expected %q to be %q", mountInfo.Type, exp)
		}
		if exp := "2"; mountInfo.Options["version"] != exp {
			t.Errorf("expected %q to be %q", mountInfo.Options["version"], exp)
		}

		if exp := ""; mountInfo.Options["max_versions"] != exp {
			t.Errorf("expected %s to be empty", mountInfo.Options["max_versions"])
		}

		// omitting the version should not cause a downgrade
		code := cmd.Run([]string{
			"-options", "max_versions=2",
			"kv/",
		})
		if exp := 0; code != exp {
			t.Errorf("expected %d to be %d", code, exp)
		}

		expected := "Success! Tuned the secrets engine at: kv/"
		combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
		if !strings.Contains(combined, expected) {
			t.Errorf("expected %q to contain %q", combined, expected)
		}

		// Re-list and verify the version option survived the tune while the
		// new max_versions option was applied.
		mounts, err = client.Sys().ListMounts()
		if err != nil {
			t.Fatal(err)
		}

		mountInfo, ok = mounts["kv/"]
		if !ok {
			t.Fatalf("expected mount to exist")
		}
		if exp := "2"; mountInfo.Options["version"] != exp {
			t.Errorf("expected %q to be %q", mountInfo.Options["version"], exp)
		}
		if exp := "kv"; mountInfo.Type != exp {
			t.Errorf("expected %q to be %q", mountInfo.Type, exp)
		}
		if exp := "2"; mountInfo.Options["max_versions"] != exp {
			t.Errorf("expected %s to be %s", mountInfo.Options["max_versions"], exp)
		}
	})

	t.Run("integration", func(t *testing.T) {
		// Tune every supported flag at once and verify each lands on the
		// mount's config. Uses a versioned external pki plugin so that
		// -plugin-version can be exercised too.
		t.Run("flags_all", func(t *testing.T) {
			t.Parallel()
			pluginDir, cleanup := corehelpers.MakeTestPluginDir(t)
			defer cleanup(t)

			client, _, closer := testVaultServerPluginDir(t, pluginDir)
			defer closer()

			ui, cmd := testSecretsTuneCommand(t)
			cmd.client = client

			// Mount
			if err := client.Sys().Mount("mount_tune_integration", &api.MountInput{
				Type: "pki",
			}); err != nil {
				t.Fatal(err)
			}

			mounts, err := client.Sys().ListMounts()
			if err != nil {
				t.Fatal(err)
			}
			mountInfo, ok := mounts["mount_tune_integration/"]
			if !ok {
				t.Fatalf("expected mount to exist")
			}

			// Freshly mounted engine has no pinned plugin version yet.
			if exp := ""; mountInfo.PluginVersion != exp {
				t.Errorf("expected %q to be %q", mountInfo.PluginVersion, exp)
			}

			_, _, version := testPluginCreateAndRegisterVersioned(t, client, pluginDir, "pki", api.PluginTypeSecrets)

			code := cmd.Run([]string{
				"-description", "new description",
				"-default-lease-ttl", "30m",
				"-max-lease-ttl", "1h",
				"-audit-non-hmac-request-keys", "foo,bar",
				"-audit-non-hmac-response-keys", "foo,bar",
				"-passthrough-request-headers", "authorization",
				"-passthrough-request-headers", "www-authentication",
				"-allowed-response-headers", "authorization,www-authentication",
				"-allowed-managed-keys", "key1,key2",
				"-listing-visibility", "unauth",
				"-plugin-version", version,
				"mount_tune_integration/",
			})
			if exp := 0; code != exp {
				t.Errorf("expected %d to be %d", code, exp)
			}

			expected := "Success! Tuned the secrets engine at: mount_tune_integration/"
			combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
			if !strings.Contains(combined, expected) {
				t.Errorf("expected %q to contain %q", combined, expected)
			}

			mounts, err = client.Sys().ListMounts()
			if err != nil {
				t.Fatal(err)
			}

			mountInfo, ok = mounts["mount_tune_integration/"]
			if !ok {
				t.Fatalf("expected mount to exist")
			}
			if exp := "new description"; mountInfo.Description != exp {
				t.Errorf("expected %q to be %q", mountInfo.Description, exp)
			}
			if exp := "pki"; mountInfo.Type != exp {
				t.Errorf("expected %q to be %q", mountInfo.Type, exp)
			}
			if exp := version; mountInfo.PluginVersion != exp {
				t.Errorf("expected %q to be %q", mountInfo.PluginVersion, exp)
			}
			// TTLs were given as 30m/1h; API reports them in seconds.
			if exp := 1800; mountInfo.Config.DefaultLeaseTTL != exp {
				t.Errorf("expected %d to be %d", mountInfo.Config.DefaultLeaseTTL, exp)
			}
			if exp := 3600; mountInfo.Config.MaxLeaseTTL != exp {
				t.Errorf("expected %d to be %d", mountInfo.Config.MaxLeaseTTL, exp)
			}
			// Repeated flag -> two entries; comma-joined flag values are kept
			// as single strings by the server, hence the single-element
			// expectations below.
			if diff := deep.Equal([]string{"authorization", "www-authentication"}, mountInfo.Config.PassthroughRequestHeaders); len(diff) > 0 {
				t.Errorf("Failed to find expected values for PassthroughRequestHeaders. Difference is: %v", diff)
			}
			if diff := deep.Equal([]string{"authorization,www-authentication"}, mountInfo.Config.AllowedResponseHeaders); len(diff) > 0 {
				t.Errorf("Failed to find expected values in AllowedResponseHeaders. Difference is: %v", diff)
			}
			if diff := deep.Equal([]string{"foo,bar"}, mountInfo.Config.AuditNonHMACRequestKeys); len(diff) > 0 {
				t.Errorf("Failed to find expected values in AuditNonHMACRequestKeys. Difference is: %v", diff)
			}
			if diff := deep.Equal([]string{"foo,bar"}, mountInfo.Config.AuditNonHMACResponseKeys); len(diff) > 0 {
				t.Errorf("Failed to find expected values in AuditNonHMACResponseKeys. Difference is: %v", diff)
			}
			if diff := deep.Equal([]string{"key1,key2"}, mountInfo.Config.AllowedManagedKeys); len(diff) > 0 {
				t.Errorf("Failed to find expected values in AllowedManagedKeys. Difference is: %v", diff)
			}
		})

		// -description is only applied when explicitly provided; providing it
		// as the empty string clears the description, omitting it keeps the
		// existing one.
		t.Run("flags_description", func(t *testing.T) {
			t.Parallel()
			t.Run("not_provided", func(t *testing.T) {
				client, closer := testVaultServer(t)
				defer closer()

				ui, cmd := testSecretsTuneCommand(t)
				cmd.client = client

				// Mount
				if err := client.Sys().Mount("mount_tune_integration", &api.MountInput{
					Type:        "pki",
					Description: "initial description",
				}); err != nil {
					t.Fatal(err)
				}

				code := cmd.Run([]string{
					"-default-lease-ttl", "30m",
					"mount_tune_integration/",
				})
				if exp := 0; code != exp {
					t.Errorf("expected %d to be %d", code, exp)
				}

				expected := "Success! Tuned the secrets engine at: mount_tune_integration/"
				combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
				if !strings.Contains(combined, expected) {
					t.Errorf("expected %q to contain %q", combined, expected)
				}

				mounts, err := client.Sys().ListMounts()
				if err != nil {
					t.Fatal(err)
				}

				mountInfo, ok := mounts["mount_tune_integration/"]
				if !ok {
					t.Fatalf("expected mount to exist")
				}
				if exp := "initial description"; mountInfo.Description != exp {
					t.Errorf("expected %q to be %q", mountInfo.Description, exp)
				}
			})

			t.Run("provided_empty", func(t *testing.T) {
				client, closer := testVaultServer(t)
				defer closer()

				ui, cmd := testSecretsTuneCommand(t)
				cmd.client = client

				// Mount
				if err := client.Sys().Mount("mount_tune_integration", &api.MountInput{
					Type:        "pki",
					Description: "initial description",
				}); err != nil {
					t.Fatal(err)
				}

				code := cmd.Run([]string{
					"-description", "",
					"mount_tune_integration/",
				})
				if exp := 0; code != exp {
					t.Errorf("expected %d to be %d", code, exp)
				}

				expected := "Success! Tuned the secrets engine at: mount_tune_integration/"
				combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
				if !strings.Contains(combined, expected) {
					t.Errorf("expected %q to contain %q", combined, expected)
				}

				mounts, err := client.Sys().ListMounts()
				if err != nil {
					t.Fatal(err)
				}

				mountInfo, ok := mounts["mount_tune_integration/"]
				if !ok {
					t.Fatalf("expected mount to exist")
				}
				if exp := ""; mountInfo.Description != exp {
					t.Errorf("expected %q to be %q", mountInfo.Description, exp)
				}
			})
		})
	})

	// A server that rejects all requests must surface exit code 2 and the
	// tuning error message.
	t.Run("communication_failure", func(t *testing.T) {
		t.Parallel()

		client, closer := testVaultServerBad(t)
		defer closer()

		ui, cmd := testSecretsTuneCommand(t)
		cmd.client = client

		code := cmd.Run([]string{
			"pki/",
		})
		if exp := 2; code != exp {
			t.Errorf("expected %d to be %d", code, exp)
		}

		expected := "Error tuning secrets engine pki/: "
		combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
		if !strings.Contains(combined, expected) {
			t.Errorf("expected %q to contain %q", combined, expected)
		}
	})

	// Help/usage output must not contain tab characters (house style check).
	t.Run("no_tabs", func(t *testing.T) {
		t.Parallel()

		_, cmd := testSecretsTuneCommand(t)
		assertNoTabs(t, cmd)
	})
}
- * SPDX-License-Identifier: BUSL-1.1 - */ - -import modifyPassthroughResponse from '../helpers/modify-passthrough-response'; - -export default function (server) { - server.get('/sys/health', (schema, req) => - modifyPassthroughResponse(req, { version: '', cluster_name: '' }) - ); - server.get('/sys/seal-status', (schema, req) => - modifyPassthroughResponse(req, { version: '', cluster_name: '', build_date: '' }) - ); - server.get('sys/replication/status', () => new Response(404)); - server.get('sys/replication/dr/status', () => new Response(404)); - server.get('sys/replication/performance/status', () => new Response(404)); +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "runtime/pprof" + "sort" + "strconv" + "strings" + "sync" + "time" + + systemd "github.com/coreos/go-systemd/daemon" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/cli" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-kms-wrapping/entropy/v2" + wrapping "github.com/hashicorp/go-kms-wrapping/v2" + aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-secure-stdlib/gatedwriter" + "github.com/hashicorp/go-secure-stdlib/mlock" + "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/go-secure-stdlib/reloadutil" + "github.com/hashicorp/vault/audit" + config2 "github.com/hashicorp/vault/command/config" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/helper/builtinplugins" + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/helper/experiments" + loghelper "github.com/hashicorp/vault/helper/logging" + "github.com/hashicorp/vault/helper/metricsutil" + 
"github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/testhelpers/teststorage" + "github.com/hashicorp/vault/helper/useragent" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/internalshared/listenerutil" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/hashicorp/vault/sdk/helper/testcluster" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/sdk/physical" + sr "github.com/hashicorp/vault/serviceregistration" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/hcp_link" + vaultseal "github.com/hashicorp/vault/vault/seal" + "github.com/hashicorp/vault/version" + "github.com/mitchellh/go-testing-interface" + "github.com/posener/complete" + "github.com/sasha-s/go-deadlock" + "go.uber.org/atomic" + "golang.org/x/net/http/httpproxy" + "google.golang.org/grpc/grpclog" +) + +var ( + _ cli.Command = (*ServerCommand)(nil) + _ cli.CommandAutocomplete = (*ServerCommand)(nil) +) + +var memProfilerEnabled = false + +const ( + storageMigrationLock = "core/migration" + + // Even though there are more types than the ones below, the following consts + // are declared internally for value comparison and reusability. 
// ServerCommand is the "vault server" CLI command. It holds the pluggable
// backend factories the server is built from, the channels used to
// coordinate process lifecycle signals, logging state, and the parsed
// command-line flags for both normal and dev modes.
type ServerCommand struct {
	*BaseCommand
	logFlags logFlags

	// Factories for the pluggable subsystems, keyed by type name.
	AuditBackends      map[string]audit.Factory
	CredentialBackends map[string]logical.Factory
	LogicalBackends    map[string]logical.Factory
	PhysicalBackends   map[string]physical.Factory

	ServiceRegistrations map[string]sr.Factory

	// Lifecycle signal channels: shutdown request, SIGHUP (reload), and
	// SIGUSR2 (diagnostics dump).
	ShutdownCh chan struct{}
	SighupCh   chan struct{}
	SigUSR2Ch  chan struct{}

	WaitGroup *sync.WaitGroup

	// Logging plumbing; logGate buffers output until startup info is printed.
	logWriter io.Writer
	logGate   *gatedwriter.Writer
	logger    hclog.InterceptLogger

	// cleanupGuard ensures listener teardown runs at most once.
	cleanupGuard sync.Once

	reloadFuncsLock   *sync.RWMutex
	reloadFuncs       *map[string][]reloadutil.ReloadFunc
	startedCh         chan (struct{}) // for tests
	reloadedCh        chan (struct{}) // for tests
	licenseReloadedCh chan (error)    // for tests

	allLoggers []hclog.Logger

	// Parsed CLI flags. The flagDev* family configures in-memory dev mode;
	// flagTest* are internal-only test hooks.
	flagConfigs            []string
	flagRecovery           bool
	flagExperiments        []string
	flagDev                bool
	flagDevTLS             bool
	flagDevTLSCertDir      string
	flagDevTLSSANs         []string
	flagDevRootTokenID     string
	flagDevListenAddr      string
	flagDevNoStoreToken    bool
	flagDevPluginDir       string
	flagDevPluginInit      bool
	flagDevHA              bool
	flagDevLatency         int
	flagDevLatencyJitter   int
	flagDevLeasedKV        bool
	flagDevKVV1            bool
	flagDevSkipInit        bool
	flagDevThreeNode       bool
	flagDevFourCluster     bool
	flagDevTransactional   bool
	flagDevAutoSeal        bool
	flagDevClusterJson     string
	flagTestVerifyOnly     bool
	flagTestServerConfig   bool
	flagDevConsul          bool
	flagExitOnCoreShutdown bool
}

// Synopsis returns the one-line description shown in "vault -help".
func (c *ServerCommand) Synopsis() string {
	return "Start a Vault server"
}
Each Vault server must + also be unsealed using the "vault operator unseal" command or the API before the + server can respond to requests. + + Start a server with a configuration file: + + $ vault server -config=/etc/vault/config.hcl + + Run in "dev" mode: + + $ vault server -dev -dev-root-token-id="root" + + For a full list of examples, please see the documentation. + +` + c.Flags().Help() + return strings.TrimSpace(helpText) +} + +func (c *ServerCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP) + + f := set.NewFlagSet("Command Options") + + // Augment with the log flags + f.addLogFlags(&c.logFlags) + + f.StringSliceVar(&StringSliceVar{ + Name: "config", + Target: &c.flagConfigs, + Completion: complete.PredictOr( + complete.PredictFiles("*.hcl"), + complete.PredictFiles("*.json"), + complete.PredictDirs("*"), + ), + Usage: "Path to a configuration file or directory of configuration " + + "files. This flag can be specified multiple times to load multiple " + + "configurations. If the path is a directory, all files which end in " + + ".hcl or .json are loaded.", + }) + + f.BoolVar(&BoolVar{ + Name: "exit-on-core-shutdown", + Target: &c.flagExitOnCoreShutdown, + Default: false, + Usage: "Exit the vault server if the vault core is shutdown.", + }) + + f.BoolVar(&BoolVar{ + Name: "recovery", + Target: &c.flagRecovery, + Usage: "Enable recovery mode. In this mode, Vault is used to perform recovery actions. " + + "Using a recovery token, \"sys/raw\" API can be used to manipulate the storage.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "experiment", + Target: &c.flagExperiments, + Completion: complete.PredictSet(experiments.ValidExperiments()...), + Usage: "Name of an experiment to enable. Experiments should NOT be used in production, and " + + "the associated APIs may have backwards incompatible changes between releases. This " + + "flag can be specified multiple times to specify multiple experiments. 
This can also be " + + fmt.Sprintf("specified via the %s environment variable as a comma-separated list. ", EnvVaultExperiments) + + "Valid experiments are: " + strings.Join(experiments.ValidExperiments(), ", "), + }) + + f = set.NewFlagSet("Dev Options") + + f.BoolVar(&BoolVar{ + Name: "dev", + Target: &c.flagDev, + Usage: "Enable development mode. In this mode, Vault runs in-memory and " + + "starts unsealed. As the name implies, do not run \"dev\" mode in " + + "production.", + }) + + f.BoolVar(&BoolVar{ + Name: "dev-tls", + Target: &c.flagDevTLS, + Usage: "Enable TLS development mode. In this mode, Vault runs in-memory and " + + "starts unsealed, with a generated TLS CA, certificate and key. " + + "As the name implies, do not run \"dev-tls\" mode in " + + "production.", + }) + + f.StringVar(&StringVar{ + Name: "dev-tls-cert-dir", + Target: &c.flagDevTLSCertDir, + Default: "", + Usage: "Directory where generated TLS files are created if `-dev-tls` is " + + "specified. If left unset, files are generated in a temporary directory.", + }) + + f.StringSliceVar(&StringSliceVar{ + Name: "dev-tls-san", + Target: &c.flagDevTLSSANs, + Default: nil, + Usage: "Additional Subject Alternative Name (as a DNS name or IP address) " + + "to generate the certificate with if `-dev-tls` is specified. The " + + "certificate will always use localhost, localhost4, localhost6, " + + "localhost.localdomain, and the host name as alternate DNS names, " + + "and 127.0.0.1 as an alternate IP address. This flag can be specified " + + "multiple times to specify multiple SANs.", + }) + + f.StringVar(&StringVar{ + Name: "dev-root-token-id", + Target: &c.flagDevRootTokenID, + Default: "", + EnvVar: "VAULT_DEV_ROOT_TOKEN_ID", + Usage: "Initial root token. 
This only applies when running in \"dev\" " + + "mode.", + }) + + f.StringVar(&StringVar{ + Name: "dev-listen-address", + Target: &c.flagDevListenAddr, + Default: "127.0.0.1:8200", + EnvVar: "VAULT_DEV_LISTEN_ADDRESS", + Usage: "Address to bind to in \"dev\" mode.", + }) + f.BoolVar(&BoolVar{ + Name: "dev-no-store-token", + Target: &c.flagDevNoStoreToken, + Default: false, + Usage: "Do not persist the dev root token to the token helper " + + "(usually the local filesystem) for use in future requests. " + + "The token will only be displayed in the command output.", + }) + + // Internal-only flags to follow. + // + // Why hello there little source code reader! Welcome to the Vault source + // code. The remaining options are intentionally undocumented and come with + // no warranty or backwards-compatibility promise. Do not use these flags + // in production. Do not build automation using these flags. Unless you are + // developing against Vault, you should not need any of these flags. + + f.StringVar(&StringVar{ + Name: "dev-plugin-dir", + Target: &c.flagDevPluginDir, + Default: "", + Completion: complete.PredictDirs("*"), + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-plugin-init", + Target: &c.flagDevPluginInit, + Default: true, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-ha", + Target: &c.flagDevHA, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-transactional", + Target: &c.flagDevTransactional, + Default: false, + Hidden: true, + }) + + f.IntVar(&IntVar{ + Name: "dev-latency", + Target: &c.flagDevLatency, + Hidden: true, + }) + + f.IntVar(&IntVar{ + Name: "dev-latency-jitter", + Target: &c.flagDevLatencyJitter, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-leased-kv", + Target: &c.flagDevLeasedKV, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-kv-v1", + Target: &c.flagDevKVV1, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-auto-seal", + 
Target: &c.flagDevAutoSeal, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-skip-init", + Target: &c.flagDevSkipInit, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-three-node", + Target: &c.flagDevThreeNode, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-four-cluster", + Target: &c.flagDevFourCluster, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "dev-consul", + Target: &c.flagDevConsul, + Default: false, + Hidden: true, + }) + + f.StringVar(&StringVar{ + Name: "dev-cluster-json", + Target: &c.flagDevClusterJson, + Usage: "File to write cluster definition to", + }) + + // TODO: should the below flags be public? + f.BoolVar(&BoolVar{ + Name: "test-verify-only", + Target: &c.flagTestVerifyOnly, + Default: false, + Hidden: true, + }) + + f.BoolVar(&BoolVar{ + Name: "test-server-config", + Target: &c.flagTestServerConfig, + Default: false, + Hidden: true, + }) + + // End internal-only flags. + + return set +} + +func (c *ServerCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *ServerCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *ServerCommand) flushLog() { + c.logger.(hclog.OutputResettable).ResetOutputWithFlush(&hclog.LoggerOptions{ + Output: c.logWriter, + }, c.logGate) +} + +func (c *ServerCommand) parseConfig() (*server.Config, []configutil.ConfigError, error) { + var configErrors []configutil.ConfigError + // Load the configuration + var config *server.Config + for _, path := range c.flagConfigs { + current, err := server.LoadConfig(path) + if err != nil { + return nil, nil, fmt.Errorf("error loading configuration from %s: %w", path, err) + } + + configErrors = append(configErrors, current.Validate(path)...) 
+ + if config == nil { + config = current + } else { + config = config.Merge(current) + } + } + + if config != nil && config.Entropy != nil && config.Entropy.Mode == configutil.EntropyAugmentation && constants.IsFIPS() { + c.UI.Warn("WARNING: Entropy Augmentation is not supported in FIPS 140-2 Inside mode; disabling from server configuration!\n") + config.Entropy = nil + } + + return config, configErrors, nil +} + +func (c *ServerCommand) runRecoveryMode() int { + config, configErrors, err := c.parseConfig() + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Ensure at least one config was found. + if config == nil { + c.UI.Output(wrapAtLength( + "No configuration files found. Please provide configurations with the " + + "-config flag. If you are supplying the path to a directory, please " + + "ensure the directory contains files with the .hcl or .json " + + "extension.")) + return 1 + } + + // Update the 'log' related aspects of shared config based on config/env var/cli + c.flags.applyLogConfigOverrides(config.SharedConfig) + l, err := c.configureLogging(config) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + c.logger = l + c.allLoggers = append(c.allLoggers, l) + + // reporting Errors found in the config + for _, cErr := range configErrors { + c.logger.Warn(cErr.String()) + } + + // Ensure logging is flushed if initialization fails + defer c.flushLog() + + // create GRPC logger + namedGRPCLogFaker := c.logger.Named("grpclogfaker") + grpclog.SetLogger(&grpclogFaker{ + logger: namedGRPCLogFaker, + log: os.Getenv("VAULT_GRPC_LOGGING") != "", + }) + + if config.Storage == nil { + c.UI.Output("A storage backend must be specified") + return 1 + } + + if config.DefaultMaxRequestDuration != 0 { + vault.DefaultMaxRequestDuration = config.DefaultMaxRequestDuration + } + + logProxyEnvironmentVariables(c.logger) + + // Initialize the storage backend + factory, exists := c.PhysicalBackends[config.Storage.Type] + if !exists { + 
c.UI.Error(fmt.Sprintf("Unknown storage type %s", config.Storage.Type)) + return 1 + } + if config.Storage.Type == storageTypeRaft || (config.HAStorage != nil && config.HAStorage.Type == storageTypeRaft) { + if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" { + config.ClusterAddr = envCA + } + + if len(config.ClusterAddr) == 0 { + c.UI.Error("Cluster address must be set when using raft storage") + return 1 + } + } + + namedStorageLogger := c.logger.Named("storage." + config.Storage.Type) + backend, err := factory(config.Storage.Config, namedStorageLogger) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing storage of type %s: %s", config.Storage.Type, err)) + return 1 + } + + infoKeys := make([]string, 0, 10) + info := make(map[string]string) + info["log level"] = config.LogLevel + infoKeys = append(infoKeys, "log level") + + var barrierSeal vault.Seal + var sealConfigError error + + if len(config.Seals) == 0 { + config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.WrapperTypeShamir.String()}) + } + + if len(config.Seals) > 1 { + c.UI.Error("Only one seal block is accepted in recovery mode") + return 1 + } + + ctx := context.Background() + existingSealGenerationInfo, err := vault.PhysicalSealGenInfo(ctx, backend) + if err != nil { + c.UI.Error(fmt.Sprintf("Error getting seal generation info: %v", err)) + return 1 + } + + hasPartialPaths, err := hasPartiallyWrappedPaths(ctx, backend) + if err != nil { + c.UI.Error(fmt.Sprintf("Cannot determine if there are partially seal wrapped entries in storage: %v", err)) + return 1 + } + setSealResponse, err := setSeal(c, config, infoKeys, info, existingSealGenerationInfo, hasPartialPaths) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + if setSealResponse.barrierSeal == nil { + c.UI.Error(fmt.Sprintf("Error setting up seal: %v", setSealResponse.sealConfigError)) + return 1 + } + barrierSeal = setSealResponse.barrierSeal + + // Ensure that the seal finalizer is called, even if using 
verify-only + defer func() { + err = barrierSeal.Finalize(ctx) + if err != nil { + c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err)) + } + }() + + coreConfig := &vault.CoreConfig{ + Physical: backend, + StorageType: config.Storage.Type, + Seal: barrierSeal, + LogLevel: config.LogLevel, + Logger: c.logger, + DisableMlock: config.DisableMlock, + RecoveryMode: c.flagRecovery, + ClusterAddr: config.ClusterAddr, + } + + core, newCoreError := vault.NewCore(coreConfig) + if newCoreError != nil { + if vault.IsFatalError(newCoreError) { + c.UI.Error(fmt.Sprintf("Error initializing core: %s", newCoreError)) + return 1 + } + } + + if err := core.InitializeRecovery(ctx); err != nil { + c.UI.Error(fmt.Sprintf("Error initializing core in recovery mode: %s", err)) + return 1 + } + + // Compile server information for output later + infoKeys = append(infoKeys, "storage") + info["storage"] = config.Storage.Type + + if coreConfig.ClusterAddr != "" { + info["cluster address"] = coreConfig.ClusterAddr + infoKeys = append(infoKeys, "cluster address") + } + + // Initialize the listeners + lns := make([]listenerutil.Listener, 0, len(config.Listeners)) + for _, lnConfig := range config.Listeners { + ln, _, _, err := server.NewListener(lnConfig, c.logGate, c.UI) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing listener of type %s: %s", lnConfig.Type, err)) + return 1 + } + + lns = append(lns, listenerutil.Listener{ + Listener: ln, + Config: lnConfig, + }) + } + + listenerCloseFunc := func() { + for _, ln := range lns { + ln.Listener.Close() + } + } + + defer c.cleanupGuard.Do(listenerCloseFunc) + + infoKeys = append(infoKeys, "version") + verInfo := version.GetVersion() + info["version"] = verInfo.FullVersionNumber(false) + + if verInfo.Revision != "" { + info["version sha"] = strings.Trim(verInfo.Revision, "'") + infoKeys = append(infoKeys, "version sha") + } + + infoKeys = append(infoKeys, "recovery mode") + info["recovery mode"] = "true" + + infoKeys = 
append(infoKeys, "go version") + info["go version"] = runtime.Version() + + fipsStatus := entGetFIPSInfoKey() + if fipsStatus != "" { + infoKeys = append(infoKeys, "fips") + info["fips"] = fipsStatus + } + + // Server configuration output + padding := 24 + + sort.Strings(infoKeys) + c.UI.Output("==> Vault server configuration:\n") + + for _, k := range infoKeys { + c.UI.Output(fmt.Sprintf( + "%s%s: %s", + strings.Repeat(" ", padding-len(k)), + strings.Title(k), + info[k])) + } + + c.UI.Output("") + + // Tests might not want to start a vault server and just want to verify + // the configuration. + if c.flagTestVerifyOnly { + return 0 + } + + for _, ln := range lns { + handler := vaulthttp.Handler.Handler(&vault.HandlerProperties{ + Core: core, + ListenerConfig: ln.Config, + DisablePrintableCheck: config.DisablePrintableCheck, + RecoveryMode: c.flagRecovery, + RecoveryToken: atomic.NewString(""), + }) + + server := &http.Server{ + Handler: handler, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: c.logger.StandardLogger(nil), + } + + go server.Serve(ln.Listener) + } + + if sealConfigError != nil { + init, err := core.InitializedLocally(ctx) + if err != nil { + c.UI.Error(fmt.Sprintf("Error checking if core is initialized: %v", err)) + return 1 + } + if init { + c.UI.Error("Vault is initialized but no Seal key could be loaded") + return 1 + } + } + + if newCoreError != nil { + c.UI.Warn(wrapAtLength( + "WARNING! A non-fatal error occurred during initialization. Please " + + "check the logs for more information.")) + c.UI.Warn("") + } + + if !c.logFlags.flagCombineLogs { + c.UI.Output("==> Vault server started! 
Log data will stream in below:\n") + } + + c.flushLog() + + for { + select { + case <-c.ShutdownCh: + c.UI.Output("==> Vault shutdown triggered") + + c.cleanupGuard.Do(listenerCloseFunc) + + if err := core.Shutdown(); err != nil { + c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err)) + } + + return 0 + + case <-c.SigUSR2Ch: + buf := make([]byte, 32*1024*1024) + n := runtime.Stack(buf[:], true) + c.logger.Info("goroutine trace", "stack", string(buf[:n])) + } + } +} + +func logProxyEnvironmentVariables(logger hclog.Logger) { + proxyCfg := httpproxy.FromEnvironment() + cfgMap := map[string]string{ + "http_proxy": proxyCfg.HTTPProxy, + "https_proxy": proxyCfg.HTTPSProxy, + "no_proxy": proxyCfg.NoProxy, + } + for k, v := range cfgMap { + u, err := url.Parse(v) + if err != nil { + // Env vars may contain URLs or host:port values. We only care + // about the former. + continue + } + if _, ok := u.User.Password(); ok { + u.User = url.UserPassword("redacted-username", "redacted-password") + } else if user := u.User.Username(); user != "" { + u.User = url.User("redacted-username") + } + cfgMap[k] = u.String() + } + logger.Info("proxy environment", "http_proxy", cfgMap["http_proxy"], + "https_proxy", cfgMap["https_proxy"], "no_proxy", cfgMap["no_proxy"]) +} + +type quiescenceSink struct { + t *time.Timer +} + +func (q quiescenceSink) Accept(name string, level hclog.Level, msg string, args ...interface{}) { + q.t.Reset(100 * time.Millisecond) +} + +func (c *ServerCommand) setupStorage(config *server.Config) (physical.Backend, error) { + // Ensure that a backend is provided + if config.Storage == nil { + return nil, errors.New("A storage backend must be specified") + } + + // Initialize the backend + factory, exists := c.PhysicalBackends[config.Storage.Type] + if !exists { + return nil, fmt.Errorf("Unknown storage type %s", config.Storage.Type) + } + + // Do any custom configuration needed per backend + switch config.Storage.Type { + case storageTypeConsul: + if 
config.ServiceRegistration == nil { + // If Consul is configured for storage and service registration is unconfigured, + // use Consul for service registration without requiring additional configuration. + // This maintains backward-compatibility. + config.ServiceRegistration = &server.ServiceRegistration{ + Type: "consul", + Config: config.Storage.Config, + } + } + case storageTypeRaft: + if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" { + config.ClusterAddr = envCA + } + if len(config.ClusterAddr) == 0 { + return nil, errors.New("Cluster address must be set when using raft storage") + } + } + + namedStorageLogger := c.logger.Named("storage." + config.Storage.Type) + c.allLoggers = append(c.allLoggers, namedStorageLogger) + backend, err := factory(config.Storage.Config, namedStorageLogger) + if err != nil { + return nil, fmt.Errorf("Error initializing storage of type %s: %w", config.Storage.Type, err) + } + + return backend, nil +} + +func beginServiceRegistration(c *ServerCommand, config *server.Config) (sr.ServiceRegistration, error) { + sdFactory, ok := c.ServiceRegistrations[config.ServiceRegistration.Type] + if !ok { + return nil, fmt.Errorf("Unknown service_registration type %s", config.ServiceRegistration.Type) + } + + namedSDLogger := c.logger.Named("service_registration." + config.ServiceRegistration.Type) + c.allLoggers = append(c.allLoggers, namedSDLogger) + + // Since we haven't even begun starting Vault's core yet, + // we know that Vault is in its pre-running state. 
+ state := sr.State{ + VaultVersion: version.GetVersion().VersionNumber(), + IsInitialized: false, + IsSealed: true, + IsActive: false, + IsPerformanceStandby: false, + } + var err error + configSR, err := sdFactory(config.ServiceRegistration.Config, namedSDLogger, state) + if err != nil { + return nil, fmt.Errorf("Error initializing service_registration of type %s: %s", config.ServiceRegistration.Type, err) + } + + return configSR, nil +} + +// InitListeners returns a response code, error message, Listeners, and a TCP Address list. +func (c *ServerCommand) InitListeners(config *server.Config, disableClustering bool, infoKeys *[]string, info *map[string]string) (int, []listenerutil.Listener, []*net.TCPAddr, error) { + clusterAddrs := []*net.TCPAddr{} + + // Initialize the listeners + lns := make([]listenerutil.Listener, 0, len(config.Listeners)) + + c.reloadFuncsLock.Lock() + + defer c.reloadFuncsLock.Unlock() + + var errMsg error + for i, lnConfig := range config.Listeners { + ln, props, reloadFunc, err := server.NewListener(lnConfig, c.logGate, c.UI) + if err != nil { + errMsg = fmt.Errorf("Error initializing listener of type %s: %s", lnConfig.Type, err) + return 1, nil, nil, errMsg + } + + if reloadFunc != nil { + relSlice := (*c.reloadFuncs)[fmt.Sprintf("listener|%s", lnConfig.Type)] + relSlice = append(relSlice, reloadFunc) + (*c.reloadFuncs)[fmt.Sprintf("listener|%s", lnConfig.Type)] = relSlice + } + + if !disableClustering && lnConfig.Type == "tcp" { + addr := lnConfig.ClusterAddress + if addr != "" { + tcpAddr, err := net.ResolveTCPAddr("tcp", lnConfig.ClusterAddress) + if err != nil { + errMsg = fmt.Errorf("Error resolving cluster_address: %s", err) + return 1, nil, nil, errMsg + } + clusterAddrs = append(clusterAddrs, tcpAddr) + } else { + tcpAddr, ok := ln.Addr().(*net.TCPAddr) + if !ok { + errMsg = fmt.Errorf("Failed to parse tcp listener") + return 1, nil, nil, errMsg + } + clusterAddr := &net.TCPAddr{ + IP: tcpAddr.IP, + Port: tcpAddr.Port + 1, + } + 
clusterAddrs = append(clusterAddrs, clusterAddr) + addr = clusterAddr.String() + } + props["cluster address"] = addr + } + + if lnConfig.MaxRequestSize == 0 { + lnConfig.MaxRequestSize = vaulthttp.DefaultMaxRequestSize + } + props["max_request_size"] = fmt.Sprintf("%d", lnConfig.MaxRequestSize) + + if lnConfig.MaxRequestDuration == 0 { + lnConfig.MaxRequestDuration = vault.DefaultMaxRequestDuration + } + props["max_request_duration"] = lnConfig.MaxRequestDuration.String() + + if lnConfig.ChrootNamespace != "" { + props["chroot_namespace"] = lnConfig.ChrootNamespace + } + + lns = append(lns, listenerutil.Listener{ + Listener: ln, + Config: lnConfig, + }) + + // Store the listener props for output later + key := fmt.Sprintf("listener %d", i+1) + propsList := make([]string, 0, len(props)) + for k, v := range props { + propsList = append(propsList, fmt.Sprintf( + "%s: %q", k, v)) + } + sort.Strings(propsList) + *infoKeys = append(*infoKeys, key) + (*info)[key] = fmt.Sprintf( + "%s (%s)", lnConfig.Type, strings.Join(propsList, ", ")) + + } + if !disableClustering { + if c.logger.IsDebug() { + c.logger.Debug("cluster listener addresses synthesized", "cluster_addresses", clusterAddrs) + } + } + return 0, lns, clusterAddrs, nil +} + +func configureDevTLS(c *ServerCommand) (func(), *server.Config, string, error) { + var devStorageType string + + switch { + case c.flagDevConsul: + devStorageType = "consul" + case c.flagDevHA && c.flagDevTransactional: + devStorageType = "inmem_transactional_ha" + case !c.flagDevHA && c.flagDevTransactional: + devStorageType = "inmem_transactional" + case c.flagDevHA && !c.flagDevTransactional: + devStorageType = "inmem_ha" + default: + devStorageType = "inmem" + } + + var certDir string + var err error + var config *server.Config + var f func() + + if c.flagDevTLS { + if c.flagDevTLSCertDir != "" { + if _, err = os.Stat(c.flagDevTLSCertDir); err != nil { + return nil, nil, "", err + } + + certDir = c.flagDevTLSCertDir + } else { + if 
certDir, err = os.MkdirTemp("", "vault-tls"); err != nil { + return nil, nil, certDir, err + } + } + extraSANs := c.flagDevTLSSANs + host, _, err := net.SplitHostPort(c.flagDevListenAddr) + if err == nil { + // 127.0.0.1 is the default, and already included in the SANs. + // Empty host means listen on all interfaces, but users should use the + // -dev-tls-san flag to get the right SANs in that case. + if host != "" && host != "127.0.0.1" { + extraSANs = append(extraSANs, host) + } + } + config, err = server.DevTLSConfig(devStorageType, certDir, extraSANs) + + f = func() { + if err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCAFilename)); err != nil { + c.UI.Error(err.Error()) + } + + if err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevCertFilename)); err != nil { + c.UI.Error(err.Error()) + } + + if err := os.Remove(fmt.Sprintf("%s/%s", certDir, server.VaultDevKeyFilename)); err != nil { + c.UI.Error(err.Error()) + } + + // Only delete temp directories we made. + if c.flagDevTLSCertDir == "" { + if err := os.Remove(certDir); err != nil { + c.UI.Error(err.Error()) + } + } + } + + } else { + config, err = server.DevConfig(devStorageType) + } + + return f, config, certDir, err +} + +func (c *ServerCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Don't exit just because we saw a potential deadlock. + deadlock.Opts.OnPotentialDeadlock = func() {} + + c.logGate = gatedwriter.NewWriter(os.Stderr) + c.logWriter = c.logGate + + if c.logFlags.flagCombineLogs { + c.logWriter = os.Stdout + } + + if c.flagRecovery { + return c.runRecoveryMode() + } + + // Automatically enable dev mode if other dev flags are provided. 
+ if c.flagDevConsul || c.flagDevHA || c.flagDevTransactional || c.flagDevLeasedKV || c.flagDevThreeNode || c.flagDevFourCluster || c.flagDevAutoSeal || c.flagDevKVV1 || c.flagDevTLS { + c.flagDev = true + } + + // Validation + if !c.flagDev { + switch { + case len(c.flagConfigs) == 0: + c.UI.Error("Must specify at least one config path using -config") + return 1 + case c.flagDevRootTokenID != "": + c.UI.Warn(wrapAtLength( + "You cannot specify a custom root token ID outside of \"dev\" mode. " + + "Your request has been ignored.")) + c.flagDevRootTokenID = "" + } + } + + // Load the configuration + var config *server.Config + var certDir string + if c.flagDev { + df, cfg, dir, err := configureDevTLS(c) + if df != nil { + defer df() + } + + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + config = cfg + certDir = dir + + if c.flagDevListenAddr != "" { + config.Listeners[0].Address = c.flagDevListenAddr + } + config.Listeners[0].Telemetry.UnauthenticatedMetricsAccess = true + } + + parsedConfig, configErrors, err := c.parseConfig() + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + if config == nil { + config = parsedConfig + } else { + config = config.Merge(parsedConfig) + } + + // Ensure at least one config was found. + if config == nil { + c.UI.Output(wrapAtLength( + "No configuration files found. Please provide configurations with the " + + "-config flag. 
If you are supplying the path to a directory, please " + + "ensure the directory contains files with the .hcl or .json " + + "extension.")) + return 1 + } + + f.applyLogConfigOverrides(config.SharedConfig) + + // Set 'trace' log level for the following 'dev' clusters + if c.flagDevThreeNode || c.flagDevFourCluster { + config.LogLevel = "trace" + } + + l, err := c.configureLogging(config) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + c.logger = l + c.allLoggers = append(c.allLoggers, l) + + // reporting Errors found in the config + for _, cErr := range configErrors { + c.logger.Warn(cErr.String()) + } + + // Ensure logging is flushed if initialization fails + defer c.flushLog() + + // create GRPC logger + namedGRPCLogFaker := c.logger.Named("grpclogfaker") + c.allLoggers = append(c.allLoggers, namedGRPCLogFaker) + grpclog.SetLogger(&grpclogFaker{ + logger: namedGRPCLogFaker, + log: os.Getenv("VAULT_GRPC_LOGGING") != "", + }) + + if memProfilerEnabled { + c.startMemProfiler() + } + + if config.DefaultMaxRequestDuration != 0 { + vault.DefaultMaxRequestDuration = config.DefaultMaxRequestDuration + } + + logProxyEnvironmentVariables(c.logger) + + if envMlock := os.Getenv("VAULT_DISABLE_MLOCK"); envMlock != "" { + var err error + config.DisableMlock, err = strconv.ParseBool(envMlock) + if err != nil { + c.UI.Output("Error parsing the environment variable VAULT_DISABLE_MLOCK") + return 1 + } + } + + if envLicensePath := os.Getenv(EnvVaultLicensePath); envLicensePath != "" { + config.LicensePath = envLicensePath + } + if envLicense := os.Getenv(EnvVaultLicense); envLicense != "" { + config.License = envLicense + } + + if err := server.ExperimentsFromEnvAndCLI(config, EnvVaultExperiments, c.flagExperiments); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + for _, experiment := range config.Experiments { + if experiments.IsUnused(experiment) { + c.UI.Warn(fmt.Sprintf("WARNING! 
Experiment %s is no longer used", experiment)) + } + } + + // If mlockall(2) isn't supported, show a warning. We disable this in dev + // because it is quite scary to see when first using Vault. We also disable + // this if the user has explicitly disabled mlock in configuration. + if !c.flagDev && !config.DisableMlock && !mlock.Supported() { + c.UI.Warn(wrapAtLength( + "WARNING! mlock is not supported on this system! An mlockall(2)-like " + + "syscall to prevent memory from being swapped to disk is not " + + "supported on this system. For better security, only run Vault on " + + "systems where this call is supported. If you are running Vault " + + "in a Docker container, provide the IPC_LOCK cap to the container.")) + } + + inmemMetrics, metricSink, prometheusEnabled, err := configutil.SetupTelemetry(&configutil.SetupTelemetryOpts{ + Config: config.Telemetry, + Ui: c.UI, + ServiceName: "vault", + DisplayName: "Vault", + UserAgent: useragent.String(), + ClusterName: config.ClusterName, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err)) + return 1 + } + metricsHelper := metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled) + + // Initialize the storage backend + var backend physical.Backend + if !c.flagDev || config.Storage != nil { + backend, err = c.setupStorage(config) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + // Prevent server startup if migration is active + // TODO: Use OpenTelemetry to integrate this into Diagnose + if c.storageMigrationActive(backend) { + return 1 + } + } + + // Initialize the Service Discovery, if there is one + var configSR sr.ServiceRegistration + if config.ServiceRegistration != nil { + configSR, err = beginServiceRegistration(c, config) + if err != nil { + c.UI.Output(err.Error()) + return 1 + } + } + + infoKeys := make([]string, 0, 10) + info := make(map[string]string) + info["log level"] = config.LogLevel + infoKeys = append(infoKeys, "log level") + + // returns a slice 
of env vars formatted as "key=value" + envVars := os.Environ() + var envVarKeys []string + for _, v := range envVars { + splitEnvVars := strings.Split(v, "=") + envVarKeys = append(envVarKeys, splitEnvVars[0]) + } + + sort.Strings(envVarKeys) + + key := "environment variables" + info[key] = strings.Join(envVarKeys, ", ") + infoKeys = append(infoKeys, key) + + if len(config.Experiments) != 0 { + expKey := "experiments" + info[expKey] = strings.Join(config.Experiments, ", ") + infoKeys = append(infoKeys, expKey) + } + + ctx := context.Background() + + setSealResponse, secureRandomReader, err := c.configureSeals(ctx, config, backend, infoKeys, info) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + currentSeals := setSealResponse.getCreatedSeals() + defer c.finalizeSeals(ctx, ¤tSeals) + + coreConfig := createCoreConfig(c, config, backend, configSR, setSealResponse.barrierSeal, setSealResponse.unwrapSeal, metricsHelper, metricSink, secureRandomReader) + if c.flagDevThreeNode { + return c.enableThreeNodeDevCluster(&coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR")) + } + + if c.flagDevFourCluster { + return entEnableFourClusterDev(c, &coreConfig, info, infoKeys, os.Getenv("VAULT_DEV_TEMP_DIR")) + } + + if allowPendingRemoval := os.Getenv(consts.EnvVaultAllowPendingRemovalMounts); allowPendingRemoval != "" { + var err error + coreConfig.PendingRemovalMountsAllowed, err = strconv.ParseBool(allowPendingRemoval) + if err != nil { + c.UI.Warn(wrapAtLength("WARNING! 
failed to parse " + + consts.EnvVaultAllowPendingRemovalMounts + " env var: " + + "defaulting to false.")) + } + } + + // Initialize the separate HA storage backend, if it exists + disableClustering, err := initHaBackend(c, config, &coreConfig, backend) + if err != nil { + c.UI.Output(err.Error()) + return 1 + } + + // Determine the redirect address from environment variables + err = determineRedirectAddr(c, &coreConfig, config) + if err != nil { + c.UI.Output(err.Error()) + } + + // After the redirect bits are sorted out, if no cluster address was + // explicitly given, derive one from the redirect addr + err = findClusterAddress(c, &coreConfig, config, disableClustering) + if err != nil { + c.UI.Output(err.Error()) + return 1 + } + + // Override the UI enabling config by the environment variable + if enableUI := os.Getenv("VAULT_UI"); enableUI != "" { + var err error + coreConfig.EnableUI, err = strconv.ParseBool(enableUI) + if err != nil { + c.UI.Output("Error parsing the environment variable VAULT_UI") + return 1 + } + } + + // If ServiceRegistration is configured, then the backend must support HA + isBackendHA := coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() + if !c.flagDev && (coreConfig.GetServiceRegistration() != nil) && !isBackendHA { + c.UI.Output("service_registration is configured, but storage does not support HA") + return 1 + } + + // Apply any enterprise configuration onto the coreConfig. 
+ entAdjustCoreConfig(config, &coreConfig) + + if !entCheckStorageType(&coreConfig) { + c.UI.Warn("") + c.UI.Warn(wrapAtLength(fmt.Sprintf("WARNING: storage configured to use %q which is not supported for Vault Enterprise, must be \"raft\" or \"consul\"", coreConfig.StorageType))) + c.UI.Warn("") + } + + if !c.flagDev { + inMemStorageTypes := []string{ + "inmem", "inmem_ha", "inmem_transactional", "inmem_transactional_ha", + } + + if strutil.StrListContains(inMemStorageTypes, coreConfig.StorageType) { + c.UI.Warn("") + c.UI.Warn(wrapAtLength(fmt.Sprintf("WARNING: storage configured to use %q which should NOT be used in production", coreConfig.StorageType))) + c.UI.Warn("") + } + } + + // Initialize the core + core, newCoreError := vault.NewCore(&coreConfig) + if newCoreError != nil { + if vault.IsFatalError(newCoreError) { + c.UI.Error(fmt.Sprintf("Error initializing core: %s", newCoreError)) + return 1 + } + c.UI.Warn(wrapAtLength( + "WARNING! A non-fatal error occurred during initialization. 
Please " + + "check the logs for more information.")) + c.UI.Warn("") + + } + + // Copy the reload funcs pointers back + c.reloadFuncs = coreConfig.ReloadFuncs + c.reloadFuncsLock = coreConfig.ReloadFuncsLock + + // Compile server information for output later + info["storage"] = config.Storage.Type + info["mlock"] = fmt.Sprintf( + "supported: %v, enabled: %v", + mlock.Supported(), !config.DisableMlock && mlock.Supported()) + infoKeys = append(infoKeys, "mlock", "storage") + + if coreConfig.ClusterAddr != "" { + info["cluster address"] = coreConfig.ClusterAddr + infoKeys = append(infoKeys, "cluster address") + } + if coreConfig.RedirectAddr != "" { + info["api address"] = coreConfig.RedirectAddr + infoKeys = append(infoKeys, "api address") + } + + if config.HAStorage != nil { + info["HA storage"] = config.HAStorage.Type + infoKeys = append(infoKeys, "HA storage") + } else { + // If the storage supports HA, then note it + if coreConfig.HAPhysical != nil { + if coreConfig.HAPhysical.HAEnabled() { + info["storage"] += " (HA available)" + } else { + info["storage"] += " (HA disabled)" + } + } + } + + status, lns, clusterAddrs, errMsg := c.InitListeners(config, disableClustering, &infoKeys, &info) + + if status != 0 { + c.UI.Output("Error parsing listener configuration.") + c.UI.Error(errMsg.Error()) + return 1 + } + + // Make sure we close all listeners from this point on + listenerCloseFunc := func() { + for _, ln := range lns { + ln.Listener.Close() + } + } + + defer c.cleanupGuard.Do(listenerCloseFunc) + + infoKeys = append(infoKeys, "version") + verInfo := version.GetVersion() + info["version"] = verInfo.FullVersionNumber(false) + if verInfo.Revision != "" { + info["version sha"] = strings.Trim(verInfo.Revision, "'") + infoKeys = append(infoKeys, "version sha") + } + + infoKeys = append(infoKeys, "cgo") + info["cgo"] = "disabled" + if version.CgoEnabled { + info["cgo"] = "enabled" + } + + infoKeys = append(infoKeys, "recovery mode") + info["recovery mode"] = "false" 
+ + infoKeys = append(infoKeys, "go version") + info["go version"] = runtime.Version() + + fipsStatus := entGetFIPSInfoKey() + if fipsStatus != "" { + infoKeys = append(infoKeys, "fips") + info["fips"] = fipsStatus + } + + if config.HCPLinkConf != nil { + infoKeys = append(infoKeys, "HCP organization") + info["HCP organization"] = config.HCPLinkConf.Resource.Organization + + infoKeys = append(infoKeys, "HCP project") + info["HCP project"] = config.HCPLinkConf.Resource.Project + + infoKeys = append(infoKeys, "HCP resource ID") + info["HCP resource ID"] = config.HCPLinkConf.Resource.ID + } + + infoKeys = append(infoKeys, "administrative namespace") + info["administrative namespace"] = config.AdministrativeNamespacePath + + sort.Strings(infoKeys) + c.UI.Output("==> Vault server configuration:\n") + + for _, k := range infoKeys { + c.UI.Output(fmt.Sprintf( + "%24s: %s", + strings.Title(k), + info[k])) + } + + c.UI.Output("") + + // Tests might not want to start a vault server and just want to verify + // the configuration. + if c.flagTestVerifyOnly { + return 0 + } + + // This needs to happen before we first unseal, so before we trigger dev + // mode if it's set + core.SetClusterListenerAddrs(clusterAddrs) + core.SetClusterHandler(vaulthttp.Handler.Handler(&vault.HandlerProperties{ + Core: core, + ListenerConfig: &configutil.Listener{}, + })) + + // Attempt unsealing in a background goroutine. This is needed for when a + // Vault cluster with multiple servers is configured with auto-unseal but is + // uninitialized. Once one server initializes the storage backend, this + // goroutine will pick up the unseal keys and unseal this instance. + if !core.IsInSealMigrationMode(true) { + go runUnseal(c, core, ctx) + } + + // When the underlying storage is raft, kick off retry join if it was specified + // in the configuration + // TODO: Should we also support retry_join for ha_storage? 
+ if config.Storage.Type == storageTypeRaft { + if err := core.InitiateRetryJoin(ctx); err != nil { + c.UI.Error(fmt.Sprintf("Failed to initiate raft retry join, %q", err.Error())) + return 1 + } + } + + // Perform initialization of HTTP server after the verifyOnly check. + + // Instantiate the wait group + c.WaitGroup = &sync.WaitGroup{} + + // If service discovery is available, run service discovery + err = runListeners(c, &coreConfig, config, configSR) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // If we're in Dev mode, then initialize the core + clusterJson := &testcluster.ClusterJson{} + err = initDevCore(c, &coreConfig, config, core, certDir, clusterJson) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Initialize the HTTP servers + err = startHttpServers(c, core, config, lns) + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + hcpLogger := c.logger.Named("hcp-connectivity") + hcpLink, err := hcp_link.NewHCPLink(config.HCPLinkConf, core, hcpLogger) + if err != nil { + c.logger.Error("failed to establish HCP connection", "error", err) + } else if hcpLink != nil { + c.logger.Trace("established HCP connection") + } + + if c.flagTestServerConfig { + return 0 + } + + if setSealResponse.sealConfigError != nil { + init, err := core.InitializedLocally(ctx) + if err != nil { + c.UI.Error(fmt.Sprintf("Error checking if core is initialized: %v", err)) + return 1 + } + if init { + c.UI.Error("Vault is initialized but no Seal key could be loaded") + return 1 + } + } + + // Output the header that the server has started + if !c.logFlags.flagCombineLogs { + c.UI.Output("==> Vault server started! Log data will stream in below:\n") + } + + // Inform any tests that the server is ready + select { + case c.startedCh <- struct{}{}: + default: + } + + // Release the log gate. 
+ c.flushLog() + + // Write out the PID to the file now that server has successfully started + if err := c.storePidFile(config.PidFile); err != nil { + c.UI.Error(fmt.Sprintf("Error storing PID: %s", err)) + return 1 + } + + // Notify systemd that the server is ready (if applicable) + c.notifySystemd(systemd.SdNotifyReady) + + if c.flagDev { + protocol := "http://" + if c.flagDevTLS { + protocol = "https://" + } + clusterJson.Nodes = []testcluster.ClusterNode{ + { + APIAddress: protocol + config.Listeners[0].Address, + }, + } + if c.flagDevTLS { + clusterJson.CACertPath = fmt.Sprintf("%s/%s", certDir, server.VaultDevCAFilename) + } + + if c.flagDevClusterJson != "" && !c.flagDevThreeNode { + b, err := jsonutil.EncodeJSON(clusterJson) + if err != nil { + c.UI.Error(fmt.Sprintf("Error encoding cluster.json: %s", err)) + return 1 + } + err = os.WriteFile(c.flagDevClusterJson, b, 0o600) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing cluster.json %q: %s", c.flagDevClusterJson, err)) + return 1 + } + } + } + + defer func() { + if err := c.removePidFile(config.PidFile); err != nil { + c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err)) + } + }() + + var coreShutdownDoneCh <-chan struct{} + if c.flagExitOnCoreShutdown { + coreShutdownDoneCh = core.ShutdownDone() + } + + // Wait for shutdown + shutdownTriggered := false + retCode := 0 + + for !shutdownTriggered { + select { + case <-coreShutdownDoneCh: + c.UI.Output("==> Vault core was shut down") + retCode = 1 + shutdownTriggered = true + case <-c.ShutdownCh: + c.UI.Output("==> Vault shutdown triggered") + shutdownTriggered = true + case <-c.SighupCh: + c.UI.Output("==> Vault reload triggered") + + // Notify systemd that the server is reloading config + c.notifySystemd(systemd.SdNotifyReloading) + + // Check for new log level + var config *server.Config + var configErrors []configutil.ConfigError + for _, path := range c.flagConfigs { + current, err := server.LoadConfig(path) + if err != nil { + 
c.logger.Error("could not reload config", "path", path, "error", err) + goto RUNRELOADFUNCS + } + + configErrors = append(configErrors, current.Validate(path)...) + + if config == nil { + config = current + } else { + config = config.Merge(current) + } + } + + // Ensure at least one config was found. + if config == nil { + c.logger.Error("no config found at reload time") + goto RUNRELOADFUNCS + } + + // reporting Errors found in the config + for _, cErr := range configErrors { + c.logger.Warn(cErr.String()) + } + + if !cmp.Equal(core.GetCoreConfigInternal().Seals, config.Seals) { + setSealResponse, err = c.reloadSeals(ctx, core, config) + if err != nil { + c.UI.Error(fmt.Errorf("error reloading seal config: %s", err).Error()) + config.Seals = core.GetCoreConfigInternal().Seals + } else { + // finalize the old seals and set the new seals as the current ones + c.finalizeSeals(ctx, ¤tSeals) + currentSeals = setSealResponse.getCreatedSeals() + } + } + + core.SetConfig(config) + + // reloading custom response headers to make sure we have + // the most up to date headers after reloading the config file + if err = core.ReloadCustomResponseHeaders(); err != nil { + c.logger.Error(err.Error()) + } + + // Setting log request with the new value in the config after reload + core.ReloadLogRequestsLevel() + + // reloading HCP link + hcpLink, err = c.reloadHCPLink(hcpLink, config, core, hcpLogger) + if err != nil { + c.logger.Error(err.Error()) + } + + // Reload log level for loggers + if config.LogLevel != "" { + level, err := loghelper.ParseLogLevel(config.LogLevel) + if err != nil { + c.logger.Error("unknown log level found on reload", "level", config.LogLevel) + goto RUNRELOADFUNCS + } + core.SetLogLevel(level) + } + + RUNRELOADFUNCS: + if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, c.flagConfigs, core); err != nil { + c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err)) + } + + // Reload license file + if err = core.EntReloadLicense(); err != nil 
{ + c.UI.Error(err.Error()) + } + + if err := core.ReloadCensus(); err != nil { + c.UI.Error(err.Error()) + } + select { + case c.licenseReloadedCh <- err: + default: + } + + // Let the managedKeyRegistry react to configuration changes (i.e. + // changes in kms_libraries) + core.ReloadManagedKeyRegistryConfig() + + // Notify systemd that the server has completed reloading config + c.notifySystemd(systemd.SdNotifyReady) + + case <-c.SigUSR2Ch: + logWriter := c.logger.StandardWriter(&hclog.StandardLoggerOptions{}) + pprof.Lookup("goroutine").WriteTo(logWriter, 2) + + if os.Getenv("VAULT_STACKTRACE_WRITE_TO_FILE") != "" { + c.logger.Info("Writing stacktrace to file") + + dir := "" + path := os.Getenv("VAULT_STACKTRACE_FILE_PATH") + if path != "" { + if _, err := os.Stat(path); err != nil { + c.logger.Error("Checking stacktrace path failed", "error", err) + continue + } + dir = path + } else { + dir, err = os.MkdirTemp("", "vault-stacktrace") + if err != nil { + c.logger.Error("Could not create temporary directory for stacktrace", "error", err) + continue + } + } + + f, err := os.CreateTemp(dir, "stacktrace") + if err != nil { + c.logger.Error("Could not create stacktrace file", "error", err) + continue + } + + if err := pprof.Lookup("goroutine").WriteTo(f, 2); err != nil { + f.Close() + c.logger.Error("Could not write stacktrace to file", "error", err) + continue + } + + c.logger.Info(fmt.Sprintf("Wrote stacktrace to: %s", f.Name())) + f.Close() + } + + // We can only get pprof outputs via the API but sometimes Vault can get + // into a state where it cannot process requests so we can get pprof outputs + // via SIGUSR2. 
+ if os.Getenv("VAULT_PPROF_WRITE_TO_FILE") != "" { + dir := "" + path := os.Getenv("VAULT_PPROF_FILE_PATH") + if path != "" { + if _, err := os.Stat(path); err != nil { + c.logger.Error("Checking pprof path failed", "error", err) + continue + } + dir = path + } else { + dir, err = os.MkdirTemp("", "vault-pprof") + if err != nil { + c.logger.Error("Could not create temporary directory for pprof", "error", err) + continue + } + } + + dumps := []string{"goroutine", "heap", "allocs", "threadcreate"} + for _, dump := range dumps { + pFile, err := os.Create(filepath.Join(dir, dump)) + if err != nil { + c.logger.Error("error creating pprof file", "name", dump, "error", err) + break + } + + err = pprof.Lookup(dump).WriteTo(pFile, 0) + if err != nil { + c.logger.Error("error generating pprof data", "name", dump, "error", err) + pFile.Close() + break + } + pFile.Close() + } + + c.logger.Info(fmt.Sprintf("Wrote pprof files to: %s", dir)) + } + } + } + // Notify systemd that the server is shutting down + c.notifySystemd(systemd.SdNotifyStopping) + + // Stop the listeners so that we don't process further client requests. + c.cleanupGuard.Do(listenerCloseFunc) + + if hcpLink != nil { + if err := hcpLink.Shutdown(); err != nil { + c.UI.Error(fmt.Sprintf("Error with HCP Link shutdown: %v", err.Error())) + } + } + + // Finalize will wait until after Vault is sealed, which means the + // request forwarding listeners will also be closed (and also + // waited for). 
+ if err := core.Shutdown(); err != nil { + c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err)) + } + + // Wait for dependent goroutines to complete + c.WaitGroup.Wait() + return retCode +} + +func (c *ServerCommand) configureSeals(ctx context.Context, config *server.Config, backend physical.Backend, infoKeys []string, info map[string]string) (*SetSealResponse, io.Reader, error) { + existingSealGenerationInfo, err := vault.PhysicalSealGenInfo(ctx, backend) + if err != nil { + return nil, nil, fmt.Errorf("Error getting seal generation info: %v", err) + } + + hasPartialPaths, err := hasPartiallyWrappedPaths(ctx, backend) + if err != nil { + return nil, nil, fmt.Errorf("Cannot determine if there are partially seal wrapped entries in storage: %v", err) + } + setSealResponse, err := setSeal(c, config, infoKeys, info, existingSealGenerationInfo, hasPartialPaths) + if err != nil { + return nil, nil, err + } + if setSealResponse.sealConfigWarning != nil { + c.UI.Warn(fmt.Sprintf("Warnings during seal configuration: %v", setSealResponse.sealConfigWarning)) + } + + if setSealResponse.barrierSeal == nil { + return nil, nil, errors.New("Could not create barrier seal! 
Most likely proper Seal configuration information was not set, but no error was generated.") + } + + // prepare a secure random reader for core + entropyAugLogger := c.logger.Named("entropy-augmentation") + var entropySources []*configutil.EntropySourcerInfo + for _, sealWrapper := range setSealResponse.barrierSeal.GetAccess().GetEnabledSealWrappersByPriority() { + if s, ok := sealWrapper.Wrapper.(entropy.Sourcer); ok { + entropySources = append(entropySources, &configutil.EntropySourcerInfo{ + Sourcer: s, + Name: sealWrapper.Name, + }) + } + } + secureRandomReader, err := configutil.CreateSecureRandomReaderFunc(config.SharedConfig, entropySources, entropyAugLogger) + if err != nil { + return nil, nil, err + } + + return setSealResponse, secureRandomReader, nil +} + +func (c *ServerCommand) finalizeSeals(ctx context.Context, seals *[]*vault.Seal) { + for _, seal := range *seals { + // Ensure that the seal finalizer is called, even if using verify-only + err := (*seal).Finalize(ctx) + if err != nil { + c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err)) + } + } +} + +// configureLogging takes the configuration and attempts to parse config values into 'log' friendly configuration values +// If all goes to plan, a logger is created and setup. 
+func (c *ServerCommand) configureLogging(config *server.Config) (hclog.InterceptLogger, error) { + // Parse all the log related config + logLevel, err := loghelper.ParseLogLevel(config.LogLevel) + if err != nil { + return nil, err + } + + logFormat, err := loghelper.ParseLogFormat(config.LogFormat) + if err != nil { + return nil, err + } + + logRotateDuration, err := parseutil.ParseDurationSecond(config.LogRotateDuration) + if err != nil { + return nil, err + } + + logCfg, err := loghelper.NewLogConfig("vault") + if err != nil { + return nil, err + } + logCfg.LogLevel = logLevel + logCfg.LogFormat = logFormat + logCfg.LogFilePath = config.LogFile + logCfg.LogRotateDuration = logRotateDuration + logCfg.LogRotateBytes = config.LogRotateBytes + logCfg.LogRotateMaxFiles = config.LogRotateMaxFiles + + return loghelper.Setup(logCfg, c.logWriter) +} + +func (c *ServerCommand) reloadHCPLink(hcpLinkVault *hcp_link.HCPLinkVault, conf *server.Config, core *vault.Core, hcpLogger hclog.Logger) (*hcp_link.HCPLinkVault, error) { + // trigger a shutdown + if hcpLinkVault != nil { + err := hcpLinkVault.Shutdown() + if err != nil { + return nil, err + } + } + + if conf.HCPLinkConf == nil { + // if cloud stanza is not configured, we should not show anything + // in the seal-status related to HCP link + core.SetHCPLinkStatus("", "") + return nil, nil + } + + // starting HCP link + hcpLink, err := hcp_link.NewHCPLink(conf.HCPLinkConf, core, hcpLogger) + if err != nil { + return nil, fmt.Errorf("failed to restart HCP Link and it is no longer running, %w", err) + } + + return hcpLink, nil +} + +func (c *ServerCommand) notifySystemd(status string) { + sent, err := systemd.SdNotify(false, status) + if err != nil { + c.logger.Error("error notifying systemd", "error", err) + } else { + if sent { + c.logger.Debug("sent systemd notification", "notification", status) + } else { + c.logger.Debug("would have sent systemd notification (systemd not present)", "notification", status) + } + } +} + 
+func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig) (*vault.InitResult, error) { + ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace) + + var recoveryConfig *vault.SealConfig + barrierConfig := &vault.SealConfig{ + SecretShares: 1, + SecretThreshold: 1, + Name: "shamir", + } + + if core.SealAccess().RecoveryKeySupported() { + recoveryConfig = &vault.SealConfig{ + SecretShares: 1, + SecretThreshold: 1, + } + } + + if core.SealAccess().StoredKeysSupported() != vaultseal.StoredKeysNotSupported { + barrierConfig.StoredShares = 1 + } + + // Initialize it with a basic single key + init, err := core.Initialize(ctx, &vault.InitParams{ + BarrierConfig: barrierConfig, + RecoveryConfig: recoveryConfig, + }) + if err != nil { + return nil, err + } + + // Handle unseal with stored keys + if core.SealAccess().StoredKeysSupported() == vaultseal.StoredKeysSupportedGeneric { + err := core.UnsealWithStoredKeys(ctx) + if err != nil { + return nil, err + } + } else { + // Copy the key so that it can be zeroed + key := make([]byte, len(init.SecretShares[0])) + copy(key, init.SecretShares[0]) + + // Unseal the core + unsealed, err := core.Unseal(key) + if err != nil { + return nil, err + } + if !unsealed { + return nil, fmt.Errorf("failed to unseal Vault for dev mode") + } + } + + isLeader, _, _, err := core.Leader() + if err != nil && err != vault.ErrHANotEnabled { + return nil, fmt.Errorf("failed to check active status: %w", err) + } + if err == nil { + leaderCount := 5 + for !isLeader { + if leaderCount == 0 { + buf := make([]byte, 1<<16) + runtime.Stack(buf, true) + return nil, fmt.Errorf("failed to get active status after five seconds; call stack is\n%s", buf) + } + time.Sleep(1 * time.Second) + isLeader, _, _, err = core.Leader() + if err != nil { + return nil, fmt.Errorf("failed to check active status: %w", err) + } + leaderCount-- + } + } + + // Generate a dev root token if one is provided in the flag + if 
coreConfig.DevToken != "" { + req := &logical.Request{ + ID: "dev-gen-root", + Operation: logical.UpdateOperation, + ClientToken: init.RootToken, + Path: "auth/token/create", + Data: map[string]interface{}{ + "id": coreConfig.DevToken, + "policies": []string{"root"}, + "no_parent": true, + "no_default_policy": true, + }, + } + resp, err := core.HandleRequest(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to create root token with ID %q: %w", coreConfig.DevToken, err) + } + if resp == nil { + return nil, fmt.Errorf("nil response when creating root token with ID %q", coreConfig.DevToken) + } + if resp.Auth == nil { + return nil, fmt.Errorf("nil auth when creating root token with ID %q", coreConfig.DevToken) + } + + init.RootToken = resp.Auth.ClientToken + + req.ID = "dev-revoke-init-root" + req.Path = "auth/token/revoke-self" + req.Data = nil + _, err = core.HandleRequest(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to revoke initial root token: %w", err) + } + } + + // Set the token + if !c.flagDevNoStoreToken { + tokenHelper, err := c.TokenHelper() + if err != nil { + return nil, err + } + if err := tokenHelper.Store(init.RootToken); err != nil { + return nil, err + } + } + + kvVer := "2" + if c.flagDevKVV1 || c.flagDevLeasedKV { + kvVer = "1" + } + req := &logical.Request{ + Operation: logical.UpdateOperation, + ClientToken: init.RootToken, + Path: "sys/mounts/secret", + Data: map[string]interface{}{ + "type": "kv", + "path": "secret/", + "description": "key/value secret storage", + "options": map[string]string{ + "version": kvVer, + }, + }, + } + resp, err := core.HandleRequest(ctx, req) + if err != nil { + return nil, fmt.Errorf("error creating default K/V store: %w", err) + } + if resp.IsError() { + return nil, fmt.Errorf("failed to create default K/V store: %w", resp.Error()) + } + + return init, nil +} + +func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info map[string]string, infoKeys []string, 
devListenAddress, tempDir string) int { + conf, opts := teststorage.ClusterSetup(base, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + BaseListenAddress: c.flagDevListenAddr, + Logger: c.logger, + TempDir: tempDir, + DefaultHandlerProperties: vault.HandlerProperties{ + ListenerConfig: &configutil.Listener{ + Profiling: configutil.ListenerProfiling{ + UnauthenticatedPProfAccess: true, + }, + Telemetry: configutil.ListenerTelemetry{ + UnauthenticatedMetricsAccess: true, + }, + }, + }, + }, nil) + testCluster := vault.NewTestCluster(&testing.RuntimeT{}, conf, opts) + defer c.cleanupGuard.Do(testCluster.Cleanup) + + if constants.IsEnterprise { + err := testcluster.WaitForActiveNodeAndPerfStandbys(context.Background(), testCluster) + if err != nil { + c.UI.Error(fmt.Sprintf("perf standbys didn't become ready: %v", err)) + return 1 + } + } + + info["cluster parameters path"] = testCluster.TempDir + infoKeys = append(infoKeys, "cluster parameters path") + + for i, core := range testCluster.Cores { + info[fmt.Sprintf("node %d api address", i)] = fmt.Sprintf("https://%s", core.Listeners[0].Address.String()) + infoKeys = append(infoKeys, fmt.Sprintf("node %d api address", i)) + } + + infoKeys = append(infoKeys, "version") + verInfo := version.GetVersion() + info["version"] = verInfo.FullVersionNumber(false) + if verInfo.Revision != "" { + info["version sha"] = strings.Trim(verInfo.Revision, "'") + infoKeys = append(infoKeys, "version sha") + } + + infoKeys = append(infoKeys, "cgo") + info["cgo"] = "disabled" + if version.CgoEnabled { + info["cgo"] = "enabled" + } + + infoKeys = append(infoKeys, "go version") + info["go version"] = runtime.Version() + + fipsStatus := entGetFIPSInfoKey() + if fipsStatus != "" { + infoKeys = append(infoKeys, "fips") + info["fips"] = fipsStatus + } + + // Server configuration output + padding := 24 + + sort.Strings(infoKeys) + c.UI.Output("==> Vault server configuration:\n") + + for _, k := range infoKeys { + 
c.UI.Output(fmt.Sprintf( + "%s%s: %s", + strings.Repeat(" ", padding-len(k)), + strings.Title(k), + info[k])) + } + + c.UI.Output("") + + for _, core := range testCluster.Cores { + core.Server.Handler = vaulthttp.Handler.Handler(&vault.HandlerProperties{ + Core: core.Core, + ListenerConfig: &configutil.Listener{}, + }) + core.SetClusterHandler(core.Server.Handler) + } + + testCluster.Start() + + ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace) + + if base.DevToken != "" { + req := &logical.Request{ + ID: "dev-gen-root", + Operation: logical.UpdateOperation, + ClientToken: testCluster.RootToken, + Path: "auth/token/create", + Data: map[string]interface{}{ + "id": base.DevToken, + "policies": []string{"root"}, + "no_parent": true, + "no_default_policy": true, + }, + } + resp, err := testCluster.Cores[0].HandleRequest(ctx, req) + if err != nil { + c.UI.Error(fmt.Sprintf("failed to create root token with ID %s: %s", base.DevToken, err)) + return 1 + } + if resp == nil { + c.UI.Error(fmt.Sprintf("nil response when creating root token with ID %s", base.DevToken)) + return 1 + } + if resp.Auth == nil { + c.UI.Error(fmt.Sprintf("nil auth when creating root token with ID %s", base.DevToken)) + return 1 + } + + testCluster.RootToken = resp.Auth.ClientToken + + req.ID = "dev-revoke-init-root" + req.Path = "auth/token/revoke-self" + req.Data = nil + _, err = testCluster.Cores[0].HandleRequest(ctx, req) + if err != nil { + c.UI.Output(fmt.Sprintf("failed to revoke initial root token: %s", err)) + return 1 + } + } + + // Set the token + tokenHelper, err := c.TokenHelper() + if err != nil { + c.UI.Error(fmt.Sprintf("Error getting token helper: %s", err)) + return 1 + } + if err := tokenHelper.Store(testCluster.RootToken); err != nil { + c.UI.Error(fmt.Sprintf("Error storing in token helper: %s", err)) + return 1 + } + + if err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(testCluster.RootToken), 0o600); err != nil 
{ + c.UI.Error(fmt.Sprintf("Error writing token to tempfile: %s", err)) + return 1 + } + + c.UI.Output(fmt.Sprintf( + "==> Three node dev mode is enabled\n\n" + + "The unseal key and root token are reproduced below in case you\n" + + "want to seal/unseal the Vault or play with authentication.\n", + )) + + for i, key := range testCluster.BarrierKeys { + c.UI.Output(fmt.Sprintf( + "Unseal Key %d: %s", + i+1, base64.StdEncoding.EncodeToString(key), + )) + } + + c.UI.Output(fmt.Sprintf( + "\nRoot Token: %s\n", testCluster.RootToken, + )) + + c.UI.Output(fmt.Sprintf( + "\nUseful env vars:\n"+ + "VAULT_TOKEN=%s\n"+ + "VAULT_ADDR=%s\n"+ + "VAULT_CACERT=%s/ca_cert.pem\n", + testCluster.RootToken, + testCluster.Cores[0].Client.Address(), + testCluster.TempDir, + )) + + if c.flagDevClusterJson != "" { + clusterJson := testcluster.ClusterJson{ + Nodes: []testcluster.ClusterNode{}, + CACertPath: filepath.Join(testCluster.TempDir, "ca_cert.pem"), + RootToken: testCluster.RootToken, + } + for _, core := range testCluster.Cores { + clusterJson.Nodes = append(clusterJson.Nodes, testcluster.ClusterNode{ + APIAddress: core.Client.Address(), + }) + } + b, err := jsonutil.EncodeJSON(clusterJson) + if err != nil { + c.UI.Error(fmt.Sprintf("Error encoding cluster.json: %s", err)) + return 1 + } + err = os.WriteFile(c.flagDevClusterJson, b, 0o600) + if err != nil { + c.UI.Error(fmt.Sprintf("Error writing cluster.json %q: %s", c.flagDevClusterJson, err)) + return 1 + } + } + + // Output the header that the server has started + c.UI.Output("==> Vault server started! Log data will stream in below:\n") + + // Inform any tests that the server is ready + select { + case c.startedCh <- struct{}{}: + default: + } + + // Release the log gate. + c.flushLog() + + // Wait for shutdown + shutdownTriggered := false + + for !shutdownTriggered { + select { + case <-c.ShutdownCh: + c.UI.Output("==> Vault shutdown triggered") + + // Stop the listeners so that we don't process further client requests. 
+ c.cleanupGuard.Do(testCluster.Cleanup) + + // Finalize will wait until after Vault is sealed, which means the + // request forwarding listeners will also be closed (and also + // waited for). + for _, core := range testCluster.Cores { + if err := core.Shutdown(); err != nil { + c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err)) + } + } + + shutdownTriggered = true + + case <-c.SighupCh: + c.UI.Output("==> Vault reload triggered") + for _, core := range testCluster.Cores { + if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil, core.Core); err != nil { + c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err)) + } + } + } + } + + return 0 +} + +// addPlugin adds any plugins to the catalog +func (c *ServerCommand) addPlugin(path, token string, core *vault.Core) error { + // Get the sha256 of the file at the given path. + pluginSum := func(p string) (string, error) { + hasher := sha256.New() + f, err := os.Open(p) + if err != nil { + return "", err + } + defer f.Close() + if _, err := io.Copy(hasher, f); err != nil { + return "", err + } + return hex.EncodeToString(hasher.Sum(nil)), nil + } + + // Mount any test plugins. We do this explicitly before we inform tests of + // a completely booted server intentionally. 
+ sha256sum, err := pluginSum(path) + if err != nil { + return err + } + + // Default the name to the basename of the binary + name := filepath.Base(path) + + // File a request against core to enable the plugin + req := &logical.Request{ + Operation: logical.UpdateOperation, + ClientToken: token, + Path: fmt.Sprintf("sys/plugins/catalog/%s", name), + Data: map[string]interface{}{ + "sha256": sha256sum, + "command": name, + }, + } + ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace) + if _, err := core.HandleRequest(ctx, req); err != nil { + return err + } + + return nil +} + +// detectRedirect is used to attempt redirect address detection +func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect, + config *server.Config, +) (string, error) { + // Get the hostname + host, err := detect.DetectHostAddr() + if err != nil { + return "", err + } + + // set [] for ipv6 addresses + if strings.Contains(host, ":") && !strings.Contains(host, "]") { + host = "[" + host + "]" + } + + // Default the port and scheme + scheme := "https" + port := 8200 + + // Attempt to detect overrides + for _, list := range config.Listeners { + // Only attempt TCP + if list.Type != "tcp" { + continue + } + + // Check if TLS is disabled + if list.TLSDisable { + scheme = "http" + } + + // Check for address override + addr := list.Address + if addr == "" { + addr = "127.0.0.1:8200" + } + + // Check for localhost + hostStr, portStr, err := net.SplitHostPort(addr) + if err != nil { + continue + } + if hostStr == "127.0.0.1" { + host = hostStr + } + + // Check for custom port + listPort, err := strconv.Atoi(portStr) + if err != nil { + continue + } + port = listPort + } + + // Build a URL + url := &url.URL{ + Scheme: scheme, + Host: fmt.Sprintf("%s:%d", host, port), + } + + // Return the URL string + return url.String(), nil +} + +func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]reloadutil.ReloadFunc, configPath []string, core 
*vault.Core) error { + lock.RLock() + defer lock.RUnlock() + + var reloadErrors *multierror.Error + + for k, relFuncs := range *reloadFuncs { + switch { + case strings.HasPrefix(k, "listener|"): + for _, relFunc := range relFuncs { + if relFunc != nil { + if err := relFunc(); err != nil { + reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("error encountered reloading listener: %w", err)) + } + } + } + + case strings.HasPrefix(k, "audit_file|"): + for _, relFunc := range relFuncs { + if relFunc != nil { + if err := relFunc(); err != nil { + reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("error encountered reloading file audit device at path %q: %w", strings.TrimPrefix(k, "audit_file|"), err)) + } + } + } + } + } + + // Set Introspection Endpoint to enabled with new value in the config after reload + core.ReloadIntrospectionEndpointEnabled() + + // Send a message that we reloaded. This prevents "guessing" sleep times + // in tests. + select { + case c.reloadedCh <- struct{}{}: + default: + } + + return reloadErrors.ErrorOrNil() +} + +// storePidFile is used to write out our PID to a file if necessary +func (c *ServerCommand) storePidFile(pidPath string) error { + // Quit fast if no pidfile + if pidPath == "" { + return nil + } + + // Open the PID file + pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600) + if err != nil { + return fmt.Errorf("could not open pid file: %w", err) + } + defer pidFile.Close() + + // Write out the PID + pid := os.Getpid() + _, err = pidFile.WriteString(fmt.Sprintf("%d", pid)) + if err != nil { + return fmt.Errorf("could not write to pid file: %w", err) + } + return nil +} + +// removePidFile is used to cleanup the PID file if necessary +func (c *ServerCommand) removePidFile(pidPath string) error { + if pidPath == "" { + return nil + } + return os.Remove(pidPath) +} + +// storageMigrationActive checks and warns against in-progress storage migrations. 
+// This function will block until storage is available. +func (c *ServerCommand) storageMigrationActive(backend physical.Backend) bool { + first := true + + for { + migrationStatus, err := CheckStorageMigration(backend) + if err == nil { + if migrationStatus != nil { + startTime := migrationStatus.Start.Format(time.RFC3339) + c.UI.Error(wrapAtLength(fmt.Sprintf("ERROR! Storage migration in progress (started: %s). "+ + "Server startup is prevented until the migration completes. Use 'vault operator migrate -reset' "+ + "to force clear the migration lock.", startTime))) + return true + } + return false + } + if first { + first = false + c.UI.Warn("\nWARNING! Unable to read storage migration status.") + + // unexpected state, so stop buffering log messages + c.flushLog() + } + c.logger.Warn("storage migration check error", "error", err.Error()) + + timer := time.NewTimer(2 * time.Second) + select { + case <-timer.C: + case <-c.ShutdownCh: + timer.Stop() + return true + } + } +} + +type StorageMigrationStatus struct { + Start time.Time `json:"start"` +} + +func CheckStorageMigration(b physical.Backend) (*StorageMigrationStatus, error) { + entry, err := b.Get(context.Background(), storageMigrationLock) + if err != nil { + return nil, err + } + + if entry == nil { + return nil, nil + } + + var status StorageMigrationStatus + if err := jsonutil.DecodeJSON(entry.Value, &status); err != nil { + return nil, err + } + + return &status, nil +} + +type SetSealResponse struct { + barrierSeal vault.Seal + unwrapSeal vault.Seal + + // sealConfigError is present if there was an error configuring wrappers, other than KeyNotFound. 
+ sealConfigError error + sealConfigWarning error +} + +func (r *SetSealResponse) getCreatedSeals() []*vault.Seal { + var ret []*vault.Seal + if r.barrierSeal != nil { + ret = append(ret, &r.barrierSeal) + } + if r.unwrapSeal != nil { + ret = append(ret, &r.unwrapSeal) + } + return ret +} + +// setSeal return barrierSeal, barrierWrapper, unwrapSeal, all the created seals, and all the provided seals from the configs so we can close them in Run +// The two errors are the sealConfigError and the regular error +func setSeal(c *ServerCommand, config *server.Config, infoKeys []string, info map[string]string, existingSealGenerationInfo *vaultseal.SealGenerationInfo, hasPartiallyWrappedPaths bool) (*SetSealResponse, error) { + if c.flagDevAutoSeal { + access, _ := vaultseal.NewTestSeal(nil) + barrierSeal := vault.NewAutoSeal(access) + + return &SetSealResponse{barrierSeal: barrierSeal}, nil + } + + // Handle the case where no seal is provided + switch len(config.Seals) { + case 0: + config.Seals = append(config.Seals, &configutil.KMS{ + Type: vault.SealConfigTypeShamir.String(), + Priority: 1, + Name: "shamir", + }) + default: + allSealsDisabled := true + for _, c := range config.Seals { + if !c.Disabled { + allSealsDisabled = false + } else if c.Type == vault.SealConfigTypeShamir.String() { + return nil, errors.New("shamir seals cannot be set disabled (they should simply not be set)") + } + } + // If all seals are disabled assume they want to + // migrate to a shamir seal and simply didn't provide it + if allSealsDisabled { + config.Seals = append(config.Seals, &configutil.KMS{ + Type: vault.SealConfigTypeShamir.String(), + Priority: 1, + Name: "shamir", + }) + } + } + + var sealConfigError error + var sealConfigWarning error + recordSealConfigError := func(err error) { + sealConfigError = errors.Join(sealConfigError, err) + } + recordSealConfigWarning := func(err error) { + sealConfigWarning = errors.Join(sealConfigWarning, err) + } + enabledSealWrappers := 
make([]*vaultseal.SealWrapper, 0) + disabledSealWrappers := make([]*vaultseal.SealWrapper, 0) + allSealKmsConfigs := make([]*configutil.KMS, 0) + + type infoKeysAndMap struct { + keys []string + theMap map[string]string + } + sealWrapperInfoKeysMap := make(map[string]infoKeysAndMap) + + configuredSeals := 0 + for _, configSeal := range config.Seals { + sealTypeEnvVarName := "VAULT_SEAL_TYPE" + if configSeal.Priority > 1 { + sealTypeEnvVarName = sealTypeEnvVarName + "_" + configSeal.Name + } + + if !configSeal.Disabled && os.Getenv(sealTypeEnvVarName) != "" { + sealType := os.Getenv(sealTypeEnvVarName) + configSeal.Type = sealType + } + + sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", configSeal.Type)) + c.allLoggers = append(c.allLoggers, sealLogger) + + allSealKmsConfigs = append(allSealKmsConfigs, configSeal) + var wrapperInfoKeys []string + wrapperInfoMap := map[string]string{} + wrapper, wrapperConfigError := configutil.ConfigureWrapper(configSeal, &wrapperInfoKeys, &wrapperInfoMap, sealLogger) + if wrapperConfigError == nil { + // for some reason configureWrapper in kms.go returns nil wrapper and nil error for wrapping.WrapperTypeShamir + if wrapper == nil { + wrapper = aeadwrapper.NewShamirWrapper() + } + configuredSeals++ + } else if server.IsMultisealSupported() { + recordSealConfigWarning(fmt.Errorf("error configuring seal: %v", wrapperConfigError)) + } else { + // It seems that we are checking for this particular error here is to distinguish between a + // mis-configured seal vs one that fails for another reason. Apparently the only other reason is + // a key not found error. 
It seems the intention is for the key not found error to be returned + // as a seal specific error later + if !errwrap.ContainsType(wrapperConfigError, new(logical.KeyNotFoundError)) { + return nil, fmt.Errorf("error parsing Seal configuration: %s", wrapperConfigError) + } else { + sealLogger.Error("error configuring seal", "name", configSeal.Name, "err", wrapperConfigError) + recordSealConfigError(wrapperConfigError) + } + } + + sealWrapper := vaultseal.NewSealWrapper( + wrapper, + configSeal.Priority, + configSeal.Name, + configSeal.Type, + configSeal.Disabled, + wrapperConfigError == nil, + ) + + if configSeal.Disabled { + disabledSealWrappers = append(disabledSealWrappers, sealWrapper) + } else { + enabledSealWrappers = append(enabledSealWrappers, sealWrapper) + } + + sealWrapperInfoKeysMap[sealWrapper.Name] = infoKeysAndMap{ + keys: wrapperInfoKeys, + theMap: wrapperInfoMap, + } + } + + if len(enabledSealWrappers) == 0 && len(disabledSealWrappers) == 0 && sealConfigWarning != nil { + // All of them errored out, so warnings are now errors + recordSealConfigError(sealConfigWarning) + sealConfigWarning = nil + } + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Set the info keys, this modifies the function arguments `info` and `infoKeys` + // TODO(SEALHA): Why are we doing this? What is its use? 
+ appendWrapperInfoKeys := func(prefix string, sealWrappers []*vaultseal.SealWrapper) { + if len(sealWrappers) == 0 { + return + } + useName := false + if len(sealWrappers) > 1 { + useName = true + } + for _, sealWrapper := range sealWrappers { + if useName { + prefix = fmt.Sprintf("%s %s ", prefix, sealWrapper.Name) + } + for _, k := range sealWrapperInfoKeysMap[sealWrapper.Name].keys { + infoKeys = append(infoKeys, prefix+k) + info[prefix+k] = sealWrapperInfoKeysMap[sealWrapper.Name].theMap[k] + } + } + } + appendWrapperInfoKeys("", enabledSealWrappers) + appendWrapperInfoKeys("Old", disabledSealWrappers) + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Compute seal generation + sealGenerationInfo, err := c.computeSealGenerationInfo(existingSealGenerationInfo, allSealKmsConfigs, hasPartiallyWrappedPaths) + if err != nil { + return nil, err + } + + //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Create the Seals + + containsShamir := func(sealWrappers []*vaultseal.SealWrapper) bool { + for _, si := range sealWrappers { + if vault.SealConfigTypeShamir.IsSameAs(si.SealConfigType) { + return true + } + } + return false + } + + var barrierSeal vault.Seal + var unwrapSeal vault.Seal + + sealLogger := c.logger + switch { + case len(enabledSealWrappers) == 0: + return nil, errors.Join(sealConfigWarning, errors.New("no enabled Seals in configuration")) + case configuredSeals == 0: + return nil, errors.Join(sealConfigWarning, errors.New("no seals were successfully initialized")) + case len(enabledSealWrappers) == 1 && containsShamir(enabledSealWrappers): + // The barrier seal is Shamir. If there are any disabled seals, then we put them all in the same + // autoSeal. 
+ a, err := vaultseal.NewAccess(sealLogger, sealGenerationInfo, enabledSealWrappers) + if err != nil { + return nil, err + } + barrierSeal = vault.NewDefaultSeal(a) + if len(disabledSealWrappers) > 0 { + a, err = vaultseal.NewAccess(sealLogger, sealGenerationInfo, disabledSealWrappers) + if err != nil { + return nil, err + } + unwrapSeal = vault.NewAutoSeal(a) + } + + case len(disabledSealWrappers) == 1 && containsShamir(disabledSealWrappers): + // The unwrap seal is Shamir, we are migrating to an autoSeal. + a, err := vaultseal.NewAccess(sealLogger, sealGenerationInfo, enabledSealWrappers) + if err != nil { + return nil, err + } + barrierSeal = vault.NewAutoSeal(a) + a, err = vaultseal.NewAccess(sealLogger, sealGenerationInfo, disabledSealWrappers) + if err != nil { + return nil, err + } + unwrapSeal = vault.NewDefaultSeal(a) + + case server.IsMultisealSupported(): + // We know we are not using Shamir seal, that we are not migrating away from one, and multi seal is supported, + // so just put enabled and disabled wrappers on the same seal Access + allSealWrappers := append(enabledSealWrappers, disabledSealWrappers...) + a, err := vaultseal.NewAccess(sealLogger, sealGenerationInfo, allSealWrappers) + if err != nil { + return nil, err + } + barrierSeal = vault.NewAutoSeal(a) + if configuredSeals < len(enabledSealWrappers) { + c.UI.Warn("WARNING: running with fewer than all configured seals during unseal. Will not be fully highly available until errors are corrected and Vault restarted.") + } + case len(enabledSealWrappers) == 1: + // We may have multiple seals disabled, but we know Shamir is not one of them. 
+ a, err := vaultseal.NewAccess(sealLogger, sealGenerationInfo, enabledSealWrappers) + if err != nil { + return nil, err + } + barrierSeal = vault.NewAutoSeal(a) + if len(disabledSealWrappers) > 0 { + a, err = vaultseal.NewAccess(sealLogger, sealGenerationInfo, disabledSealWrappers) + if err != nil { + return nil, err + } + unwrapSeal = vault.NewAutoSeal(a) + } + + default: + // We know there are multiple enabled seals but multi seal is not supported. + return nil, errors.Join(sealConfigWarning, errors.New("error: more than one enabled seal found")) + } + + return &SetSealResponse{ + barrierSeal: barrierSeal, + unwrapSeal: unwrapSeal, + sealConfigError: sealConfigError, + sealConfigWarning: sealConfigWarning, + }, nil +} + +func (c *ServerCommand) computeSealGenerationInfo(existingSealGenInfo *vaultseal.SealGenerationInfo, sealConfigs []*configutil.KMS, hasPartiallyWrappedPaths bool) (*vaultseal.SealGenerationInfo, error) { + generation := uint64(1) + + if existingSealGenInfo != nil { + // This forces a seal re-wrap on all seal related config changes, as we can't + // be sure what effect the config change might do. This is purposefully different + // from within the Validate call below that just matches on seal configs based + // on name/type. + if cmp.Equal(existingSealGenInfo.Seals, sealConfigs) { + return existingSealGenInfo, nil + } + generation = existingSealGenInfo.Generation + 1 + } + c.logger.Info("incrementing seal generation", "generation", generation) + + // If the stored copy doesn't match the current configuration, we introduce a new generation + // which keeps track if a rewrap of all CSPs and seal wrapped values has completed (initially false). 
+ newSealGenInfo := &vaultseal.SealGenerationInfo{ + Generation: generation, + Seals: sealConfigs, + } + + if server.IsMultisealSupported() { + err := newSealGenInfo.Validate(existingSealGenInfo, hasPartiallyWrappedPaths) + if err != nil { + return nil, err + } + } + + return newSealGenInfo, nil +} + +func hasPartiallyWrappedPaths(ctx context.Context, backend physical.Backend) (bool, error) { + paths, err := vault.GetPartiallySealWrappedPaths(ctx, backend) + if err != nil { + return false, err + } + + return len(paths) > 0, nil +} + +func initHaBackend(c *ServerCommand, config *server.Config, coreConfig *vault.CoreConfig, backend physical.Backend) (bool, error) { + // Initialize the separate HA storage backend, if it exists + var ok bool + if config.HAStorage != nil { + if config.Storage.Type == storageTypeRaft && config.HAStorage.Type == storageTypeRaft { + return false, fmt.Errorf("Raft cannot be set both as 'storage' and 'ha_storage'. Setting 'storage' to 'raft' will automatically set it up for HA operations as well") + } + + if config.Storage.Type == storageTypeRaft { + return false, fmt.Errorf("HA storage cannot be declared when Raft is the storage type") + } + + factory, exists := c.PhysicalBackends[config.HAStorage.Type] + if !exists { + return false, fmt.Errorf("Unknown HA storage type %s", config.HAStorage.Type) + } + + namedHALogger := c.logger.Named("ha." 
+ config.HAStorage.Type) + c.allLoggers = append(c.allLoggers, namedHALogger) + habackend, err := factory(config.HAStorage.Config, namedHALogger) + if err != nil { + return false, fmt.Errorf("Error initializing HA storage of type %s: %s", config.HAStorage.Type, err) + } + + if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok { + return false, fmt.Errorf("Specified HA storage does not support HA") + } + + if !coreConfig.HAPhysical.HAEnabled() { + return false, fmt.Errorf("Specified HA storage has HA support disabled; please consult documentation") + } + + coreConfig.RedirectAddr = config.HAStorage.RedirectAddr + disableClustering := config.HAStorage.DisableClustering + + if config.HAStorage.Type == storageTypeRaft && disableClustering { + return disableClustering, fmt.Errorf("Disable clustering cannot be set to true when Raft is the HA storage type") + } + + if !disableClustering { + coreConfig.ClusterAddr = config.HAStorage.ClusterAddr + } + } else { + if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok { + coreConfig.RedirectAddr = config.Storage.RedirectAddr + disableClustering := config.Storage.DisableClustering + + if (config.Storage.Type == storageTypeRaft) && disableClustering { + return disableClustering, fmt.Errorf("Disable clustering cannot be set to true when Raft is the storage type") + } + + if !disableClustering { + coreConfig.ClusterAddr = config.Storage.ClusterAddr + } + } + } + return config.DisableClustering, nil +} + +func determineRedirectAddr(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config) error { + var retErr error + if envRA := os.Getenv("VAULT_API_ADDR"); envRA != "" { + coreConfig.RedirectAddr = envRA + } else if envRA := os.Getenv("VAULT_REDIRECT_ADDR"); envRA != "" { + coreConfig.RedirectAddr = envRA + } else if envAA := os.Getenv("VAULT_ADVERTISE_ADDR"); envAA != "" { + coreConfig.RedirectAddr = envAA + } + + // Attempt to detect the redirect address, if possible + if 
coreConfig.RedirectAddr == "" { + c.logger.Warn("no `api_addr` value specified in config or in VAULT_API_ADDR; falling back to detection if possible, but this value should be manually set") + } + + var ok bool + var detect physical.RedirectDetect + if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() { + detect, ok = coreConfig.HAPhysical.(physical.RedirectDetect) + } else { + detect, ok = coreConfig.Physical.(physical.RedirectDetect) + } + if ok && coreConfig.RedirectAddr == "" { + redirect, err := c.detectRedirect(detect, config) + // the following errors did not cause Run to return, so I'm not returning these + // as errors. + if err != nil { + retErr = fmt.Errorf("Error detecting api address: %s", err) + } else if redirect == "" { + retErr = fmt.Errorf("Failed to detect api address") + } else { + coreConfig.RedirectAddr = redirect + } + } + if coreConfig.RedirectAddr == "" && c.flagDev { + protocol := "http" + if c.flagDevTLS { + protocol = "https" + } + coreConfig.RedirectAddr = fmt.Sprintf("%s://%s", protocol, config.Listeners[0].Address) + } + return retErr +} + +func findClusterAddress(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, disableClustering bool) error { + if disableClustering { + coreConfig.ClusterAddr = "" + } else if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" { + coreConfig.ClusterAddr = envCA + } else { + var addrToUse string + switch { + case coreConfig.ClusterAddr == "" && coreConfig.RedirectAddr != "": + addrToUse = coreConfig.RedirectAddr + case c.flagDev: + addrToUse = fmt.Sprintf("http://%s", config.Listeners[0].Address) + default: + goto CLUSTER_SYNTHESIS_COMPLETE + } + u, err := url.ParseRequestURI(addrToUse) + if err != nil { + return fmt.Errorf("Error parsing synthesized cluster address %s: %v", addrToUse, err) + } + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // This sucks, as it's a const in the function but not exported in the package + if 
strings.Contains(err.Error(), "missing port in address") { + host = u.Host + port = "443" + } else { + return fmt.Errorf("Error parsing api address: %v", err) + } + } + nPort, err := strconv.Atoi(port) + if err != nil { + return fmt.Errorf("Error parsing synthesized address; failed to convert %q to a numeric: %v", port, err) + } + u.Host = net.JoinHostPort(host, strconv.Itoa(nPort+1)) + // Will always be TLS-secured + u.Scheme = "https" + coreConfig.ClusterAddr = u.String() + } + +CLUSTER_SYNTHESIS_COMPLETE: + + if coreConfig.RedirectAddr == coreConfig.ClusterAddr && len(coreConfig.RedirectAddr) != 0 { + return fmt.Errorf("Address %q used for both API and cluster addresses", coreConfig.RedirectAddr) + } + + if coreConfig.ClusterAddr != "" { + rendered, err := configutil.ParseSingleIPTemplate(coreConfig.ClusterAddr) + if err != nil { + return fmt.Errorf("Error parsing cluster address %s: %v", coreConfig.ClusterAddr, err) + } + coreConfig.ClusterAddr = rendered + // Force https as we'll always be TLS-secured + u, err := url.ParseRequestURI(coreConfig.ClusterAddr) + if err != nil { + return fmt.Errorf("Error parsing cluster address %s: %v", coreConfig.ClusterAddr, err) + } + u.Scheme = "https" + coreConfig.ClusterAddr = u.String() + } + return nil +} + +func runUnseal(c *ServerCommand, core *vault.Core, ctx context.Context) { + for { + err := core.UnsealWithStoredKeys(ctx) + if err == nil { + return + } + + if vault.IsFatalError(err) { + c.logger.Error("error unsealing core", "error", err) + return + } + c.logger.Warn("failed to unseal core", "error", err) + + timer := time.NewTimer(5 * time.Second) + select { + case <-c.ShutdownCh: + timer.Stop() + return + case <-timer.C: + } + } +} + +func createCoreConfig(c *ServerCommand, config *server.Config, backend physical.Backend, configSR sr.ServiceRegistration, barrierSeal, unwrapSeal vault.Seal, + metricsHelper *metricsutil.MetricsHelper, metricSink *metricsutil.ClusterMetricSink, secureRandomReader io.Reader, +) 
vault.CoreConfig { + coreConfig := &vault.CoreConfig{ + RawConfig: config, + Physical: backend, + RedirectAddr: config.Storage.RedirectAddr, + StorageType: config.Storage.Type, + HAPhysical: nil, + ServiceRegistration: configSR, + Seal: barrierSeal, + UnwrapSeal: unwrapSeal, + AuditBackends: c.AuditBackends, + CredentialBackends: c.CredentialBackends, + LogicalBackends: c.LogicalBackends, + LogLevel: config.LogLevel, + Logger: c.logger, + DetectDeadlocks: config.DetectDeadlocks, + ImpreciseLeaseRoleTracking: config.ImpreciseLeaseRoleTracking, + DisableSentinelTrace: config.DisableSentinelTrace, + DisableCache: config.DisableCache, + DisableMlock: config.DisableMlock, + MaxLeaseTTL: config.MaxLeaseTTL, + DefaultLeaseTTL: config.DefaultLeaseTTL, + ClusterName: config.ClusterName, + CacheSize: config.CacheSize, + PluginDirectory: config.PluginDirectory, + PluginFileUid: config.PluginFileUid, + PluginFilePermissions: config.PluginFilePermissions, + EnableUI: config.EnableUI, + EnableRaw: config.EnableRawEndpoint, + EnableIntrospection: config.EnableIntrospectionEndpoint, + DisableSealWrap: config.DisableSealWrap, + DisablePerformanceStandby: config.DisablePerformanceStandby, + DisableIndexing: config.DisableIndexing, + AllLoggers: c.allLoggers, + BuiltinRegistry: builtinplugins.Registry, + DisableKeyEncodingChecks: config.DisablePrintableCheck, + MetricsHelper: metricsHelper, + MetricSink: metricSink, + SecureRandomReader: secureRandomReader, + EnableResponseHeaderHostname: config.EnableResponseHeaderHostname, + EnableResponseHeaderRaftNodeID: config.EnableResponseHeaderRaftNodeID, + License: config.License, + LicensePath: config.LicensePath, + DisableSSCTokens: config.DisableSSCTokens, + Experiments: config.Experiments, + AdministrativeNamespacePath: config.AdministrativeNamespacePath, + } + + if c.flagDev { + coreConfig.EnableRaw = true + coreConfig.EnableIntrospection = true + coreConfig.DevToken = c.flagDevRootTokenID + if c.flagDevLeasedKV { + 
coreConfig.LogicalBackends["kv"] = vault.LeasedPassthroughBackendFactory + } + if c.flagDevPluginDir != "" { + coreConfig.PluginDirectory = c.flagDevPluginDir + } + if c.flagDevLatency > 0 { + injectLatency := time.Duration(c.flagDevLatency) * time.Millisecond + if _, txnOK := backend.(physical.Transactional); txnOK { + coreConfig.Physical = physical.NewTransactionalLatencyInjector(backend, injectLatency, c.flagDevLatencyJitter, c.logger) + } else { + coreConfig.Physical = physical.NewLatencyInjector(backend, injectLatency, c.flagDevLatencyJitter, c.logger) + } + } + } + return *coreConfig +} + +func runListeners(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, configSR sr.ServiceRegistration) error { + if sd := coreConfig.GetServiceRegistration(); sd != nil { + if err := configSR.Run(c.ShutdownCh, c.WaitGroup, coreConfig.RedirectAddr); err != nil { + return fmt.Errorf("Error running service_registration of type %s: %s", config.ServiceRegistration.Type, err) + } + } + return nil +} + +func initDevCore(c *ServerCommand, coreConfig *vault.CoreConfig, config *server.Config, core *vault.Core, certDir string, clusterJSON *testcluster.ClusterJson) error { + if c.flagDev && !c.flagDevSkipInit { + + init, err := c.enableDev(core, coreConfig) + if err != nil { + return fmt.Errorf("Error initializing Dev mode: %s", err) + } + + if clusterJSON != nil { + clusterJSON.RootToken = init.RootToken + } + + var plugins, pluginsNotLoaded []string + if c.flagDevPluginDir != "" && c.flagDevPluginInit { + + f, err := os.Open(c.flagDevPluginDir) + if err != nil { + return fmt.Errorf("Error reading plugin dir: %s", err) + } + + list, err := f.Readdirnames(0) + f.Close() + if err != nil { + return fmt.Errorf("Error listing plugins: %s", err) + } + + for _, name := range list { + path := filepath.Join(f.Name(), name) + if err := c.addPlugin(path, init.RootToken, core); err != nil { + if !errwrap.Contains(err, vault.ErrPluginBadType.Error()) { + return fmt.Errorf("Error 
enabling plugin %s: %s", name, err) + } + pluginsNotLoaded = append(pluginsNotLoaded, name) + continue + } + plugins = append(plugins, name) + } + + sort.Strings(plugins) + } + + var qw *quiescenceSink + var qwo sync.Once + qw = &quiescenceSink{ + t: time.AfterFunc(100*time.Millisecond, func() { + qwo.Do(func() { + c.logger.DeregisterSink(qw) + + // Print the big dev mode warning! + c.UI.Warn(wrapAtLength( + "WARNING! dev mode is enabled! In this mode, Vault runs entirely " + + "in-memory and starts unsealed with a single unseal key. The root " + + "token is already authenticated to the CLI, so you can immediately " + + "begin using Vault.")) + c.UI.Warn("") + c.UI.Warn("You may need to set the following environment variables:") + c.UI.Warn("") + + protocol := "http://" + if c.flagDevTLS { + protocol = "https://" + } + + endpointURL := protocol + config.Listeners[0].Address + if runtime.GOOS == "windows" { + c.UI.Warn("PowerShell:") + c.UI.Warn(fmt.Sprintf(" $env:VAULT_ADDR=\"%s\"", endpointURL)) + c.UI.Warn("cmd.exe:") + c.UI.Warn(fmt.Sprintf(" set VAULT_ADDR=%s", endpointURL)) + } else { + c.UI.Warn(fmt.Sprintf(" $ export VAULT_ADDR='%s'", endpointURL)) + } + + if c.flagDevTLS { + if runtime.GOOS == "windows" { + c.UI.Warn("PowerShell:") + c.UI.Warn(fmt.Sprintf(" $env:VAULT_CACERT=\"%s/vault-ca.pem\"", certDir)) + c.UI.Warn("cmd.exe:") + c.UI.Warn(fmt.Sprintf(" set VAULT_CACERT=%s/vault-ca.pem", certDir)) + } else { + c.UI.Warn(fmt.Sprintf(" $ export VAULT_CACERT='%s/vault-ca.pem'", certDir)) + } + c.UI.Warn("") + } + + // Unseal key is not returned if stored shares is supported + if len(init.SecretShares) > 0 { + c.UI.Warn("") + c.UI.Warn(wrapAtLength( + "The unseal key and root token are displayed below in case you want " + + "to seal/unseal the Vault or re-authenticate.")) + c.UI.Warn("") + c.UI.Warn(fmt.Sprintf("Unseal Key: %s", base64.StdEncoding.EncodeToString(init.SecretShares[0]))) + } + + if len(init.RecoveryShares) > 0 { + c.UI.Warn("") + 
c.UI.Warn(wrapAtLength( + "The recovery key and root token are displayed below in case you want " + + "to seal/unseal the Vault or re-authenticate.")) + c.UI.Warn("") + c.UI.Warn(fmt.Sprintf("Recovery Key: %s", base64.StdEncoding.EncodeToString(init.RecoveryShares[0]))) + } + + c.UI.Warn(fmt.Sprintf("Root Token: %s", init.RootToken)) + + if len(plugins) > 0 { + c.UI.Warn("") + c.UI.Warn(wrapAtLength( + "The following dev plugins are registered in the catalog:")) + for _, p := range plugins { + c.UI.Warn(fmt.Sprintf(" - %s", p)) + } + } + + if len(pluginsNotLoaded) > 0 { + c.UI.Warn("") + c.UI.Warn(wrapAtLength( + "The following dev plugins FAILED to be registered in the catalog due to unknown type:")) + for _, p := range pluginsNotLoaded { + c.UI.Warn(fmt.Sprintf(" - %s", p)) + } + } + + c.UI.Warn("") + c.UI.Warn(wrapAtLength( + "Development mode should NOT be used in production installations!")) + c.UI.Warn("") + }) + }), + } + c.logger.RegisterSink(qw) + } + return nil +} + +// Initialize the HTTP servers +func startHttpServers(c *ServerCommand, core *vault.Core, config *server.Config, lns []listenerutil.Listener) error { + for _, ln := range lns { + if ln.Config == nil { + return fmt.Errorf("Found nil listener config after parsing") + } + + if err := config2.IsValidListener(ln.Config); err != nil { + return err + } + + handler := vaulthttp.Handler.Handler(&vault.HandlerProperties{ + Core: core, + ListenerConfig: ln.Config, + DisablePrintableCheck: config.DisablePrintableCheck, + RecoveryMode: c.flagRecovery, + }) + + if len(ln.Config.XForwardedForAuthorizedAddrs) > 0 { + handler = vaulthttp.WrapForwardedForHandler(handler, ln.Config) + } + + // server defaults + server := &http.Server{ + Handler: handler, + ReadHeaderTimeout: 10 * time.Second, + ReadTimeout: 30 * time.Second, + IdleTimeout: 5 * time.Minute, + ErrorLog: c.logger.StandardLogger(nil), + } + + // override server defaults with config values for read/write/idle timeouts if configured + if 
ln.Config.HTTPReadHeaderTimeout > 0 { + server.ReadHeaderTimeout = ln.Config.HTTPReadHeaderTimeout + } + if ln.Config.HTTPReadTimeout > 0 { + server.ReadTimeout = ln.Config.HTTPReadTimeout + } + if ln.Config.HTTPWriteTimeout > 0 { + server.WriteTimeout = ln.Config.HTTPWriteTimeout + } + if ln.Config.HTTPIdleTimeout > 0 { + server.IdleTimeout = ln.Config.HTTPIdleTimeout + } + + // server config tests can exit now + if c.flagTestServerConfig { + continue + } + + go server.Serve(ln.Listener) + } + return nil +} + +func (c *ServerCommand) reloadSeals(ctx context.Context, core *vault.Core, config *server.Config) (*SetSealResponse, error) { + if len(config.Seals) == 1 && config.Seals[0].Disabled { + return nil, errors.New("moving from autoseal to shamir requires seal migration") + } + + if core.SealAccess().BarrierSealConfigType() == vault.SealConfigTypeShamir { + return nil, errors.New("moving from shamir to autoseal requires seal migration") + } + + infoKeysReload := make([]string, 0) + infoReload := make(map[string]string) + + setSealResponse, secureRandomReader, err := c.configureSeals(ctx, config, core.PhysicalAccess(), infoKeysReload, infoReload) + if err != nil { + return nil, err + } + if setSealResponse.sealConfigError != nil { + return nil, err + } + + err = core.SetSeals(setSealResponse.barrierSeal, secureRandomReader) + if err != nil { + return nil, fmt.Errorf("error setting seal: %s", err) + } + + newGen := setSealResponse.barrierSeal.GetAccess().GetSealGenerationInfo() + + if err := core.SetPhysicalSealGenInfo(ctx, newGen); err != nil { + c.logger.Warn("could not update seal information in storage", "err", err) + } + + return setSealResponse, nil +} + +func SetStorageMigration(b physical.Backend, active bool) error { + if !active { + return b.Delete(context.Background(), storageMigrationLock) + } + + status := StorageMigrationStatus{ + Start: time.Now(), + } + + enc, err := jsonutil.EncodeJSON(status) + if err != nil { + return err + } + + entry := 
&physical.Entry{ + Key: storageMigrationLock, + Value: enc, + } + + return b.Put(context.Background(), entry) +} + +type grpclogFaker struct { + logger hclog.Logger + log bool +} + +func (g *grpclogFaker) Fatal(args ...interface{}) { + g.logger.Error(fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *grpclogFaker) Fatalf(format string, args ...interface{}) { + g.logger.Error(fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *grpclogFaker) Fatalln(args ...interface{}) { + g.logger.Error(fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *grpclogFaker) Print(args ...interface{}) { + if g.log && g.logger.IsDebug() { + g.logger.Debug(fmt.Sprint(args...)) + } +} + +func (g *grpclogFaker) Printf(format string, args ...interface{}) { + if g.log && g.logger.IsDebug() { + g.logger.Debug(fmt.Sprintf(format, args...)) + } +} + +func (g *grpclogFaker) Println(args ...interface{}) { + if g.log && g.logger.IsDebug() { + g.logger.Debug(fmt.Sprintln(args...)) + } } diff --git a/ui/package.json b/ui/package.json index ed538813d665..8a12a738aa23 100644 --- a/ui/package.json +++ b/ui/package.json @@ -1,259 +1,129 @@ -{ - "name": "vault", - "version": "0.0.0", - "description": "The official UI for Vault by HashiCorp", - "repository": "", - "author": "", - "directories": { - "doc": "doc", - "test": "tests" - }, - "scripts": { - "build": "ember build --environment=production && cp metadata.json ../http/web_ui/metadata.json", - "build:dev": "ember build", - "lint:css": "stylelint \"**/*.css\"", - "lint:css:fix": "yarn lint:css --fix", - "lint:fix": "npm-run-all --print-name --aggregate-output --continue-on-error --parallel \"lint:*:fix\"", - "lint:hbs": "ember-template-lint '**/*.hbs'", - "lint:hbs:quiet": "ember-template-lint '**/*.hbs' --quiet", - "lint:hbs:fix": "ember-template-lint . --fix", - "lint:js": "eslint . --cache", - "lint:js:quiet": "eslint . --cache --quiet", - "lint:js:fix": "eslint . 
--fix", - "fmt": "npm-run-all --aggregate-output --continue-on-error --parallel fmt:*", - "fmt:js": "prettier --config .prettierrc.js --write '{app,tests,config,lib}/**/*.js'", - "fmt:hbs": "prettier --config .prettierrc.js --write '**/*.hbs'", - "fmt:styles": "prettier --write app/styles/**/*.*", - "start": "VAULT_ADDR=http://localhost:8200; ember server --proxy=$VAULT_ADDR", - "start2": "ember server --proxy=http://localhost:8202 --port=4202", - "start:chroot": "ember server --proxy=http://localhost:8300 --port=4300", - "start:mirage": "start () { MIRAGE_DEV_HANDLER=$1 yarn run start; }; start", - "test": "npm-run-all --print-name lint:js:quiet lint:hbs:quiet && node scripts/start-vault.js", - "test:enos": "npm-run-all lint:js:quiet lint:hbs:quiet && node scripts/enos-test-ember.js", - "test:oss": "yarn run test -f='!enterprise'", - "test:quick": "node scripts/start-vault.js", - "test:quick-oss": "yarn test:quick -f='!enterprise'", - "types:declare": "declare () { yarn tsc $1 --declaration --allowJs --emitDeclarationOnly --experimentalDecorators --outDir $2; }; declare", - "vault": "VAULT_REDIRECT_ADDR=http://127.0.0.1:8200 vault server -log-level=error -dev -dev-root-token-id=root -dev-ha -dev-transactional", - "vault:cluster": "VAULT_REDIRECT_ADDR=http://127.0.0.1:8202 vault server -log-level=error -dev -dev-root-token-id=root -dev-listen-address=127.0.0.1:8202 -dev-ha -dev-transactional" - }, - "lint-staged": { - "*.js": [ - "prettier --config .prettierrc.js --write", - "eslint --quiet", - "git add" - ], - "*.hbs": [ - "prettier --config .prettierrc.js --write", - "ember-template-lint --quiet", - "git add" - ], - "*.scss": [ - "prettier --write", - "git add" - ] - }, - "devDependencies": { - "@babel/eslint-parser": "^7.21.3", - "@babel/plugin-proposal-decorators": "^7.21.0", - "@babel/plugin-proposal-object-rest-spread": "^7.12.1", - "@babel/plugin-transform-block-scoping": "^7.12.1", - "@ember/legacy-built-in-components": "^0.4.1", - 
"@ember/optional-features": "^2.0.0", - "@ember/render-modifiers": "^1.0.2", - "@ember/string": "^3.0.1", - "@ember/test-helpers": "2.9.3", - "@ember/test-waiters": "^3.0.0", - "@glimmer/component": "^1.1.2", - "@glimmer/tracking": "^1.1.2", - "@hashicorp/structure-icons": "^1.3.0", - "@icholy/duration": "^5.1.0", - "@tsconfig/ember": "^1.0.1", - "@types/ember": "^4.0.2", - "@types/ember-data": "^4.4.6", - "@types/ember-data__adapter": "^4.0.1", - "@types/ember-data__model": "^4.0.0", - "@types/ember-data__serializer": "^4.0.1", - "@types/ember-data__store": "^4.0.2", - "@types/ember-qunit": "^5.0.2", - "@types/ember-resolver": "^5.0.13", - "@types/ember__application": "^4.0.4", - "@types/ember__array": "^4.0.3", - "@types/ember__component": "^4.0.11", - "@types/ember__controller": "^4.0.3", - "@types/ember__debug": "^4.0.3", - "@types/ember__destroyable": "^4.0.1", - "@types/ember__engine": "^4.0.4", - "@types/ember__error": "^4.0.1", - "@types/ember__object": "^4.0.5", - "@types/ember__polyfills": "^4.0.1", - "@types/ember__routing": "^4.0.12", - "@types/ember__runloop": "^4.0.2", - "@types/ember__service": "^4.0.1", - "@types/ember__string": "^3.0.10", - "@types/ember__template": "^4.0.1", - "@types/ember__test": "^4.0.1", - "@types/ember__test-helpers": "^2.8.2", - "@types/ember__utils": "^4.0.2", - "@types/qunit": "^2.19.3", - "@types/rsvp": "^4.0.4", - "@types/shell-quote": "^1.7.1", - "@typescript-eslint/eslint-plugin": "^5.19.0", - "@typescript-eslint/parser": "^5.19.0", - "asn1js": "^2.2.0", - "autosize": "^4.0.0", - "babel-plugin-inline-json-import": "^0.3.2", - "base64-js": "^1.3.1", - "broccoli-asset-rev": "^3.0.0", - "broccoli-sri-hash": "meirish/broccoli-sri-hash#rooturl", - "codemirror": "^5.58.2", - "columnify": "^1.5.4", - "d3-axis": "^1.0.8", - "d3-ease": "^1.0.5", - "d3-scale": "^1.0.7", - "d3-selection": "^1.3.0", - "d3-time-format": "^2.1.1", - "d3-tip": "^0.9.1", - "d3-transition": "^1.2.0", - "date-fns": "^2.16.1", - "date-fns-tz": "^1.2.2", 
- "deepmerge": "^4.0.0", - "doctoc": "^2.2.0", - "dompurify": "^3.0.2", - "ember-auto-import": "2.6.3", - "ember-basic-dropdown": "6.0.1", - "ember-cli": "~4.12.1", - "ember-cli-autoprefixer": "^0.8.1", - "ember-cli-babel": "^7.26.11", - "ember-cli-content-security-policy": "2.0.3", - "ember-cli-dependency-checker": "^3.3.1", - "ember-cli-deprecation-workflow": "^2.1.0", - "ember-cli-flash": "4.0.0", - "ember-cli-htmlbars": "^6.2.0", - "ember-cli-inject-live-reload": "^2.1.0", - "ember-cli-mirage": "2.4.0", - "ember-cli-page-object": "1.17.10", - "ember-cli-sass": "11.0.1", - "ember-cli-sri": "meirish/ember-cli-sri#rooturl", - "ember-cli-string-helpers": "6.1.0", - "ember-cli-terser": "^4.0.2", - "ember-cli-typescript": "^5.2.1", - "ember-composable-helpers": "5.0.0", - "ember-concurrency": "2.3.4", - "ember-copy": "2.0.1", - "ember-d3": "^0.5.1", - "ember-data": "~4.11.3", - "ember-engines": "0.8.23", - "ember-fetch": "^8.1.2", - "ember-inflector": "4.0.2", - "ember-load-initializers": "^2.1.2", - "ember-maybe-in-element": "^2.0.3", - "ember-modal-dialog": "^4.0.1", - "ember-modifier": "^4.1.0", - "ember-page-title": "^7.0.0", - "ember-power-select": "6.0.1", - "ember-qrcode-shim": "^0.4.0", - "ember-qunit": "6.0.0", - "ember-resolver": "^10.0.0", - "ember-responsive": "5.0.0", - "ember-router-helpers": "^0.4.0", - "ember-service-worker": "meirish/ember-service-worker#configurable-scope", - "ember-sinon": "^4.0.0", - "ember-source": "~4.12.0", - "ember-svg-jar": "2.4.0", - "ember-template-lint": "5.7.2", - "ember-template-lint-plugin-prettier": "4.0.0", - "ember-test-selectors": "6.0.0", - "ember-tether": "^2.0.1", - "ember-truth-helpers": "3.0.0", - "escape-string-regexp": "^2.0.0", - "eslint": "^8.37.0", - "eslint-config-prettier": "^8.8.0", - "eslint-plugin-compat": "4.0.2", - "eslint-plugin-ember": "^11.5.0", - "eslint-plugin-n": "^15.7.0", - "eslint-plugin-prettier": "^4.2.1", - "eslint-plugin-qunit": "^7.3.4", - "filesize": "^4.2.1", - "flat": "^6.0.1", - 
"jsondiffpatch": "^0.4.1", - "jsonlint": "^1.6.3", - "lint-staged": "^10.5.1", - "loader.js": "^4.7.0", - "normalize.css": "4.1.1", - "npm-run-all": "^4.1.5", - "pkijs": "^2.2.2", - "pretender": "^3.4.3", - "prettier": "2.8.7", - "prettier-eslint-cli": "^7.1.0", - "pvutils": "^1.0.17", - "qunit": "^2.19.4", - "qunit-dom": "^2.0.0", - "sass": "^1.58.3", - "sass-svg-uri": "^1.0.0", - "shell-quote": "^1.8.1", - "string.prototype.endswith": "^0.2.0", - "string.prototype.startswith": "^0.2.0", - "stylelint": "^15.4.0", - "stylelint-config-standard": "^32.0.0", - "stylelint-prettier": "^3.0.0", - "swagger-ui-dist": "^5.9.0", - "text-encoder-lite": "2.0.0", - "tracked-built-ins": "^3.1.1", - "typescript": "^4.8.4", - "walk-sync": "^2.0.2", - "webpack": "5.78.0", - "xstate": "^3.3.3" - }, - "resolutions": { - "cryptiles": "^4.1.2", - "eslint-utils": "^1.4.1", - "ember-basic-dropdown": "6.0.1", - "growl": "^1.10.0", - "highlight.js": "^10.4.1", - "https-proxy-agent": "^2.2.3", - "ini": "^1.3.6", - "kind-of": "^6.0.3", - "minimatch": "^3.0.2", - "node-notifier": "^8.0.1", - "prismjs": "^1.21.0", - "qs": "^6.3.0", - "serialize-javascript": "^3.1.0", - "underscore": "^1.12.1", - "trim": "^0.0.3", - "xmlhttprequest-ssl": "^1.6.2", - "@embroider/macros": "^1.0.0" - }, - "engines": { - "node": "16" - }, - "ember": { - "edition": "octane" - }, - "private": true, - "ember-addon": { - "paths": [ - "lib/core", - "lib/css", - "lib/keep-gitkeep", - "lib/kmip", - "lib/kubernetes", - "lib/ldap", - "lib/kv", - "lib/open-api-explorer", - "lib/pki", - "lib/replication", - "lib/service-worker-authenticated-download", - "lib/sync" - ] - }, - "dependencies": { - "@hashicorp/design-system-components": "^2.13.0", - "@hashicorp/ember-flight-icons": "^3.1.3", - "handlebars": "4.7.7", - "highlight.js": "^10.4.1", - "node-notifier": "^8.0.1", - "uuid": "^9.0.0" - }, - "packageManager": "yarn@3.5.0" -} +--- +layout: docs +page_title: server - Command +description: |- + The "server" command starts a 
Vault server that responds to API requests. By + default, Vault will start in a "sealed" state. The Vault cluster must be + initialized before use. +--- + +# server + +The `server` command starts a Vault server that responds to API requests. By +default, Vault will start in a "sealed" state. The Vault cluster must be +initialized before use, usually by the `vault operator init` command. Each Vault +server must also be unsealed using the `vault operator unseal` command or the +API before the server can respond to requests. + +For more information, please see: + +- [`operator init` command](/vault/docs/commands/operator/init) for information + on initializing a Vault server. + +- [`operator unseal` command](/vault/docs/commands/operator/unseal) for + information on providing unseal keys. + +- [Vault configuration](/vault/docs/configuration) for the syntax and + various configuration options for a Vault server. + +## Examples + +Start a server with a configuration file: + +```shell-session +$ vault server -config=/etc/vault/config.hcl +``` + +Run in "dev" mode with a custom initial root token: + +```shell-session +$ vault server -dev -dev-root-token-id="root" +``` + +## Usage + +The following flags are available in addition to the [standard set of +flags](/vault/docs/commands) included on all commands. + +### Command options + +- `-config` `(string: "")` - Path to a configuration file or directory of + configuration files. This flag can be specified multiple times to load + multiple configurations. If the path is a directory, all files which end in + .hcl or .json are loaded. + +- `-log-level` ((#\_log_level)) `(string: "info")` - Log verbosity level. Supported values (in + order of descending detail) are `trace`, `debug`, `info`, `warn`, and `error`. This can + also be specified via the `VAULT_LOG_LEVEL` environment variable. + +- `-log-format` ((#\_log_format)) `(string: "standard")` - Log format. Supported values + are `standard` and `json`. 
This can also be specified via the + `VAULT_LOG_FORMAT` environment variable. + +- `-log-file` ((#\_log_file)) - the absolute path where Vault should save log + messages in addition to other, existing outputs like journald / stdout. Paths + that end with a path separator use the default file name, `vault.log`. Paths + that do not end with a file extension use the default `.log` extension. If the + log file rotates, Vault appends the current timestamp to the file name + at the time of rotation. For example: + + `log-file` | Full log file | Rotated log file + ---------- | ------------- | ---------------- + `/var/log` | `/var/log/vault.log` | `/var/log/vault-{timestamp}.log` + `/var/log/my-diary` | `/var/log/my-diary.log` | `/var/log/my-diary-{timestamp}.log` + `/var/log/my-diary.txt` | `/var/log/my-diary.txt` | `/var/log/my-diary-{timestamp}.txt` + +- `-log-rotate-bytes` ((#\_log_rotate_bytes)) - to specify the number of + bytes that should be written to a log before it needs to be rotated. Unless specified, + there is no limit to the number of bytes that can be written to a log file. + +- `-log-rotate-duration` ((#\_log_rotate_duration)) - to specify the maximum + duration a log should be written to before it needs to be rotated. Must be a duration + value such as 30s. Defaults to 24h. + +- `-log-rotate-max-files` ((#\_log_rotate_max_files)) - to specify the maximum + number of older log file archives to keep. Defaults to 0 (no files are ever deleted). + Set to -1 to discard old log files when a new one is created. + +- `-experiment` `(string array: [])` - The name of an experiment to enable for this node. + This flag can be specified multiple times to enable multiple experiments. Experiments + should NOT be used in production, and the associated APIs may have backwards incompatible + changes between releases. 
Additional experiments can also be specified via the + `VAULT_EXPERIMENTS` environment variable as a comma-separated list, or via the + [`experiments`](/vault/docs/configuration#experiments) config key. + +- `VAULT_ALLOW_PENDING_REMOVAL_MOUNTS` `(bool: false)` - (environment variable) + Allow Vault to be started with builtin engines which have the `Pending Removal` + deprecation state. This is a temporary stopgap in place in order to perform an + upgrade and disable these engines. Once these engines are marked `Removed` (in + the next major release of Vault), the environment variable will no longer work + and a downgrade must be performed in order to remove the offending engines. For + more information, see the [deprecation faq](/vault/docs/deprecation/faq/#q-what-are-the-phases-of-deprecation). + +### Dev options + +- `-dev` `(bool: false)` - Enable development mode. In this mode, Vault runs + in-memory and starts unsealed. As the name implies, do not run "dev" mode in + production. + +- `-dev-tls` `(bool: false)` - Enable TLS development mode. In this mode, Vault runs + in-memory and starts unsealed with a generated TLS CA, certificate and key. + As the name implies, do not run "dev" mode in production. + +- `-dev-tls-cert-dir` `(string: "")` - Directory where generated TLS files are created if `-dev-tls` is specified. If left unset, files are generated in a temporary directory. + +- `-dev-listen-address` `(string: "127.0.0.1:8200")` - Address to bind to in + "dev" mode. This can also be specified via the `VAULT_DEV_LISTEN_ADDRESS` + environment variable. + +- `-dev-root-token-id` `(string: "")` - Initial root token. This only applies + when running in "dev" mode. This can also be specified via the + `VAULT_DEV_ROOT_TOKEN_ID` environment variable. + + _Note:_ The token ID should not start with the `s.` prefix. 
+ +- `-dev-no-store-token` `(string: "")` - Do not persist the dev root token to + the token helper (usually the local filesystem) for use in future requests. + The token will only be displayed in the command output. + +- `-dev-plugin-dir` `(string: "")` - Directory from which plugins are allowed to be loaded. Only applies in "dev" mode, it will automatically register all the plugins in the provided directory. diff --git a/ui/tests/acceptance/enterprise-license-banner-test.js b/ui/tests/acceptance/enterprise-license-banner-test.js index d7c252c7e52d..e46d5bedba3f 100644 --- a/ui/tests/acceptance/enterprise-license-banner-test.js +++ b/ui/tests/acceptance/enterprise-license-banner-test.js @@ -1,110 +1,450 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: BUSL-1.1 - */ - -import { module, test } from 'qunit'; -import sinon from 'sinon'; -import { visit } from '@ember/test-helpers'; -import { setupApplicationTest } from 'ember-qunit'; -import Pretender from 'pretender'; -import formatRFC3339 from 'date-fns/formatRFC3339'; -import { addDays, subDays } from 'date-fns'; -import timestamp from 'core/utils/timestamp'; - -const generateHealthResponse = (now, state) => { - let expiry; - switch (state) { - case 'expired': - expiry = subDays(now, 2); - break; - case 'expiring': - expiry = addDays(now, 10); - break; - default: - expiry = addDays(now, 33); - break; - } - return { - initialized: true, - sealed: false, - standby: false, - license: { - expiry_time: formatRFC3339(expiry), - state: 'stored', - }, - performance_standby: false, - replication_performance_mode: 'disabled', - replication_dr_mode: 'disabled', - server_time_utc: 1622562585, - version: '1.9.0+ent', - cluster_name: 'vault-cluster-e779cd7c', - cluster_id: '5f20f5ab-acea-0481-787e-71ec2ff5a60b', - last_wal: 121, - }; -}; - -module('Acceptance | Enterprise | License banner warnings', function (hooks) { - setupApplicationTest(hooks); - - hooks.before(function () { - sinon.stub(timestamp, 
'now').callsFake(() => new Date('2018-04-03T14:15:30')); - }); - hooks.beforeEach(function () { - this.now = timestamp.now(); - }); - hooks.afterEach(function () { - this.server.shutdown(); - }); - hooks.after(function () { - timestamp.now.restore(); - }); - - test('it shows no license banner if license expires in > 30 days', async function (assert) { - const healthResp = generateHealthResponse(this.now); - this.server = new Pretender(function () { - this.get('/v1/sys/health', (response) => { - return [response, { 'Content-Type': 'application/json' }, JSON.stringify(healthResp)]; - }); - this.get('/v1/sys/internal/ui/feature-flags', this.passthrough); - this.get('/v1/sys/internal/ui/mounts', this.passthrough); - this.get('/v1/sys/seal-status', this.passthrough); - this.get('/v1/sys/license/features', this.passthrough); - }); - await visit('/vault/auth'); - assert.dom('[data-test-license-banner-expired]').doesNotExist('expired banner does not show'); - assert.dom('[data-test-license-banner-warning]').doesNotExist('warning banner does not show'); - this.server.shutdown(); - }); - test('it shows license banner warning if license expires within 30 days', async function (assert) { - const healthResp = generateHealthResponse(this.now, 'expiring'); - this.server = new Pretender(function () { - this.get('/v1/sys/health', (response) => { - return [response, { 'Content-Type': 'application/json' }, JSON.stringify(healthResp)]; - }); - this.get('/v1/sys/internal/ui/feature-flags', this.passthrough); - this.get('/v1/sys/internal/ui/mounts', this.passthrough); - this.get('/v1/sys/seal-status', this.passthrough); - this.get('/v1/sys/license/features', this.passthrough); - }); - await visit('/vault/auth'); - assert.dom('[data-test-license-banner-warning]').exists('license warning shows'); - this.server.shutdown(); - }); - - test('it shows license banner alert if license has already expired', async function (assert) { - const healthResp = generateHealthResponse(this.now, 
'expired'); - this.server = new Pretender(function () { - this.get('/v1/sys/health', (response) => { - return [response, { 'Content-Type': 'application/json' }, JSON.stringify(healthResp)]; - }); - this.get('/v1/sys/internal/ui/feature-flags', this.passthrough); - this.get('/v1/sys/internal/ui/mounts', this.passthrough); - this.get('/v1/sys/seal-status', this.passthrough); - this.get('/v1/sys/license/features', this.passthrough); - }); - await visit('/vault/auth'); - assert.dom('[data-test-license-banner-expired]').exists('expired license message shows'); - this.server.shutdown(); - }); -}); +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +//go:build !race && !hsm && !fips_140_3 + +// NOTE: we can't use this with HSM. We can't set testing mode on and it's not +// safe to use env vars since that provides an attack vector in the real world. +// +// The server tests have a go-metrics/exp manager race condition :(. + +package command + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/internalshared/configutil" + "github.com/hashicorp/vault/sdk/physical" + physInmem "github.com/hashicorp/vault/sdk/physical/inmem" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/seal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func init() { + if signed := os.Getenv("VAULT_LICENSE_CI"); signed != "" { + os.Setenv(EnvVaultLicense, signed) + } +} + +func testBaseHCL(tb testing.TB, listenerExtras string) string { + tb.Helper() + + return strings.TrimSpace(fmt.Sprintf(` + disable_mlock = true + listener "tcp" { + address = "127.0.0.1:%d" + tls_disable = "true" + %s + } + `, 0, listenerExtras)) +} + +const ( + goodListenerTimeouts = `http_read_header_timeout = 12 
+ http_read_timeout = "34s" + http_write_timeout = "56m" + http_idle_timeout = "78h"` + + badListenerReadHeaderTimeout = `http_read_header_timeout = "12km"` + badListenerReadTimeout = `http_read_timeout = "34日"` + badListenerWriteTimeout = `http_write_timeout = "56lbs"` + badListenerIdleTimeout = `http_idle_timeout = "78gophers"` + + inmemHCL = ` +backend "inmem_ha" { + advertise_addr = "http://127.0.0.1:8200" +} +` + haInmemHCL = ` +ha_backend "inmem_ha" { + redirect_addr = "http://127.0.0.1:8200" +} +` + + badHAInmemHCL = ` +ha_backend "inmem" {} +` + + reloadHCL = ` +backend "inmem" {} +disable_mlock = true +listener "tcp" { + address = "127.0.0.1:8203" + tls_cert_file = "TMPDIR/reload_cert.pem" + tls_key_file = "TMPDIR/reload_key.pem" +} +` + cloudHCL = ` +cloud { + resource_id = "organization/bc58b3d0-2eab-4ab8-abf4-f61d3c9975ff/project/1c78e888-2142-4000-8918-f933bbbc7690/hashicorp.example.resource/example" + client_id = "J2TtcSYOyPUkPV2z0mSyDtvitxLVjJmu" + client_secret = "N9JtHZyOnHrIvJZs82pqa54vd4jnkyU3xCcqhFXuQKJZZuxqxxbP1xCfBZVB82vY" +} +` +) + +func testServerCommand(tb testing.TB) (*cli.MockUi, *ServerCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &ServerCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + ShutdownCh: MakeShutdownCh(), + SighupCh: MakeSighupCh(), + SigUSR2Ch: MakeSigUSR2Ch(), + PhysicalBackends: map[string]physical.Factory{ + "inmem": physInmem.NewInmem, + "inmem_ha": physInmem.NewInmemHA, + }, + + // These prevent us from random sleep guessing... 
+ startedCh: make(chan struct{}, 5), + reloadedCh: make(chan struct{}, 5), + licenseReloadedCh: make(chan error), + } +} + +func TestServer_ReloadListener(t *testing.T) { + t.Parallel() + + wd, _ := os.Getwd() + wd += "/server/test-fixtures/reload/" + + td, err := ioutil.TempDir("", "vault-test-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + + wg := &sync.WaitGroup{} + // Setup initial certs + inBytes, _ := ioutil.ReadFile(wd + "reload_foo.pem") + ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0o777) + inBytes, _ = ioutil.ReadFile(wd + "reload_foo.key") + ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0o777) + + relhcl := strings.ReplaceAll(reloadHCL, "TMPDIR", td) + ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0o777) + + inBytes, _ = ioutil.ReadFile(wd + "reload_ca.pem") + certPool := x509.NewCertPool() + ok := certPool.AppendCertsFromPEM(inBytes) + if !ok { + t.Fatal("not ok when appending CA cert") + } + + ui, cmd := testServerCommand(t) + _ = ui + + wg.Add(1) + args := []string{"-config", td + "/reload.hcl"} + go func() { + if code := cmd.Run(args); code != 0 { + output := ui.ErrorWriter.String() + ui.OutputWriter.String() + t.Errorf("got a non-zero exit status: %s", output) + } + wg.Done() + }() + + testCertificateName := func(cn string) error { + conn, err := tls.Dial("tcp", "127.0.0.1:8203", &tls.Config{ + RootCAs: certPool, + }) + if err != nil { + return err + } + defer conn.Close() + if err = conn.Handshake(); err != nil { + return err + } + servName := conn.ConnectionState().PeerCertificates[0].Subject.CommonName + if servName != cn { + return fmt.Errorf("expected %s, got %s", cn, servName) + } + return nil + } + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + if err := testCertificateName("foo.example.com"); err != nil { + t.Fatalf("certificate name didn't check out: %s", err) + } + + relhcl = strings.ReplaceAll(reloadHCL, "TMPDIR", td) + inBytes, _ = ioutil.ReadFile(wd + 
"reload_bar.pem") + ioutil.WriteFile(td+"/reload_cert.pem", inBytes, 0o777) + inBytes, _ = ioutil.ReadFile(wd + "reload_bar.key") + ioutil.WriteFile(td+"/reload_key.pem", inBytes, 0o777) + ioutil.WriteFile(td+"/reload.hcl", []byte(relhcl), 0o777) + + cmd.SighupCh <- struct{}{} + select { + case <-cmd.reloadedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + if err := testCertificateName("bar.example.com"); err != nil { + t.Fatalf("certificate name didn't check out: %s", err) + } + + cmd.ShutdownCh <- struct{}{} + + wg.Wait() +} + +func TestServer(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + contents string + exp string + code int + args []string + }{ + { + "common_ha", + testBaseHCL(t, "") + inmemHCL, + "(HA available)", + 0, + []string{"-test-verify-only"}, + }, + { + "separate_ha", + testBaseHCL(t, "") + inmemHCL + haInmemHCL, + "HA Storage:", + 0, + []string{"-test-verify-only"}, + }, + { + "bad_separate_ha", + testBaseHCL(t, "") + inmemHCL + badHAInmemHCL, + "Specified HA storage does not support HA", + 1, + []string{"-test-verify-only"}, + }, + { + "good_listener_timeout_config", + testBaseHCL(t, goodListenerTimeouts) + inmemHCL, + "", + 0, + []string{"-test-server-config"}, + }, + { + "bad_listener_read_header_timeout_config", + testBaseHCL(t, badListenerReadHeaderTimeout) + inmemHCL, + "unknown unit \"km\" in duration \"12km\"", + 1, + []string{"-test-server-config"}, + }, + { + "bad_listener_read_timeout_config", + testBaseHCL(t, badListenerReadTimeout) + inmemHCL, + "unknown unit \"\\xe6\\x97\\xa5\" in duration", + 1, + []string{"-test-server-config"}, + }, + { + "bad_listener_write_timeout_config", + testBaseHCL(t, badListenerWriteTimeout) + inmemHCL, + "unknown unit \"lbs\" in duration \"56lbs\"", + 1, + []string{"-test-server-config"}, + }, + { + "bad_listener_idle_timeout_config", + testBaseHCL(t, badListenerIdleTimeout) + inmemHCL, + "unknown unit \"gophers\" in duration \"78gophers\"", + 1, + 
[]string{"-test-server-config"}, + }, + { + "environment_variables_logged", + testBaseHCL(t, "") + inmemHCL, + "Environment Variables", + 0, + []string{"-test-verify-only"}, + }, + { + "cloud_config", + testBaseHCL(t, "") + inmemHCL + cloudHCL, + "HCP Organization: bc58b3d0-2eab-4ab8-abf4-f61d3c9975ff", + 0, + []string{"-test-verify-only"}, + }, + { + "recovery_mode", + testBaseHCL(t, "") + inmemHCL, + "", + 0, + []string{"-test-verify-only", "-recovery"}, + }, + } + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + ui, cmd := testServerCommand(t) + + f, err := os.CreateTemp(t.TempDir(), "") + require.NoErrorf(t, err, "error creating temp dir: %v", err) + + _, err = f.WriteString(tc.contents) + require.NoErrorf(t, err, "cannot write temp file contents") + + err = f.Close() + require.NoErrorf(t, err, "unable to close temp file") + + args := append(tc.args, "-config", f.Name()) + code := cmd.Run(args) + output := ui.ErrorWriter.String() + ui.OutputWriter.String() + require.Equal(t, tc.code, code, "expected %d to be %d: %s", code, tc.code, output) + require.Contains(t, output, tc.exp, "expected %q to contain %q", output, tc.exp) + }) + } +} + +// TestServer_DevTLS verifies that a vault server starts up correctly with the -dev-tls flag +func TestServer_DevTLS(t *testing.T) { + ui, cmd := testServerCommand(t) + args := []string{"-dev-tls", "-dev-listen-address=127.0.0.1:0", "-test-server-config"} + retCode := cmd.Run(args) + output := ui.ErrorWriter.String() + ui.OutputWriter.String() + require.Equal(t, 0, retCode, output) + require.Contains(t, output, `tls: "enabled"`) +} + +// TestConfigureDevTLS verifies the various logic paths that flow through the +// configureDevTLS function. 
+func TestConfigureDevTLS(t *testing.T) { + testcases := []struct { + ServerCommand *ServerCommand + DeferFuncNotNil bool + ConfigNotNil bool + TLSDisable bool + CertPathEmpty bool + ErrNotNil bool + TestDescription string + }{ + { + ServerCommand: &ServerCommand{ + flagDevTLS: false, + }, + ConfigNotNil: true, + TLSDisable: true, + CertPathEmpty: true, + ErrNotNil: false, + TestDescription: "flagDev is false, nothing will be configured", + }, + { + ServerCommand: &ServerCommand{ + flagDevTLS: true, + flagDevTLSCertDir: "", + }, + DeferFuncNotNil: true, + ConfigNotNil: true, + ErrNotNil: false, + TestDescription: "flagDevTLSCertDir is empty", + }, + { + ServerCommand: &ServerCommand{ + flagDevTLS: true, + flagDevTLSCertDir: "@/#", + }, + CertPathEmpty: true, + ErrNotNil: true, + TestDescription: "flagDevTLSCertDir is set to something invalid", + }, + } + + for _, testcase := range testcases { + fun, cfg, certPath, err := configureDevTLS(testcase.ServerCommand) + if fun != nil { + // If a function is returned, call it right away to clean up + // files created in the temporary directory before anything else has + // a chance to fail this test. 
+ fun() + } + + t.Run(testcase.TestDescription, func(t *testing.T) { + assert.Equal(t, testcase.DeferFuncNotNil, (fun != nil)) + assert.Equal(t, testcase.ConfigNotNil, cfg != nil) + if testcase.ConfigNotNil && cfg != nil { + assert.True(t, len(cfg.Listeners) > 0) + assert.Equal(t, testcase.TLSDisable, cfg.Listeners[0].TLSDisable) + } + assert.Equal(t, testcase.CertPathEmpty, len(certPath) == 0) + if testcase.ErrNotNil { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestConfigureSeals(t *testing.T) { + testConfig := server.Config{SharedConfig: &configutil.SharedConfig{}} + _, testCommand := testServerCommand(t) + + logger := corehelpers.NewTestLogger(t) + backend, err := physInmem.NewInmem(nil, logger) + if err != nil { + t.Fatal(err) + } + testCommand.logger = logger + + setSealResponse, _, err := testCommand.configureSeals(context.Background(), &testConfig, backend, []string{}, map[string]string{}) + if err != nil { + t.Fatal(err) + } + + if len(setSealResponse.barrierSeal.GetAccess().GetAllSealWrappersByPriority()) != 1 { + t.Fatalf("expected 1 seal, got %d", len(setSealResponse.barrierSeal.GetAccess().GetAllSealWrappersByPriority())) + } + + if setSealResponse.barrierSeal.BarrierSealConfigType() != vault.SealConfigTypeShamir { + t.Fatalf("expected shamir seal, got seal type %s", setSealResponse.barrierSeal.BarrierSealConfigType()) + } +} + +func TestReloadSeals(t *testing.T) { + testCore := vault.TestCoreWithSeal(t, vault.NewTestSeal(t, &seal.TestSealOpts{StoredKeys: seal.StoredKeysSupportedShamirRoot}), false) + _, testCommand := testServerCommand(t) + testConfig := server.Config{SharedConfig: &configutil.SharedConfig{}} + + _, err := testCommand.reloadSeals(context.Background(), testCore, &testConfig) + if err == nil { + t.Fatal("expected error, got nil") + } + + testConfig = server.Config{SharedConfig: &configutil.SharedConfig{Seals: []*configutil.KMS{{Disabled: true}}}} + _, err = 
testCommand.reloadSeals(context.Background(), testCore, &testConfig) + if err == nil { + t.Fatal("expected error, got nil") + } +} diff --git a/ui/tests/acceptance/enterprise-reduced-disclosure-test.js b/ui/tests/acceptance/enterprise-reduced-disclosure-test.js new file mode 100644 index 000000000000..b32688ca022b --- /dev/null +++ b/ui/tests/acceptance/enterprise-reduced-disclosure-test.js @@ -0,0 +1,432 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package pkcs7 + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + "math/big" + "time" +) + +// SignedData is an opaque data structure for creating signed data payloads +type SignedData struct { + sd signedData + certs []*x509.Certificate + data, messageDigest []byte + digestOid asn1.ObjectIdentifier + encryptionOid asn1.ObjectIdentifier +} + +// NewSignedData takes data and initializes a PKCS7 SignedData struct that is +// ready to be signed via AddSigner. The digest algorithm is set to SHA-256 by default +// and can be changed by calling SetDigestAlgorithm. 
+func NewSignedData(data []byte) (*SignedData, error) { + content, err := asn1.Marshal(data) + if err != nil { + return nil, err + } + ci := contentInfo{ + ContentType: OIDData, + Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true}, + } + sd := signedData{ + ContentInfo: ci, + Version: 1, + } + return &SignedData{sd: sd, data: data, digestOid: OIDDigestAlgorithmSHA256}, nil +} + +// SignerInfoConfig are optional values to include when adding a signer +type SignerInfoConfig struct { + ExtraSignedAttributes []Attribute + ExtraUnsignedAttributes []Attribute +} + +type signedData struct { + Version int `asn1:"default:1"` + DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"` + ContentInfo contentInfo + Certificates rawCertificates `asn1:"optional,tag:0"` + CRLs []pkix.CertificateList `asn1:"optional,tag:1"` + SignerInfos []signerInfo `asn1:"set"` +} + +type signerInfo struct { + Version int `asn1:"default:1"` + IssuerAndSerialNumber issuerAndSerial + DigestAlgorithm pkix.AlgorithmIdentifier + AuthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:0"` + DigestEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedDigest []byte + UnauthenticatedAttributes []attribute `asn1:"optional,omitempty,tag:1"` +} + +type attribute struct { + Type asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"set"` +} + +func marshalAttributes(attrs []attribute) ([]byte, error) { + encodedAttributes, err := asn1.Marshal(struct { + A []attribute `asn1:"set"` + }{A: attrs}) + if err != nil { + return nil, err + } + + // Remove the leading sequence octets + var raw asn1.RawValue + asn1.Unmarshal(encodedAttributes, &raw) + return raw.Bytes, nil +} + +type rawCertificates struct { + Raw asn1.RawContent +} + +type issuerAndSerial struct { + IssuerName asn1.RawValue + SerialNumber *big.Int +} + +// SetDigestAlgorithm sets the digest algorithm to be used in the signing process. 
+// +// This should be called before adding signers +func (sd *SignedData) SetDigestAlgorithm(d asn1.ObjectIdentifier) { + sd.digestOid = d +} + +// SetEncryptionAlgorithm sets the encryption algorithm to be used in the signing process. +// +// This should be called before adding signers +func (sd *SignedData) SetEncryptionAlgorithm(d asn1.ObjectIdentifier) { + sd.encryptionOid = d +} + +// AddSigner is a wrapper around AddSignerChain() that adds a signer without any parent. +func (sd *SignedData) AddSigner(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error { + var parents []*x509.Certificate + return sd.AddSignerChain(ee, pkey, parents, config) +} + +// AddSignerChain signs attributes about the content and adds certificates +// and signers infos to the Signed Data. The certificate and private key +// of the end-entity signer are used to issue the signature, and any +// parent of that end-entity that need to be added to the list of +// certifications can be specified in the parents slice. +// +// The signature algorithm used to hash the data is the one of the end-entity +// certificate. +func (sd *SignedData) AddSignerChain(ee *x509.Certificate, pkey crypto.PrivateKey, parents []*x509.Certificate, config SignerInfoConfig) error { + // Following RFC 2315, 9.2 SignerInfo type, the distinguished name of + // the issuer of the end-entity signer is stored in the issuerAndSerialNumber + // section of the SignedData.SignerInfo, alongside the serial number of + // the end-entity. 
+ var ias issuerAndSerial + ias.SerialNumber = ee.SerialNumber + if len(parents) == 0 { + // no parent, the issuer is the end-entity cert itself + ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer} + } else { + err := verifyPartialChain(ee, parents) + if err != nil { + return err + } + // the first parent is the issuer + ias.IssuerName = asn1.RawValue{FullBytes: parents[0].RawSubject} + } + sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, + pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, + ) + hash, err := getHashForOID(sd.digestOid) + if err != nil { + return err + } + h := hash.New() + h.Write(sd.data) + sd.messageDigest = h.Sum(nil) + encryptionOid, err := getOIDForEncryptionAlgorithm(pkey, sd.digestOid) + if err != nil { + return err + } + attrs := &attributes{} + attrs.Add(OIDAttributeContentType, sd.sd.ContentInfo.ContentType) + attrs.Add(OIDAttributeMessageDigest, sd.messageDigest) + attrs.Add(OIDAttributeSigningTime, time.Now().UTC()) + for _, attr := range config.ExtraSignedAttributes { + attrs.Add(attr.Type, attr.Value) + } + finalAttrs, err := attrs.ForMarshalling() + if err != nil { + return err + } + unsignedAttrs := &attributes{} + for _, attr := range config.ExtraUnsignedAttributes { + unsignedAttrs.Add(attr.Type, attr.Value) + } + finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling() + if err != nil { + return err + } + // create signature of signed attributes + signature, err := signAttributes(finalAttrs, pkey, hash) + if err != nil { + return err + } + signer := signerInfo{ + AuthenticatedAttributes: finalAttrs, + UnauthenticatedAttributes: finalUnsignedAttrs, + DigestAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, + DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: encryptionOid}, + IssuerAndSerialNumber: ias, + EncryptedDigest: signature, + Version: 1, + } + sd.certs = append(sd.certs, ee) + if len(parents) > 0 { + sd.certs = append(sd.certs, parents...) 
+ } + sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer) + return nil +} + +// SignWithoutAttr issues a signature on the content of the pkcs7 SignedData. +// Unlike AddSigner/AddSignerChain, it calculates the digest on the data alone +// and does not include any signed attributes like timestamp and so on. +// +// This function is needed to sign old Android APKs, something you probably +// shouldn't do unless you're maintaining backward compatibility for old +// applications. +func (sd *SignedData) SignWithoutAttr(ee *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error { + var signature []byte + sd.sd.DigestAlgorithmIdentifiers = append(sd.sd.DigestAlgorithmIdentifiers, pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}) + hash, err := getHashForOID(sd.digestOid) + if err != nil { + return err + } + h := hash.New() + h.Write(sd.data) + sd.messageDigest = h.Sum(nil) + switch pkey := pkey.(type) { + case *dsa.PrivateKey: + // dsa doesn't implement crypto.Signer so we make a special case + // https://github.com/golang/go/issues/27889 + r, s, err := dsa.Sign(rand.Reader, pkey, sd.messageDigest) + if err != nil { + return err + } + signature, err = asn1.Marshal(dsaSignature{r, s}) + if err != nil { + return err + } + default: + key, ok := pkey.(crypto.Signer) + if !ok { + return errors.New("pkcs7: private key does not implement crypto.Signer") + } + signature, err = key.Sign(rand.Reader, sd.messageDigest, hash) + if err != nil { + return err + } + } + var ias issuerAndSerial + ias.SerialNumber = ee.SerialNumber + // no parent, the issue is the end-entity cert itself + ias.IssuerName = asn1.RawValue{FullBytes: ee.RawIssuer} + if sd.encryptionOid == nil { + // if the encryption algorithm wasn't set by SetEncryptionAlgorithm, + // infer it from the digest algorithm + sd.encryptionOid, err = getOIDForEncryptionAlgorithm(pkey, sd.digestOid) + } + if err != nil { + return err + } + signer := signerInfo{ + DigestAlgorithm: 
pkix.AlgorithmIdentifier{Algorithm: sd.digestOid}, + DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: sd.encryptionOid}, + IssuerAndSerialNumber: ias, + EncryptedDigest: signature, + Version: 1, + } + // create signature of signed attributes + sd.certs = append(sd.certs, ee) + sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer) + return nil +} + +func (si *signerInfo) SetUnauthenticatedAttributes(extraUnsignedAttrs []Attribute) error { + unsignedAttrs := &attributes{} + for _, attr := range extraUnsignedAttrs { + unsignedAttrs.Add(attr.Type, attr.Value) + } + finalUnsignedAttrs, err := unsignedAttrs.ForMarshalling() + if err != nil { + return err + } + + si.UnauthenticatedAttributes = finalUnsignedAttrs + + return nil +} + +// AddCertificate adds the certificate to the payload. Useful for parent certificates +func (sd *SignedData) AddCertificate(cert *x509.Certificate) { + sd.certs = append(sd.certs, cert) +} + +// Detach removes content from the signed data struct to make it a detached signature. 
+// This must be called right before Finish() +func (sd *SignedData) Detach() { + sd.sd.ContentInfo = contentInfo{ContentType: OIDData} +} + +// GetSignedData returns the private Signed Data +func (sd *SignedData) GetSignedData() *signedData { + return &sd.sd +} + +// Finish marshals the content and its signers +func (sd *SignedData) Finish() ([]byte, error) { + sd.sd.Certificates = marshalCertificates(sd.certs) + inner, err := asn1.Marshal(sd.sd) + if err != nil { + return nil, err + } + outer := contentInfo{ + ContentType: OIDSignedData, + Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true}, + } + return asn1.Marshal(outer) +} + +// RemoveAuthenticatedAttributes removes authenticated attributes from signedData +// similar to OpenSSL's PKCS7_NOATTR or -noattr flags +func (sd *SignedData) RemoveAuthenticatedAttributes() { + for i := range sd.sd.SignerInfos { + sd.sd.SignerInfos[i].AuthenticatedAttributes = nil + } +} + +// RemoveUnauthenticatedAttributes removes unauthenticated attributes from signedData +func (sd *SignedData) RemoveUnauthenticatedAttributes() { + for i := range sd.sd.SignerInfos { + sd.sd.SignerInfos[i].UnauthenticatedAttributes = nil + } +} + +// verifyPartialChain checks that a given cert is issued by the first parent in the list, +// then continue down the path. It doesn't require the last parent to be a root CA, +// or to be trusted in any truststore. It simply verifies that the chain provided, albeit +// partial, makes sense. 
+func verifyPartialChain(cert *x509.Certificate, parents []*x509.Certificate) error { + if len(parents) == 0 { + return fmt.Errorf("pkcs7: zero parents provided to verify the signature of certificate %q", cert.Subject.CommonName) + } + err := cert.CheckSignatureFrom(parents[0]) + if err != nil { + return fmt.Errorf("pkcs7: certificate signature from parent is invalid: %v", err) + } + if len(parents) == 1 { + // there is no more parent to check, return + return nil + } + return verifyPartialChain(parents[0], parents[1:]) +} + +func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) { + var ias issuerAndSerial + // The issuer RDNSequence has to match exactly the sequence in the certificate + // We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence + ias.IssuerName = asn1.RawValue{FullBytes: cert.RawIssuer} + ias.SerialNumber = cert.SerialNumber + + return ias, nil +} + +// signs the DER encoded form of the attributes with the private key +func signAttributes(attrs []attribute, pkey crypto.PrivateKey, digestAlg crypto.Hash) ([]byte, error) { + attrBytes, err := marshalAttributes(attrs) + if err != nil { + return nil, err + } + h := digestAlg.New() + h.Write(attrBytes) + hash := h.Sum(nil) + + // dsa doesn't implement crypto.Signer so we make a special case + // https://github.com/golang/go/issues/27889 + switch pkey := pkey.(type) { + case *dsa.PrivateKey: + r, s, err := dsa.Sign(rand.Reader, pkey, hash) + if err != nil { + return nil, err + } + return asn1.Marshal(dsaSignature{r, s}) + } + + key, ok := pkey.(crypto.Signer) + if !ok { + return nil, errors.New("pkcs7: private key does not implement crypto.Signer") + } + return key.Sign(rand.Reader, hash, digestAlg) +} + +type dsaSignature struct { + R, S *big.Int +} + +// concats and wraps the certificates in the RawValue structure +func marshalCertificates(certs []*x509.Certificate) rawCertificates { + var buf bytes.Buffer + for _, cert := range certs { + buf.Write(cert.Raw) + 
} + rawCerts, _ := marshalCertificateBytes(buf.Bytes()) + return rawCerts +} + +// Even though, the tag & length are stripped out during marshalling the +// RawContent, we have to encode it into the RawContent. If its missing, +// then `asn1.Marshal()` will strip out the certificate wrapper instead. +func marshalCertificateBytes(certs []byte) (rawCertificates, error) { + val := asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true} + b, err := asn1.Marshal(val) + if err != nil { + return rawCertificates{}, err + } + return rawCertificates{Raw: b}, nil +} + +// DegenerateCertificate creates a signed data structure containing only the +// provided certificate or certificate chain. +func DegenerateCertificate(cert []byte) ([]byte, error) { + rawCert, err := marshalCertificateBytes(cert) + if err != nil { + return nil, err + } + emptyContent := contentInfo{ContentType: OIDData} + sd := signedData{ + Version: 1, + ContentInfo: emptyContent, + Certificates: rawCert, + CRLs: []pkix.CertificateList{}, + } + content, err := asn1.Marshal(sd) + if err != nil { + return nil, err + } + signedContent := contentInfo{ + ContentType: OIDSignedData, + Content: asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true}, + } + return asn1.Marshal(signedContent) +} diff --git a/ui/tests/acceptance/enterprise-replication-modes-test.js b/ui/tests/acceptance/enterprise-replication-modes-test.js new file mode 100644 index 000000000000..3484f783de59 --- /dev/null +++ b/ui/tests/acceptance/enterprise-replication-modes-test.js @@ -0,0 +1,128 @@ +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + + + + + + +

    + Sign SSH Key +

    +
    +
    + +{{#if this.model.signedKey}} +
    + + Warning + + You will not be able to access this information later, so please copy the information below. + + + {{#each this.model.attrs as |attr|}} + {{#if (eq attr.type "object")}} + + {{else}} + + {{/if}} + {{/each}} +
    +
    +
    + +
    + {{#if this.model.leaseId}} +
    + +
    + {{/if}} +
    + +
    +
    +{{else}} +
    +
    + + + {{#if this.model.attrs}} + {{#each (take 1 this.model.attrs) as |attr|}} + + {{/each}} + + {{#if this.showOptions}} +
    + {{#each (drop 1 this.model.attrs) as |attr|}} + + {{/each}} +
    + {{/if}} + {{/if}} +
    +
    + + + + +
    +
    +{{/if}} \ No newline at end of file diff --git a/ui/tests/acceptance/enterprise-replication-test.js b/ui/tests/acceptance/enterprise-replication-test.js index ea70e3b97639..7a24b2f5bdb5 100644 --- a/ui/tests/acceptance/enterprise-replication-test.js +++ b/ui/tests/acceptance/enterprise-replication-test.js @@ -1,363 +1,50 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: BUSL-1.1 - */ - -import { clickTrigger } from 'ember-power-select/test-support/helpers'; -import { click, fillIn, findAll, currentURL, find, visit, settled, waitUntil } from '@ember/test-helpers'; -import { module, test } from 'qunit'; -import { setupApplicationTest } from 'ember-qunit'; -import authPage from 'vault/tests/pages/auth'; -import { pollCluster } from 'vault/tests/helpers/poll-cluster'; -import { create } from 'ember-cli-page-object'; -import flashMessage from 'vault/tests/pages/components/flash-message'; -import ss from 'vault/tests/pages/components/search-select'; -import { disableReplication } from 'vault/tests/helpers/replication'; -const searchSelect = create(ss); -const flash = create(flashMessage); - -module('Acceptance | Enterprise | replication', function (hooks) { - setupApplicationTest(hooks); - - hooks.beforeEach(async function () { - await authPage.login(); - await settled(); - await disableReplication('dr'); - await settled(); - await disableReplication('performance'); - await settled(); - }); - - hooks.afterEach(async function () { - await disableReplication('dr'); - await settled(); - await disableReplication('performance'); - await settled(); - }); - - test('replication', async function (assert) { - assert.expect(17); - const secondaryName = 'firstSecondary'; - const mode = 'deny'; - - // confirm unable to visit dr secondary details page when both replications are disabled - await visit('/vault/replication-dr-promote/details'); - - assert.dom('[data-test-component="empty-state"]').exists(); - assert - .dom('[data-test-empty-state-title]') - 
.includesText('Disaster Recovery secondary not set up', 'shows the correct title of the empty state'); - - assert - .dom('[data-test-empty-state-message]') - .hasText( - 'This cluster has not been enabled as a Disaster Recovery Secondary. You can do so by enabling replication and adding a secondary from the Disaster Recovery Primary.', - 'renders default message specific to when no replication is enabled' - ); - - await visit('/vault/replication'); - - assert.strictEqual(currentURL(), '/vault/replication'); - - // enable perf replication - await click('[data-test-replication-type-select="performance"]'); - - await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); - - await click('[data-test-replication-enable]'); - - await pollCluster(this.owner); - - // confirm that the details dashboard shows - assert.ok(await waitUntil(() => find('[data-test-replication-dashboard]')), 'details dashboard is shown'); - - // add a secondary with a mount filter config - await click('[data-test-replication-link="secondaries"]'); - - await click('[data-test-secondary-add]'); - - await fillIn('[data-test-replication-secondary-id]', secondaryName); - - await click('#deny'); - await clickTrigger(); - const mountPath = searchSelect.options.objectAt(0).text; - await searchSelect.options.objectAt(0).click(); - await click('[data-test-secondary-add]'); - - await pollCluster(this.owner); - // click into the added secondary's mount filter config - await click('[data-test-replication-link="secondaries"]'); - - await click('[data-test-popup-menu-trigger]'); - - await click('[data-test-replication-path-filter-link]'); - - assert.strictEqual( - currentURL(), - `/vault/replication/performance/secondaries/config/show/${secondaryName}` - ); - assert.dom('[data-test-mount-config-mode]').includesText(mode, 'show page renders the correct mode'); - assert - .dom('[data-test-mount-config-paths]') - .includesText(mountPath, 'show page renders the correct mount path'); - - // delete config 
by choosing "no filter" in the edit screen - await click('[data-test-replication-link="edit-mount-config"]'); - - await click('#no-filtering'); - - await click('[data-test-config-save]'); - await settled(); // eslint-disable-line - - assert.strictEqual( - flash.latestMessage, - `The performance mount filter config for the secondary ${secondaryName} was successfully deleted.`, - 'renders success flash upon deletion' - ); - assert.strictEqual( - currentURL(), - `/vault/replication/performance/secondaries`, - 'redirects to the secondaries page' - ); - // nav back to details page and confirm secondary is in the known secondaries table - await click('[data-test-replication-link="details"]'); - - assert - .dom(`[data-test-secondaries=row-for-${secondaryName}]`) - .exists('shows a table row the recently added secondary'); - - // nav to DR - await visit('/vault/replication/dr'); - - await fillIn('[data-test-replication-cluster-mode-select]', 'secondary'); - assert - .dom('[data-test-replication-enable]') - .isDisabled('dr secondary enable is disabled when other replication modes are on'); - - // disable performance replication - await disableReplication('performance', assert); - await settled(); - await pollCluster(this.owner); - - // enable dr replication - await visit('vault/replication/dr'); - - await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); - await click('button[type="submit"]'); - - await pollCluster(this.owner); - await waitUntil(() => find('[data-test-empty-state-title]')); - // empty state inside of know secondaries table - assert - .dom('[data-test-empty-state-title]') - .includesText( - 'No known dr secondary clusters associated with this cluster', - 'shows the correct title of the empty state' - ); - - assert.ok( - find('[data-test-replication-title]').textContent.includes('Disaster Recovery'), - 'it displays the replication type correctly' - ); - assert.ok( - find('[data-test-replication-mode-display]').textContent.includes('primary'), 
- 'it displays the cluster mode correctly' - ); - - // add dr secondary - await click('[data-test-replication-link="secondaries"]'); - - await click('[data-test-secondary-add]'); - - await fillIn('[data-test-replication-secondary-id]', secondaryName); - - await click('[data-test-secondary-add]'); - - await pollCluster(this.owner); - await click('[data-test-replication-link="secondaries"]'); - - assert - .dom('[data-test-secondary-name]') - .includesText(secondaryName, 'it displays the secondary in the list of known secondaries'); - }); - - test('disabling dr primary when perf replication is enabled', async function (assert) { - await visit('vault/replication/performance'); - - // enable perf replication - await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); - await click('[data-test-replication-enable]'); - - await pollCluster(this.owner); - - // enable dr replication - await visit('/vault/replication/dr'); - - await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); - - await click('[data-test-replication-enable]'); - - await pollCluster(this.owner); - await visit('/vault/replication/dr/manage'); - - await click('[data-test-demote-replication] [data-test-replication-action-trigger]'); - - assert.ok(findAll('[data-test-demote-warning]').length, 'displays the demotion warning'); - }); - - test('navigating to dr secondary details page when dr secondary is not enabled', async function (assert) { - // enable dr replication - - await visit('/vault/replication/dr'); - - await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); - await click('[data-test-replication-enable]'); - await settled(); // eslint-disable-line - await pollCluster(this.owner); - await visit('/vault/replication-dr-promote/details'); - - assert.dom('[data-test-component="empty-state"]').exists(); - assert - .dom('[data-test-empty-state-message]') - .hasText( - 'This Disaster Recovery secondary has not been enabled. 
You can do so from the Disaster Recovery Primary.', - 'renders message when replication is enabled' - ); - }); - - test('add secondary and navigate through token generation modal', async function (assert) { - const secondaryNameFirst = 'firstSecondary'; - const secondaryNameSecond = 'secondSecondary'; - await visit('/vault/replication'); - - // enable perf replication - await click('[data-test-replication-type-select="performance"]'); - - await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); - await click('[data-test-replication-enable]'); - - await pollCluster(this.owner); - await settled(); - - // add a secondary with default TTL - await click('[data-test-replication-link="secondaries"]'); - - await click('[data-test-secondary-add]'); - - await fillIn('[data-test-replication-secondary-id]', secondaryNameFirst); - await click('[data-test-secondary-add]'); - - await pollCluster(this.owner); - await settled(); - const modalDefaultTtl = document.querySelector('[data-test-row-value="TTL"]').innerText; - - // checks on secondary token modal - assert.dom('#replication-copy-token-modal').exists(); - assert.dom('[data-test-inline-error-message]').hasText('Copy token to dismiss modal'); - assert.strictEqual(modalDefaultTtl, '1800s', 'shows the correct TTL of 1800s'); - // click off the modal to make sure you don't just have to click on the copy-close button to copy the token - assert.dom('[data-test-modal-close]').isDisabled('cancel is disabled'); - await click('[data-test-modal-copy]'); - assert.dom('[data-test-modal-close]').isEnabled('cancel is enabled after token is copied'); - await click('[data-test-modal-close]'); - - // add another secondary not using the default ttl - await click('[data-test-secondary-add]'); - - await fillIn('[data-test-replication-secondary-id]', secondaryNameSecond); - await click('[data-test-toggle-input]'); - - await fillIn('[data-test-ttl-value]', 3); - await click('[data-test-secondary-add]'); - - await 
pollCluster(this.owner); - await settled(); - const modalTtl = document.querySelector('[data-test-row-value="TTL"]').innerText; - assert.strictEqual(modalTtl, '180s', 'shows the correct TTL of 180s'); - await click('[data-test-modal-copy]'); - await click('[data-test-modal-close]'); - - // confirm you were redirected to the secondaries page - assert.strictEqual( - currentURL(), - `/vault/replication/performance/secondaries`, - 'redirects to the secondaries page' - ); - assert - .dom('[data-test-secondary-name]') - .includesText(secondaryNameFirst, 'it displays the secondary in the list of secondaries'); - }); - - test('render performance and dr primary and navigate to details page', async function (assert) { - // enable perf primary replication - await visit('/vault/replication'); - await click('[data-test-replication-type-select="performance"]'); - - await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); - await click('[data-test-replication-enable]'); - - await pollCluster(this.owner); - await settled(); - - await visit('/vault/replication'); - - assert - .dom(`[data-test-replication-summary-card]`) - .doesNotExist(`does not render replication summary card when both modes are not enabled as primary`); - - // enable DR primary replication - await click('[data-test-replication-promote-secondary]'); - await click('[data-test-replication-enable]'); - - await pollCluster(this.owner); - await settled(); - - // navigate using breadcrumbs back to replication.index - await click('[data-test-replication-breadcrumb]'); - - assert - .dom('[data-test-replication-summary-card]') - .exists({ count: 2 }, 'renders two replication-summary-card components'); - - // navigate to details page using the "Details" link - await click('[data-test-manage-link="Disaster Recovery"]'); - - assert - .dom('[data-test-selectable-card-container="primary"]') - .exists('shows the correct card on the details dashboard'); - assert.strictEqual(currentURL(), '/vault/replication/dr'); - 
}); - - test('render performance secondary and navigate to the details page', async function (assert) { - // enable perf replication - await visit('/vault/replication'); - - await click('[data-test-replication-type-select="performance"]'); - - await fillIn('[data-test-replication-cluster-mode-select]', 'primary'); - await click('[data-test-replication-enable]'); - - await pollCluster(this.owner); - await settled(); - - // demote perf primary to a secondary - await click('[data-test-replication-link="manage"]'); - - // open demote modal - await click('[data-test-demote-replication] [data-test-replication-action-trigger]'); - - // enter confirmation text - await fillIn('[data-test-confirmation-modal-input="Demote to secondary?"]', 'Performance'); - // Click confirm button - await click('[data-test-confirm-button="Demote to secondary?"]'); - - await click('[data-test-replication-link="details"]'); - - assert.dom('[data-test-replication-dashboard]').exists(); - assert.dom('[data-test-selectable-card-container="secondary"]').exists(); - assert.ok( - find('[data-test-replication-mode-display]').textContent.includes('secondary'), - 'it displays the cluster mode correctly' - ); - }); -}); +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package cluster + +import ( + "io" + "net" + "time" + + uberAtomic "go.uber.org/atomic" +) + +type delayedConn struct { + net.Conn + dr *delayedReader +} + +func newDelayedConn(conn net.Conn, delay time.Duration) net.Conn { + dr := &delayedReader{ + r: conn, + delay: uberAtomic.NewDuration(delay), + } + return &delayedConn{ + dr: dr, + Conn: conn, + } +} + +func (conn *delayedConn) Read(data []byte) (int, error) { + return conn.dr.Read(data) +} + +func (conn *delayedConn) SetDelay(delay time.Duration) { + conn.dr.delay.Store(delay) +} + +type delayedReader struct { + r io.Reader + delay *uberAtomic.Duration +} + +func (dr *delayedReader) Read(data []byte) (int, error) { + // Sleep for the delay period prior to reading + if delay := dr.delay.Load(); delay != 0 { + time.Sleep(delay) + } + + return dr.r.Read(data) +} diff --git a/ui/tests/acceptance/enterprise-replication-unsupported-test.js b/ui/tests/acceptance/enterprise-replication-unsupported-test.js index cfcb2a73c273..adcfac4e1c44 100644 --- a/ui/tests/acceptance/enterprise-replication-unsupported-test.js +++ b/ui/tests/acceptance/enterprise-replication-unsupported-test.js @@ -1,33 +1,538 @@ -/** - * Copyright (c) HashiCorp, Inc. 
- * SPDX-License-Identifier: BUSL-1.1 - */ - -import { module, test } from 'qunit'; -import { setupApplicationTest } from 'ember-qunit'; -import { setupMirage } from 'ember-cli-mirage/test-support'; -import authPage from 'vault/tests/pages/auth'; -import { visit } from '@ember/test-helpers'; - -module('Acceptance | Enterprise | replication unsupported', function (hooks) { - setupApplicationTest(hooks); - setupMirage(hooks); - - hooks.beforeEach(async function () { - this.server.get('/sys/replication/status', function () { - return { - data: { - mode: 'unsupported', - }, - }; - }); - return authPage.login(); - }); - - test('replication page when unsupported', async function (assert) { - await visit('/vault/replication'); - assert - .dom('[data-test-replication-title]') - .hasText('Replication unsupported', 'it shows the unsupported view'); - }); -}); +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package raft + +import ( + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + bolt "github.com/hashicorp-forge/bbolt" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/plugin/pb" + "github.com/rboyer/safeio" + "go.uber.org/atomic" + + "github.com/hashicorp/raft" +) + +const ( + // boltSnapshotID is the stable ID for any boltDB snapshot. Keeping the ID + // stable means there is only ever one bolt snapshot in the system + boltSnapshotID = "bolt-snapshot" + tmpSuffix = ".tmp" + snapPath = "snapshots" +) + +// BoltSnapshotStore implements the SnapshotStore interface and allows snapshots +// to be stored in BoltDB files on local disk. Since we always have an up to +// date FSM we use a special snapshot ID to indicate that the snapshot can be +// pulled from the BoltDB file that is currently backing the FSM. This allows us +// to provide just-in-time snapshots without doing incremental data dumps. 
+// +// When a snapshot is being installed on the node we will Create and Write data +// to it. This will cause the snapshot store to create a new BoltDB file and +// write the snapshot data to it. Then, we can simply rename the snapshot to the +// FSM's filename. This allows us to atomically install the snapshot and +// reduces the amount of disk i/o. Older snapshots are reaped on startup and +// before each subsequent snapshot write. This ensures we only have one snapshot +// on disk at a time. +type BoltSnapshotStore struct { + // path is the directory in which to store file based snapshots + path string + + // We hold a copy of the FSM so we can stream snapshots straight out of the + // database. + fsm *FSM + + logger log.Logger +} + +// BoltSnapshotSink implements SnapshotSink optionally choosing to write to a +// file. +type BoltSnapshotSink struct { + store *BoltSnapshotStore + logger log.Logger + meta raft.SnapshotMeta + trans raft.Transport + + // These fields will be used if we are writing a snapshot (vs. reading + // one) + written atomic.Bool + writer io.WriteCloser + writeError error + dir string + parentDir string + doneWritingCh chan struct{} + + l sync.Mutex + closed bool +} + +// NewBoltSnapshotStore creates a new BoltSnapshotStore based +// on a base directory. +func NewBoltSnapshotStore(base string, logger log.Logger, fsm *FSM) (*BoltSnapshotStore, error) { + if logger == nil { + return nil, fmt.Errorf("no logger provided") + } + + // Ensure our path exists + path := filepath.Join(base, snapPath) + if err := os.MkdirAll(path, 0o700); err != nil && !os.IsExist(err) { + return nil, fmt.Errorf("snapshot path not accessible: %v", err) + } + + // Setup the store + store := &BoltSnapshotStore{ + logger: logger, + fsm: fsm, + path: path, + } + + // Cleanup any old or failed snapshots on startup. 
+ if err := store.ReapSnapshots(); err != nil { + return nil, err + } + + return store, nil +} + +// Create is used to start a new snapshot +func (f *BoltSnapshotStore) Create(version raft.SnapshotVersion, index, term uint64, configuration raft.Configuration, configurationIndex uint64, trans raft.Transport) (raft.SnapshotSink, error) { + // We only support version 1 snapshots at this time. + if version != 1 { + return nil, fmt.Errorf("unsupported snapshot version %d", version) + } + + // Create the sink + sink := &BoltSnapshotSink{ + store: f, + logger: f.logger, + meta: raft.SnapshotMeta{ + Version: version, + ID: boltSnapshotID, + Index: index, + Term: term, + Configuration: configuration, + ConfigurationIndex: configurationIndex, + }, + trans: trans, + } + + return sink, nil +} + +// List returns available snapshots in the store. It only returns bolt +// snapshots. No snapshot will be returned if there are no indexes in the +// FSM. +func (f *BoltSnapshotStore) List() ([]*raft.SnapshotMeta, error) { + meta, err := f.getMetaFromFSM() + if err != nil { + return nil, err + } + + // If we haven't seen any data yet do not return a snapshot + if meta.Index == 0 { + return nil, nil + } + + return []*raft.SnapshotMeta{meta}, nil +} + +// getBoltSnapshotMeta returns the fsm's latest state and configuration. +func (f *BoltSnapshotStore) getMetaFromFSM() (*raft.SnapshotMeta, error) { + latestIndex, latestConfig := f.fsm.LatestState() + meta := &raft.SnapshotMeta{ + Version: 1, + ID: boltSnapshotID, + Index: latestIndex.Index, + Term: latestIndex.Term, + } + + if latestConfig != nil { + meta.ConfigurationIndex, meta.Configuration = protoConfigurationToRaftConfiguration(latestConfig) + } + + return meta, nil +} + +// Open takes a snapshot ID and returns a ReadCloser for that snapshot. 
+func (f *BoltSnapshotStore) Open(id string) (*raft.SnapshotMeta, io.ReadCloser, error) { + if id == boltSnapshotID { + return f.openFromFSM() + } + + return f.openFromFile(id) +} + +func (f *BoltSnapshotStore) openFromFSM() (*raft.SnapshotMeta, io.ReadCloser, error) { + meta, err := f.getMetaFromFSM() + if err != nil { + return nil, nil, err + } + // If we don't have any data return an error + if meta.Index == 0 { + return nil, nil, errors.New("no snapshot data") + } + + // Stream data out of the FSM to calculate the size + readCloser, writeCloser := io.Pipe() + metaReadCloser, metaWriteCloser := io.Pipe() + go func() { + f.fsm.writeTo(context.Background(), metaWriteCloser, writeCloser) + }() + + // Compute the size + n, err := io.Copy(ioutil.Discard, metaReadCloser) + if err != nil { + f.logger.Error("failed to read state file", "error", err) + metaReadCloser.Close() + readCloser.Close() + return nil, nil, err + } + + meta.Size = n + metaReadCloser.Close() + + return meta, readCloser, nil +} + +func (f *BoltSnapshotStore) getMetaFromDB(id string) (*raft.SnapshotMeta, error) { + if len(id) == 0 { + return nil, errors.New("can not open empty snapshot ID") + } + + filename := filepath.Join(f.path, id, databaseFilename) + boltDB, err := bolt.Open(filename, 0o600, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return nil, err + } + defer boltDB.Close() + + meta := &raft.SnapshotMeta{ + Version: 1, + ID: id, + } + + err = boltDB.View(func(tx *bolt.Tx) error { + b := tx.Bucket(configBucketName) + val := b.Get(latestIndexKey) + if val != nil { + var snapshotIndexes IndexValue + err := proto.Unmarshal(val, &snapshotIndexes) + if err != nil { + return err + } + + meta.Index = snapshotIndexes.Index + meta.Term = snapshotIndexes.Term + } + + // Read in our latest config and populate it inmemory + val = b.Get(latestConfigKey) + if val != nil { + var config ConfigurationValue + err := proto.Unmarshal(val, &config) + if err != nil { + return err + } + + 
meta.ConfigurationIndex, meta.Configuration = protoConfigurationToRaftConfiguration(&config) + } + return nil + }) + if err != nil { + return nil, err + } + + return meta, nil +} + +func (f *BoltSnapshotStore) openFromFile(id string) (*raft.SnapshotMeta, io.ReadCloser, error) { + meta, err := f.getMetaFromDB(id) + if err != nil { + return nil, nil, err + } + + filename := filepath.Join(f.path, id, databaseFilename) + installer := &boltSnapshotInstaller{ + meta: meta, + ReadCloser: ioutil.NopCloser(strings.NewReader(filename)), + filename: filename, + } + + return meta, installer, nil +} + +// ReapSnapshots reaps all snapshots. +func (f *BoltSnapshotStore) ReapSnapshots() error { + snapshots, err := ioutil.ReadDir(f.path) + switch { + case err == nil: + case os.IsNotExist(err): + return nil + default: + f.logger.Error("failed to scan snapshot directory", "error", err) + return err + } + + for _, snap := range snapshots { + // Ignore any files + if !snap.IsDir() { + continue + } + + // Warn about temporary snapshots, this indicates a previously failed + // snapshot attempt. We still want to clean these up. + dirName := snap.Name() + if strings.HasSuffix(dirName, tmpSuffix) { + f.logger.Warn("found temporary snapshot", "name", dirName) + } + + path := filepath.Join(f.path, dirName) + f.logger.Info("reaping snapshot", "path", path) + if err := os.RemoveAll(path); err != nil { + f.logger.Error("failed to reap snapshot", "path", snap.Name(), "error", err) + return err + } + } + + return nil +} + +// ID returns the ID of the snapshot, can be used with Open() +// after the snapshot is finalized. 
+func (s *BoltSnapshotSink) ID() string { + s.l.Lock() + defer s.l.Unlock() + + return s.meta.ID +} + +func (s *BoltSnapshotSink) writeBoltDBFile() error { + // Create a new path + name := snapshotName(s.meta.Term, s.meta.Index) + path := filepath.Join(s.store.path, name+tmpSuffix) + s.logger.Info("creating new snapshot", "path", path) + + // Make the directory + if err := os.MkdirAll(path, 0o700); err != nil { + s.logger.Error("failed to make snapshot directory", "error", err) + return err + } + + // Create the BoltDB file + dbPath := filepath.Join(path, databaseFilename) + boltDB, err := bolt.Open(dbPath, 0o600, &bolt.Options{Timeout: 1 * time.Second}) + if err != nil { + return err + } + + // Write the snapshot metadata + if err := writeSnapshotMetaToDB(&s.meta, boltDB); err != nil { + return err + } + + // Set the snapshot ID to the generated name. + s.meta.ID = name + + // Create the done channel + s.doneWritingCh = make(chan struct{}) + + // Store the directories so we can commit the changes on success or abort + // them on failure. + s.dir = path + s.parentDir = s.store.path + + // Create a pipe so we pipe writes into the go routine below. + reader, writer := io.Pipe() + s.writer = writer + + // Start a go routine in charge of piping data from the snapshot's Write + // call to the delimtedreader and the BoltDB file. + go func() { + defer close(s.doneWritingCh) + defer boltDB.Close() + + // The delimted reader will parse full proto messages from the snapshot + // data. + protoReader := NewDelimitedReader(reader, math.MaxInt32) + defer protoReader.Close() + + var done bool + var keys int + entry := new(pb.StorageEntry) + for !done { + err := boltDB.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists(dataBucketName) + if err != nil { + return err + } + + // Commit in batches of 50k. Bolt holds all the data in memory and + // doesn't split the pages until commit so we do incremental writes. 
+ for i := 0; i < 50000; i++ { + err := protoReader.ReadMsg(entry) + if err != nil { + if err == io.EOF { + done = true + return nil + } + return err + } + + err = b.Put([]byte(entry.Key), entry.Value) + if err != nil { + return err + } + keys += 1 + } + + return nil + }) + if err != nil { + s.logger.Error("snapshot write: failed to write transaction", "error", err) + s.writeError = err + return + } + + s.logger.Trace("snapshot write: writing keys", "num_written", keys) + } + }() + + return nil +} + +// Write is used to append to the bolt file. The first call to write ensures we +// have the file created. +func (s *BoltSnapshotSink) Write(b []byte) (int, error) { + s.l.Lock() + defer s.l.Unlock() + + // If this is the first call to Write we need to setup the boltDB file and + // kickoff the pipeline write + if previouslyWritten := s.written.Swap(true); !previouslyWritten { + // Reap any old snapshots + if err := s.store.ReapSnapshots(); err != nil { + return 0, err + } + + if err := s.writeBoltDBFile(); err != nil { + return 0, err + } + } + + return s.writer.Write(b) +} + +// Close is used to indicate a successful end. +func (s *BoltSnapshotSink) Close() error { + s.l.Lock() + defer s.l.Unlock() + + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + if s.writer != nil { + s.writer.Close() + <-s.doneWritingCh + + if s.writeError != nil { + // If we encountered an error while writing then we should remove + // the directory and return the error + _ = os.RemoveAll(s.dir) + return s.writeError + } + + // Move the directory into place + newPath := strings.TrimSuffix(s.dir, tmpSuffix) + + var err error + if runtime.GOOS != "windows" { + err = safeio.Rename(s.dir, newPath) + } else { + err = os.Rename(s.dir, newPath) + } + + if err != nil { + s.logger.Error("failed to move snapshot into place", "error", err) + return err + } + } + + return nil +} + +// Cancel is used to indicate an unsuccessful end. 
+func (s *BoltSnapshotSink) Cancel() error { + s.l.Lock() + defer s.l.Unlock() + + // Make sure close is idempotent + if s.closed { + return nil + } + s.closed = true + + if s.writer != nil { + s.writer.Close() + <-s.doneWritingCh + + // Attempt to remove all artifacts + return os.RemoveAll(s.dir) + } + + return nil +} + +type boltSnapshotInstaller struct { + io.ReadCloser + meta *raft.SnapshotMeta + filename string +} + +func (i *boltSnapshotInstaller) Filename() string { + return i.filename +} + +func (i *boltSnapshotInstaller) Metadata() *raft.SnapshotMeta { + return i.meta +} + +func (i *boltSnapshotInstaller) Install(filename string) error { + if len(i.filename) == 0 { + return errors.New("snapshot filename empty") + } + + if len(filename) == 0 { + return errors.New("fsm filename empty") + } + + // Rename the snapshot to the FSM location + if runtime.GOOS != "windows" { + return safeio.Rename(i.filename, filename) + } else { + return os.Rename(i.filename, filename) + } +} + +// snapshotName generates a name for the snapshot. 
+func snapshotName(term, index uint64) string { + now := time.Now() + msec := now.UnixNano() / int64(time.Millisecond) + return fmt.Sprintf("%d-%d-%d", term, index, msec) +} diff --git a/ui/tests/acceptance/enterprise-sidebar-nav-test.js b/ui/tests/acceptance/enterprise-sidebar-nav-test.js index 1414e3d1fdc3..82ddef0c9e93 100644 --- a/ui/tests/acceptance/enterprise-sidebar-nav-test.js +++ b/ui/tests/acceptance/enterprise-sidebar-nav-test.js @@ -4,67 +4,34 @@ */ import { module, test } from 'qunit'; -import { setupApplicationTest } from 'ember-qunit'; -import { click, currentURL, fillIn } from '@ember/test-helpers'; -import { setupMirage } from 'ember-cli-mirage/test-support'; -import authPage from 'vault/tests/pages/auth'; - -const link = (label) => `[data-test-sidebar-nav-link="${label}"]`; -const panel = (label) => `[data-test-sidebar-nav-panel="${label}"]`; - -module('Acceptance | Enterprise | sidebar navigation', function (hooks) { - setupApplicationTest(hooks); - setupMirage(hooks); - - hooks.beforeEach(function () { - return authPage.login(); - }); - - // common links are tested in the sidebar-nav test and will not be covered here - test('it should render enterprise only navigation links', async function (assert) { - assert.dom(panel('Cluster')).exists('Cluster nav panel renders'); - - await click(link('Secrets Sync')); - assert.strictEqual(currentURL(), '/vault/sync/secrets/overview', 'Sync route renders'); - - await click(link('Replication')); - assert.strictEqual(currentURL(), '/vault/replication', 'Replication route renders'); - await click('[data-test-replication-enable]'); - - await click(link('Performance')); - assert.strictEqual( - currentURL(), - '/vault/replication/performance', - 'Replication performance route renders' - ); - - await click(link('Disaster Recovery')); - assert.strictEqual(currentURL(), '/vault/replication/dr', 'Replication dr route renders'); - // disable replication now that we have checked the links - await 
click('[data-test-replication-link="manage"]'); - await click('[data-test-replication-action-trigger]'); - await fillIn('[data-test-confirmation-modal-input="Disable Replication?"]', 'Disaster Recovery'); - await click('[data-test-confirm-button="Disable Replication?"]'); - - await click(link('Client Count')); - assert.strictEqual(currentURL(), '/vault/clients/dashboard', 'Client counts route renders'); - - await click(link('License')); - assert.strictEqual(currentURL(), '/vault/license', 'License route renders'); - - await click(link('Access')); - await click(link('Control Groups')); - assert.strictEqual(currentURL(), '/vault/access/control-groups', 'Control groups route renders'); - - await click(link('Namespaces')); - assert.strictEqual(currentURL(), '/vault/access/namespaces?page=1', 'Replication route renders'); - - await click(link('Back to main navigation')); - await click(link('Policies')); - await click(link('Role-Governing Policies')); - assert.strictEqual(currentURL(), '/vault/policies/rgp', 'Role-Governing Policies route renders'); - - await click(link('Endpoint Governing Policies')); - assert.strictEqual(currentURL(), '/vault/policies/egp', 'Endpoint Governing Policies route renders'); +import { setupRenderingTest } from 'ember-qunit'; +import { render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; + +module('Integration | Component | splash-page', function (hooks) { + setupRenderingTest(hooks); + + test('it should render', async function (assert) { + assert.expect(4); + await render(hbs` + <:header> + Header here + + <:subHeader> + sub header + + <:content> + content + + <:footer> +
    footer
    + + +
    + `); + assert.dom('[data-test-splash-page-header]').includesText('Header here', 'Header renders'); + assert.dom('[data-test-splash-page-sub-header]').includesText('sub header', 'SubHeader renders'); + assert.dom('[data-test-splash-page-content]').includesText('content', 'Content renders'); + assert.dom('[data-test-footer]').includesText('footer', 'Footer renders'); }); }); diff --git a/ui/tests/acceptance/pki/pki-cross-sign-test.js b/ui/tests/acceptance/pki/pki-cross-sign-test.js index 6403cc0bcb32..187687ac4071 100644 --- a/ui/tests/acceptance/pki/pki-cross-sign-test.js +++ b/ui/tests/acceptance/pki/pki-cross-sign-test.js @@ -1,109 +1,21 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: BUSL-1.1 - */ - -import { module, test } from 'qunit'; -import { visit, click, fillIn, find } from '@ember/test-helpers'; -import { setupApplicationTest } from 'vault/tests/helpers'; -import { v4 as uuidv4 } from 'uuid'; - -import authPage from 'vault/tests/pages/auth'; -import enablePage from 'vault/tests/pages/settings/mount-secret-backend'; -import { runCommands } from 'vault/tests/helpers/pki/pki-run-commands'; -import { SELECTORS } from 'vault/tests/helpers/pki/pki-issuer-cross-sign'; -import { verifyCertificates } from 'vault/utils/parse-pki-cert'; -module('Acceptance | pki/pki cross sign', function (hooks) { - setupApplicationTest(hooks); - - hooks.beforeEach(async function () { - await authPage.login(); - this.parentMountPath = `parent-mount-${uuidv4()}`; - this.oldParentIssuerName = 'old-parent-issuer'; // old parent issuer we're transferring from - this.parentIssuerName = 'new-parent-issuer'; // issuer where cross-signing action will begin - this.intMountPath = `intermediate-mount-${uuidv4()}`; // first input box in cross-signing page - this.intIssuerName = 'my-intermediate-issuer'; // second input box in cross-signing page - this.newlySignedIssuer = 'my-newly-signed-int'; // third input - await enablePage.enable('pki', this.parentMountPath); - 
await enablePage.enable('pki', this.intMountPath); - - await runCommands([ - `write "${this.parentMountPath}/root/generate/internal" common_name="Long-Lived Root X1" ttl=8960h issuer_name="${this.oldParentIssuerName}"`, - `write "${this.parentMountPath}/root/generate/internal" common_name="Long-Lived Root X2" ttl=8960h issuer_name="${this.parentIssuerName}"`, - `write "${this.parentMountPath}/config/issuers" default="${this.parentIssuerName}"`, - ]); - }); - - hooks.afterEach(async function () { - // Cleanup engine - await runCommands([`delete sys/mounts/${this.intMountPath}`]); - await runCommands([`delete sys/mounts/${this.parentMountPath}`]); - }); - - test('it cross-signs an issuer', async function (assert) { - // configure parent and intermediate mounts to make them cross-signable - await visit(`/vault/secrets/${this.intMountPath}/pki/configuration/create`); - await click(SELECTORS.configure.optionByKey('generate-csr')); - await fillIn(SELECTORS.inputByName('type'), 'internal'); - await fillIn(SELECTORS.inputByName('commonName'), 'Short-Lived Int R1'); - await click('[data-test-save]'); - const csr = find(SELECTORS.copyButton('CSR')).getAttribute('data-clipboard-text'); - await visit(`vault/secrets/${this.parentMountPath}/pki/issuers/${this.oldParentIssuerName}/sign`); - await fillIn(SELECTORS.inputByName('csr'), csr); - await fillIn(SELECTORS.inputByName('format'), 'pem_bundle'); - await click('[data-test-pki-sign-intermediate-save]'); - const pemBundle = find(SELECTORS.copyButton('CA Chain')) - .getAttribute('data-clipboard-text') - .replace(/,/, '\n'); - await visit(`vault/secrets/${this.intMountPath}/pki/configuration/create`); - await click(SELECTORS.configure.optionByKey('import')); - await click('[data-test-text-toggle]'); - await fillIn('[data-test-text-file-textarea]', pemBundle); - await click(SELECTORS.configure.importSubmit); - await visit(`vault/secrets/${this.intMountPath}/pki/issuers`); - await click('[data-test-is-default]'); - // name default 
issuer of intermediate - const oldIntIssuerId = find(SELECTORS.rowValue('Issuer ID')).innerText; - const oldIntCert = find(SELECTORS.copyButton('Certificate')).getAttribute('data-clipboard-text'); - await click(SELECTORS.details.configure); - await fillIn(SELECTORS.inputByName('issuerName'), this.intIssuerName); - await click('[data-test-save]'); - - // perform cross-sign - await visit(`vault/secrets/${this.parentMountPath}/pki/issuers/${this.parentIssuerName}/cross-sign`); - await fillIn(SELECTORS.objectListInput('intermediateMount'), this.intMountPath); - await fillIn(SELECTORS.objectListInput('intermediateIssuer'), this.intIssuerName); - await fillIn(SELECTORS.objectListInput('newCrossSignedIssuer'), this.newlySignedIssuer); - await click(SELECTORS.submitButton); - assert - .dom(`${SELECTORS.signedIssuerCol('intermediateMount')} a`) - .hasAttribute('href', `/ui/vault/secrets/${this.intMountPath}/pki/overview`); - assert - .dom(`${SELECTORS.signedIssuerCol('intermediateIssuer')} a`) - .hasAttribute('href', `/ui/vault/secrets/${this.intMountPath}/pki/issuers/${oldIntIssuerId}/details`); - - // get certificate data of newly signed issuer - await click(`${SELECTORS.signedIssuerCol('newCrossSignedIssuer')} a`); - const newIntCert = find(SELECTORS.copyButton('Certificate')).getAttribute('data-clipboard-text'); - - // verify cross-sign was accurate by creating a role to issue a leaf certificate - const myRole = 'some-role'; - await runCommands([ - `write ${this.intMountPath}/roles/${myRole} \ - issuer_ref=${this.newlySignedIssuer}\ - allow_any_name=true \ - max_ttl="720h"`, - ]); - await visit(`vault/secrets/${this.intMountPath}/pki/roles/${myRole}/generate`); - await fillIn(SELECTORS.inputByName('commonName'), 'my-leaf'); - await fillIn('[data-test-ttl-value="TTL"]', '3600'); - await click('[data-test-pki-generate-button]'); - const myLeafCert = find(SELECTORS.copyButton('Certificate')).getAttribute('data-clipboard-text'); - - // see comments in 
utils/parse-pki-cert.js for step-by-step explanation of of verifyCertificates method - assert.true( - await verifyCertificates(oldIntCert, newIntCert, myLeafCert), - 'the leaf certificate validates against both intermediate certificates' - ); - }); -}); +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + +
    +
    +
    +
    + {{yield to="header"}} +
    +
    + {{yield to="subHeader"}} +
    + + {{yield to="footer"}} +
    +
    +
    \ No newline at end of file diff --git a/ui/tests/acceptance/raft-storage-test.js b/ui/tests/acceptance/raft-storage-test.js index 35ddc099aa87..cd39ad45782d 100644 --- a/ui/tests/acceptance/raft-storage-test.js +++ b/ui/tests/acceptance/raft-storage-test.js @@ -1,74 +1,893 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: BUSL-1.1 - */ - -import { module, test } from 'qunit'; -import { setupApplicationTest } from 'ember-qunit'; -import { setupMirage } from 'ember-cli-mirage/test-support'; -import { click, visit } from '@ember/test-helpers'; -import authPage from 'vault/tests/pages/auth'; - -module('Acceptance | raft storage', function (hooks) { - setupApplicationTest(hooks); - setupMirage(hooks); - - hooks.beforeEach(async function () { - this.config = this.server.create('configuration', 'withRaft'); - this.server.get('/sys/internal/ui/resultant-acl', () => - this.server.create('configuration', { data: { root: true } }) - ); - this.server.get('/sys/license/features', () => ({})); - await authPage.login(); - }); - - test('it should render correct number of raft peers', async function (assert) { - assert.expect(3); - - let didRemovePeer = false; - this.server.get('/sys/storage/raft/configuration', () => { - if (didRemovePeer) { - this.config.data.config.servers.pop(); - } else { - // consider peer removed by external means (cli) after initial request - didRemovePeer = true; - } - return this.config; - }); - - await visit('/vault/storage/raft'); - assert.dom('[data-raft-row]').exists({ count: 2 }, '2 raft peers render in table'); - // leave route and return to trigger config fetch - await visit('/vault/secrets'); - await visit('/vault/storage/raft'); - const store = this.owner.lookup('service:store'); - assert.strictEqual( - store.peekAll('server').length, - 2, - 'Store contains 2 server records since remove peer was triggered externally' - ); - assert.dom('[data-raft-row]').exists({ count: 1 }, 'Only raft nodes from response are rendered'); 
- }); - - test('it should remove raft peer', async function (assert) { - assert.expect(3); - - this.server.get('/sys/storage/raft/configuration', () => this.config); - this.server.post('/sys/storage/raft/remove-peer', (schema, req) => { - const body = JSON.parse(req.requestBody); - assert.strictEqual( - body.server_id, - this.config.data.config.servers[1].node_id, - 'Remove peer request made with node id' - ); - return {}; - }); - - await visit('/vault/storage/raft'); - assert.dom('[data-raft-row]').exists({ count: 2 }, '2 raft peers render in table'); - await click('[data-raft-row]:nth-child(2) [data-test-popup-menu-trigger]'); - await click('[data-test-confirm-action-trigger]'); - await click('[data-test-confirm-button]'); - assert.dom('[data-raft-row]').exists({ count: 1 }, 'Raft peer successfully removed'); - }); -}); +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "os/user" + "strings" + "syscall" + + "github.com/hashicorp/cli" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/ssh" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*SSHCommand)(nil) + _ cli.CommandAutocomplete = (*SSHCommand)(nil) +) + +type SSHCommand struct { + *BaseCommand + + // Common SSH options + flagMode string + flagRole string + flagNoExec bool + flagMountPoint string + flagStrictHostKeyChecking string + flagSSHExecutable string + flagUserKnownHostsFile string + + // SSH CA Mode options + flagPublicKeyPath string + flagPrivateKeyPath string + flagHostKeyMountPoint string + flagHostKeyHostnames string + flagValidPrincipals string +} + +func (c *SSHCommand) Synopsis() string { + return "Initiate an SSH session" +} + +func (c *SSHCommand) Help() string { + helpText := ` +Usage: vault ssh [options] username@ip [ssh options] + + Establishes an SSH connection 
with the target machine. + + This command uses one of the SSH secrets engines to authenticate and + automatically establish an SSH connection to a host. This operation requires + that the SSH secrets engine is mounted and configured. + + SSH using the OTP mode (requires sshpass for full automation): + + $ vault ssh -mode=otp -role=my-role user@1.2.3.4 + + SSH using the CA mode: + + $ vault ssh -mode=ca -role=my-role user@1.2.3.4 + + SSH using CA mode with host key verification: + + $ vault ssh \ + -mode=ca \ + -role=my-role \ + -host-key-mount-point=host-signer \ + -host-key-hostnames=example.com \ + user@example.com + + For the full list of options and arguments, please see the documentation. + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *SSHCommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + + f := set.NewFlagSet("SSH Options") + + // TODO: doc field? + + // General + f.StringVar(&StringVar{ + Name: "mode", + Target: &c.flagMode, + Default: "", + EnvVar: "", + Completion: complete.PredictSet("ca", "dynamic", "otp"), + Usage: "Name of the authentication mode (ca, dynamic, otp).", + }) + + f.StringVar(&StringVar{ + Name: "role", + Target: &c.flagRole, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "Name of the role to use to generate the key.", + }) + + f.BoolVar(&BoolVar{ + Name: "no-exec", + Target: &c.flagNoExec, + Default: false, + EnvVar: "", + Completion: complete.PredictNothing, + Usage: "Print the generated credentials, but do not establish a " + + "connection.", + }) + + f.StringVar(&StringVar{ + Name: "mount-point", + Target: &c.flagMountPoint, + Default: "ssh/", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "Mount point to the SSH secrets engine.", + }) + + f.StringVar(&StringVar{ + Name: "strict-host-key-checking", + Target: &c.flagStrictHostKeyChecking, + Default: "ask", + EnvVar: "VAULT_SSH_STRICT_HOST_KEY_CHECKING", + 
Completion: complete.PredictSet("ask", "no", "yes"), + Usage: "Value to use for the SSH configuration option " + + "\"StrictHostKeyChecking\".", + }) + + f.StringVar(&StringVar{ + Name: "user-known-hosts-file", + Target: &c.flagUserKnownHostsFile, + Default: "", + EnvVar: "VAULT_SSH_USER_KNOWN_HOSTS_FILE", + Completion: complete.PredictFiles("*"), + Usage: "Value to use for the SSH configuration option " + + "\"UserKnownHostsFile\".", + }) + + // SSH CA + f = set.NewFlagSet("CA Mode Options") + + f.StringVar(&StringVar{ + Name: "public-key-path", + Target: &c.flagPublicKeyPath, + Default: "~/.ssh/id_rsa.pub", + EnvVar: "", + Completion: complete.PredictFiles("*"), + Usage: "Path to the SSH public key to send to Vault for signing.", + }) + + f.StringVar(&StringVar{ + Name: "private-key-path", + Target: &c.flagPrivateKeyPath, + Default: "~/.ssh/id_rsa", + EnvVar: "", + Completion: complete.PredictFiles("*"), + Usage: "Path to the SSH private key to use for authentication. This must " + + "be the corresponding private key to -public-key-path.", + }) + + f.StringVar(&StringVar{ + Name: "host-key-mount-point", + Target: &c.flagHostKeyMountPoint, + Default: "", + EnvVar: "VAULT_SSH_HOST_KEY_MOUNT_POINT", + Completion: complete.PredictAnything, + Usage: "Mount point to the SSH secrets engine where host keys are signed. " + + "When given a value, Vault will generate a custom \"known_hosts\" file " + + "with delegation to the CA at the provided mount point to verify the " + + "SSH connection's host keys against the provided CA. By default, host " + + "keys are validated against the user's local \"known_hosts\" file. " + + "This flag forces strict key host checking and ignores a custom user " + + "known hosts file.", + }) + + f.StringVar(&StringVar{ + Name: "host-key-hostnames", + Target: &c.flagHostKeyHostnames, + Default: "*", + EnvVar: "VAULT_SSH_HOST_KEY_HOSTNAMES", + Completion: complete.PredictAnything, + Usage: "List of hostnames to delegate for the CA. 
The default value " + + "allows all domains and IPs. This is specified as a comma-separated " + + "list of values.", + }) + + f.StringVar(&StringVar{ + Name: "valid-principals", + Target: &c.flagValidPrincipals, + Default: "", + EnvVar: "", + Completion: complete.PredictAnything, + Usage: "List of valid principal names to include in the generated " + + "user certificate. This is specified as a comma-separated list of values.", + }) + + f.StringVar(&StringVar{ + Name: "ssh-executable", + Target: &c.flagSSHExecutable, + Default: "ssh", + EnvVar: "VAULT_SSH_EXECUTABLE", + Completion: complete.PredictAnything, + Usage: "Path to the SSH executable to use when connecting to the host", + }) + + return set +} + +func (c *SSHCommand) AutocompleteArgs() complete.Predictor { + return nil +} + +func (c *SSHCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +// Structure to hold the fields returned when asked for a credential from SSH +// secrets engine. +type SSHCredentialResp struct { + KeyType string `mapstructure:"key_type"` + Key string `mapstructure:"key"` + Username string `mapstructure:"username"` + IP string `mapstructure:"ip"` + Port string `mapstructure:"port"` +} + +func (c *SSHCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args, DisableDisplayFlagWarning(true)); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Use homedir to expand any relative paths such as ~/.ssh + c.flagUserKnownHostsFile = expandPath(c.flagUserKnownHostsFile) + c.flagPublicKeyPath = expandPath(c.flagPublicKeyPath) + c.flagPrivateKeyPath = expandPath(c.flagPrivateKeyPath) + + args = f.Args() + if len(args) < 1 { + c.UI.Error(fmt.Sprintf("Not enough arguments, (expected 1-n, got %d)", len(args))) + return 1 + } + + // Extract the hostname, username and port from the ssh command + hostname, username, port, err := c.parseSSHCommand(args) + if err != nil { + c.UI.Error(fmt.Sprintf("Error parsing the ssh command: %q", err)) + 
return 1 + } + + // Use the current user if no user was specified in the ssh command + if username == "" { + u, err := user.Current() + if err != nil { + c.UI.Error(fmt.Sprintf("Error getting the current user: %q", err)) + return 1 + } + username = u.Username + } + + ip, err := c.resolveHostname(hostname) + if err != nil { + c.UI.Error(fmt.Sprintf("Error resolving the ssh hostname: %q", err)) + return 1 + } + + // Set the client in the command + _, err = c.Client() + if err != nil { + c.UI.Error(err.Error()) + return 1 + } + + // Credentials are generated only against a registered role. If user + // does not specify a role with the SSH command, then lookup API is used + // to fetch all the roles with which this IP is associated. If there is + // only one role associated with it, use it to establish the connection. + // + // TODO: remove in 0.9.0, convert to validation error + if c.flagRole == "" { + c.UI.Warn(wrapAtLength( + "WARNING: No -role specified. Use -role to tell Vault which ssh role " + + "to use for authentication. In the future, you will need to tell " + + "Vault which role to use. For now, Vault will attempt to guess based " + + "on the API response. This will be removed in the Vault 1.1.")) + + role, err := c.defaultRole(c.flagMountPoint, ip) + if err != nil { + c.UI.Error(fmt.Sprintf("Error choosing role: %v", err)) + return 1 + } + // Print the default role chosen so that user knows the role name + // if something doesn't work. If the role chosen is not allowed to + // be used by the user (ACL enforcement), then user should see an + // error message accordingly. + c.UI.Output(fmt.Sprintf("Vault SSH: Role: %q", role)) + c.flagRole = role + } + + // If no mode was given, perform the old-school lookup. Keep this now for + // backwards-compatibility, but print a warning. + // + // TODO: remove in 0.9.0, convert to validation error + if c.flagMode == "" { + c.UI.Warn(wrapAtLength( + "WARNING: No -mode specified. 
Use -mode to tell Vault which ssh " + + "authentication mode to use. In the future, you will need to tell " + + "Vault which mode to use. For now, Vault will attempt to guess based " + + "on the API response. This guess involves creating a temporary " + + "credential, reading its type, and then revoking it. To reduce the " + + "number of API calls and surface area, specify -mode directly. This " + + "will be removed in Vault 1.1.")) + secret, cred, err := c.generateCredential(username, ip) + if err != nil { + // This is _very_ hacky, but is the only sane backwards-compatible way + // to do this. If the error is "key type unknown", we just assume the + // type is "ca". In the future, mode will be required as an option. + if strings.Contains(err.Error(), "key type unknown") { + c.flagMode = ssh.KeyTypeCA + } else { + c.UI.Error(fmt.Sprintf("Error getting credential: %s", err)) + return 1 + } + } else { + c.flagMode = cred.KeyType + } + + // Revoke the secret, since the child functions will generate their own + // credential. Users wishing to avoid this should specify -mode. + if secret != nil { + if err := c.client.Sys().Revoke(secret.LeaseID); err != nil { + c.UI.Warn(fmt.Sprintf("Failed to revoke temporary key: %s", err)) + } + } + } + + switch strings.ToLower(c.flagMode) { + case ssh.KeyTypeCA: + return c.handleTypeCA(username, ip, port, args) + case ssh.KeyTypeOTP: + return c.handleTypeOTP(username, ip, port, args) + case ssh.KeyTypeDynamic: + return c.handleTypeDynamic(username, ip, port, args) + default: + c.UI.Error(fmt.Sprintf("Unknown SSH mode: %s", c.flagMode)) + return 1 + } +} + +// handleTypeCA is used to handle SSH logins using the "CA" key type. 
+func (c *SSHCommand) handleTypeCA(username, ip, port string, sshArgs []string) int { + // Read the key from disk + publicKey, err := ioutil.ReadFile(c.flagPublicKeyPath) + if err != nil { + c.UI.Error(fmt.Sprintf("failed to read public key %s: %s", + c.flagPublicKeyPath, err)) + return 1 + } + + sshClient := c.client.SSHWithMountPoint(c.flagMountPoint) + + principals := username + if c.flagValidPrincipals != "" { + principals = c.flagValidPrincipals + } + + // Attempt to sign the public key + secret, err := sshClient.SignKey(c.flagRole, map[string]interface{}{ + // WARNING: publicKey is []byte, which is b64 encoded on JSON upload. We + // have to convert it to a string. SV lost many hours to this... + "public_key": string(publicKey), + "valid_principals": principals, + "cert_type": "user", + + // TODO: let the user configure these. In the interim, if users want to + // customize these values, they can produce the key themselves. + "extensions": map[string]string{ + "permit-X11-forwarding": "", + "permit-agent-forwarding": "", + "permit-port-forwarding": "", + "permit-pty": "", + "permit-user-rc": "", + }, + }) + if err != nil { + c.UI.Error(fmt.Sprintf("failed to sign public key %s: %s", + c.flagPublicKeyPath, err)) + return 2 + } + if secret == nil || secret.Data == nil { + c.UI.Error("missing signed key") + return 2 + } + + // Handle no-exec + if c.flagNoExec { + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + return OutputSecret(c.UI, secret) + } + + // Extract public key + key, ok := secret.Data["signed_key"].(string) + if !ok || key == "" { + c.UI.Error("signed key is empty") + return 2 + } + + // Capture the current value - this could be overwritten later if the user + // enabled host key signing verification. + userKnownHostsFile := c.flagUserKnownHostsFile + strictHostKeyChecking := c.flagStrictHostKeyChecking + + // Handle host key signing verification. 
If the user specified a mount point, + // download the public key, trust it with the given domains, and use that + // instead of the user's regular known_hosts file. + if c.flagHostKeyMountPoint != "" { + secret, err := c.client.Logical().Read(c.flagHostKeyMountPoint + "/config/ca") + if err != nil { + c.UI.Error(fmt.Sprintf("failed to get host signing key: %s", err)) + return 2 + } + if secret == nil || secret.Data == nil { + c.UI.Error("missing host signing key") + return 2 + } + publicKey, ok := secret.Data["public_key"].(string) + if !ok || publicKey == "" { + c.UI.Error("host signing key is empty") + return 2 + } + + // Write the known_hosts file + name := fmt.Sprintf("vault_ssh_ca_known_hosts_%s_%s", username, ip) + data := fmt.Sprintf("@cert-authority %s %s", c.flagHostKeyHostnames, publicKey) + knownHosts, err, closer := c.writeTemporaryFile(name, []byte(data), 0o644) + defer closer() + if err != nil { + c.UI.Error(fmt.Sprintf("failed to write host public key: %s", err)) + return 1 + } + + // Update the variables + userKnownHostsFile = knownHosts + strictHostKeyChecking = "yes" + } + + // Write the signed public key to disk + name := fmt.Sprintf("vault_ssh_ca_%s_%s", username, ip) + signedPublicKeyPath, err, closer := c.writeTemporaryKey(name, []byte(key)) + defer closer() + if err != nil { + c.UI.Error(fmt.Sprintf("failed to write signed public key: %s", err)) + return 2 + } + + args := []string{ + "-i", c.flagPrivateKeyPath, + "-i", signedPublicKeyPath, + "-o StrictHostKeyChecking=" + strictHostKeyChecking, + } + + if userKnownHostsFile != "" { + args = append(args, + "-o UserKnownHostsFile="+userKnownHostsFile, + ) + } + + // Add extra user defined ssh arguments + args = append(args, sshArgs...) + + cmd := exec.Command(c.flagSSHExecutable, args...) 
+ cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + exitCode := 2 + + if exitError, ok := err.(*exec.ExitError); ok { + if exitError.Success() { + return 0 + } + if ws, ok := exitError.Sys().(syscall.WaitStatus); ok { + exitCode = ws.ExitStatus() + } + } + + c.UI.Error(fmt.Sprintf("failed to run ssh command: %s", err)) + return exitCode + } + + // There is no secret to revoke, since it's a certificate signing + return 0 +} + +// handleTypeOTP is used to handle SSH logins using the "otp" key type. +func (c *SSHCommand) handleTypeOTP(username, ip, port string, sshArgs []string) int { + secret, cred, err := c.generateCredential(username, ip) + if err != nil { + c.UI.Error(fmt.Sprintf("failed to generate credential: %s", err)) + return 2 + } + + // Handle no-exec + if c.flagNoExec { + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + return OutputSecret(c.UI, secret) + } + + var cmd *exec.Cmd + + // Check if the application 'sshpass' is installed in the client machine. If + // it is then, use it to automate typing in OTP to the prompt. Unfortunately, + // it was not possible to automate it without a third-party application, with + // only the Go libraries. Feel free to try and remove this dependency. + args := make([]string, 0) + env := os.Environ() + sshCmd := c.flagSSHExecutable + + sshpassPath, err := exec.LookPath("sshpass") + if err != nil { + // No sshpass available so using normal ssh client + c.UI.Warn(wrapAtLength( + "Vault could not locate \"sshpass\". The OTP code for the session is " + + "displayed below. Enter this code in the SSH password prompt. 
If you " + + "install sshpass, Vault can automatically perform this step for you.")) + c.UI.Output("OTP for the session is: " + cred.Key) + } else { + // sshpass is available so lets use it instead + sshCmd = sshpassPath + args = append(args, + "-e", // Read password for SSHPASS environment variable + c.flagSSHExecutable, + ) + env = append(env, fmt.Sprintf("SSHPASS=%s", string(cred.Key))) + } + + // Only harcode the knownhostsfile path if it has been set + if c.flagUserKnownHostsFile != "" { + args = append(args, + "-o UserKnownHostsFile="+c.flagUserKnownHostsFile, + ) + } + + // If a port wasn't specified in the ssh arguments lets use the port we got back from vault + if port == "" { + args = append(args, "-p", cred.Port) + } + + args = append(args, + "-o StrictHostKeyChecking="+c.flagStrictHostKeyChecking, + ) + + // Add the rest of the ssh args appended by the user + args = append(args, sshArgs...) + + cmd = exec.Command(sshCmd, args...) + cmd.Env = env + + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + exitCode := 2 + + if exitError, ok := err.(*exec.ExitError); ok { + if exitError.Success() { + return 0 + } + if ws, ok := exitError.Sys().(syscall.WaitStatus); ok { + exitCode = ws.ExitStatus() + } + } + + c.UI.Error(fmt.Sprintf("failed to run ssh command: %s", err)) + return exitCode + } + + // Revoke the key if it's longer than expected + if err := c.client.Sys().Revoke(secret.LeaseID); err != nil { + c.UI.Error(fmt.Sprintf("failed to revoke key: %s", err)) + return 2 + } + + return 0 +} + +// handleTypeDynamic is used to handle SSH logins using the "dyanmic" key type. 
+func (c *SSHCommand) handleTypeDynamic(username, ip, port string, sshArgs []string) int { + // Generate the credential + secret, cred, err := c.generateCredential(username, ip) + if err != nil { + c.UI.Error(fmt.Sprintf("failed to generate credential: %s", err)) + return 2 + } + + // Handle no-exec + if c.flagNoExec { + if c.flagField != "" { + return PrintRawField(c.UI, secret, c.flagField) + } + return OutputSecret(c.UI, secret) + } + + // Write the dynamic key to disk + name := fmt.Sprintf("vault_ssh_dynamic_%s_%s", username, ip) + keyPath, err, closer := c.writeTemporaryKey(name, []byte(cred.Key)) + defer closer() + if err != nil { + c.UI.Error(fmt.Sprintf("failed to write dynamic key: %s", err)) + return 1 + } + + args := make([]string, 0) + // If a port wasn't specified in the ssh arguments lets use the port we got back from vault + if port == "" { + args = append(args, "-p", cred.Port) + } + + args = append(args, + "-i", keyPath, + "-o UserKnownHostsFile="+c.flagUserKnownHostsFile, + "-o StrictHostKeyChecking="+c.flagStrictHostKeyChecking, + ) + + // Add extra user defined ssh arguments + args = append(args, sshArgs...) + + cmd := exec.Command(c.flagSSHExecutable, args...) + cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err = cmd.Run() + if err != nil { + exitCode := 2 + + if exitError, ok := err.(*exec.ExitError); ok { + if exitError.Success() { + return 0 + } + if ws, ok := exitError.Sys().(syscall.WaitStatus); ok { + exitCode = ws.ExitStatus() + } + } + + c.UI.Error(fmt.Sprintf("failed to run ssh command: %s", err)) + return exitCode + } + + // Revoke the key if it's longer than expected + if err := c.client.Sys().Revoke(secret.LeaseID); err != nil { + c.UI.Error(fmt.Sprintf("failed to revoke key: %s", err)) + return 2 + } + + return 0 +} + +// generateCredential generates a credential for the given role and returns the +// decoded secret data. 
+func (c *SSHCommand) generateCredential(username, ip string) (*api.Secret, *SSHCredentialResp, error) { + sshClient := c.client.SSHWithMountPoint(c.flagMountPoint) + + // Attempt to generate the credential. + secret, err := sshClient.Credential(c.flagRole, map[string]interface{}{ + "username": username, + "ip": ip, + }) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to get credentials") + } + if secret == nil || secret.Data == nil { + return nil, nil, fmt.Errorf("vault returned empty credentials") + } + + // Port comes back as a json.Number which mapstructure doesn't like, so + // convert it + if d, ok := secret.Data["port"].(json.Number); ok { + secret.Data["port"] = d.String() + } + + // Use mapstructure to decode the response + var resp SSHCredentialResp + if err := mapstructure.Decode(secret.Data, &resp); err != nil { + return nil, nil, errors.Wrap(err, "failed to decode credential") + } + + // Check for an empty key response + if len(resp.Key) == 0 { + return nil, nil, fmt.Errorf("vault returned an invalid key") + } + + return secret, &resp, nil +} + +// writeTemporaryFile writes a file to a temp location with the given data and +// file permissions. +func (c *SSHCommand) writeTemporaryFile(name string, data []byte, perms os.FileMode) (string, error, func() error) { + // default closer to prevent panic + closer := func() error { return nil } + + f, err := ioutil.TempFile("", name) + if err != nil { + return "", errors.Wrap(err, "creating temporary file"), closer + } + + closer = func() error { return os.Remove(f.Name()) } + + if err := ioutil.WriteFile(f.Name(), data, perms); err != nil { + return "", errors.Wrap(err, "writing temporary key"), closer + } + + if err := f.Close(); err != nil { + return "", errors.Wrap(err, "closing temporary key"), closer + } + + return f.Name(), nil, closer +} + +// writeTemporaryKey writes the key to a temporary file and returns the path. +// The caller should defer the closer to cleanup the key. 
+func (c *SSHCommand) writeTemporaryKey(name string, data []byte) (string, error, func() error) { + return c.writeTemporaryFile(name, data, 0o600) +} + +// If user did not provide the role with which SSH connection has +// to be established and if there is only one role associated with +// the IP, it is used by default. +func (c *SSHCommand) defaultRole(mountPoint, ip string) (string, error) { + data := map[string]interface{}{ + "ip": ip, + } + secret, err := c.client.Logical().Write(mountPoint+"/lookup", data) + if err != nil { + return "", fmt.Errorf("error finding roles for IP %q: %w", ip, err) + } + if secret == nil || secret.Data == nil { + return "", fmt.Errorf("error finding roles for IP %q: %w", ip, err) + } + + if secret.Data["roles"] == nil { + return "", fmt.Errorf("no matching roles found for IP %q", ip) + } + + if len(secret.Data["roles"].([]interface{})) == 1 { + return secret.Data["roles"].([]interface{})[0].(string), nil + } else { + var roleNames string + for _, item := range secret.Data["roles"].([]interface{}) { + roleNames += item.(string) + ", " + } + roleNames = strings.TrimRight(roleNames, ", ") + return "", fmt.Errorf("Roles: %q. "+` + Multiple roles are registered for this IP. + Select a role using '-role' option. + Note that all roles may not be permitted, based on ACLs.`, roleNames) + } +} + +func (c *SSHCommand) isSingleSSHArg(arg string) bool { + // list of single SSH arguments is taken from + // https://github.com/openssh/openssh-portable/blob/28013759f09ed3ebf7e8335e83a62936bd7a7f47/ssh.c#L204 + singleArgs := []string{ + "4", "6", "A", "a", "C", "f", "G", "g", "K", "k", "M", "N", "n", "q", + "s", "T", "t", "V", "v", "X", "x", "Y", "y", + } + + // We want to get the first character after the dash. 
This is so args like -vvv are picked up as just being -v + flag := string(arg[1]) + + for _, a := range singleArgs { + if flag == a { + return true + } + } + return false +} + +// Finds the hostname, username (optional) and port (optional) from any valid ssh command +// Supports usrname@hostname but also specifying valid ssh flags like -o User=username, +// -o Port=2222 and -p 2222 anywhere in the command +func (c *SSHCommand) parseSSHCommand(args []string) (hostname string, username string, port string, err error) { + lastArg := "" + for _, i := range args { + arg := lastArg + lastArg = "" + + // If -p has been specified then this is our ssh port + if arg == "-p" { + port = i + continue + } + + // this is an ssh option, lets see if User or Port have been set and use it + if arg == "-o" { + split := strings.Split(i, "=") + key := split[0] + // Incase the value contains = signs we want to get all of them + value := strings.Join(split[1:], " ") + + if key == "User" { + // Don't overwrite the user if it is already set by username@hostname + // This matches the behaviour for how regular ssh reponds when both are specified + if username == "" { + username = value + } + } + + if key == "Port" { + // Don't overwrite the port if it is already set by -p + // This matches the behaviour for how regular ssh reponds when both are specified + if port == "" { + port = value + } + } + continue + } + + // This isn't an ssh argument that we care about. 
Lets keep on parsing the command + if arg != "" { + continue + } + + // If this is an ssh argument with a value we want to look at it in the next loop + if strings.HasPrefix(i, "-") { + // If this isn't a single SSH arg we want to store the flag to we can look at the value next loop + if !c.isSingleSSHArg(i) { + lastArg = i + } + continue + } + + // If we have gotten this far it means this is a bare argument + // The first bare argument is the hostname + // The second bare argument is the command to run on the remote host + + // If the hostname hasn't been set yet than it means we have found the first bare argument + if hostname == "" { + if strings.Contains(i, "@") { + split := strings.Split(i, "@") + username = split[0] + hostname = split[1] + } else { + hostname = i + } + continue + } else { + // The second bare argument is the command to run on the remote host. + // We need to break out and stop parsing arguments now + break + } + + } + if hostname == "" { + return "", "", "", errors.Wrap( + err, + fmt.Sprintf("failed to find a hostname in ssh command %q", strings.Join(args, " ")), + ) + } + return hostname, username, port, nil +} + +func (c *SSHCommand) resolveHostname(hostname string) (ip string, err error) { + // Resolving domain names to IP address on the client side. + // Vault only deals with IP addresses. + ipAddr, err := net.ResolveIPAddr("ip", hostname) + if err != nil { + return "", errors.Wrap(err, "failed to resolve IP address") + } + ip = ipAddr.String() + return ip, nil +} diff --git a/ui/tests/acceptance/secrets/backend/kv/kv-v2-workflow-edge-cases-test.js b/ui/tests/acceptance/secrets/backend/kv/kv-v2-workflow-edge-cases-test.js index c92914102ba9..b6dfd563d242 100644 --- a/ui/tests/acceptance/secrets/backend/kv/kv-v2-workflow-edge-cases-test.js +++ b/ui/tests/acceptance/secrets/backend/kv/kv-v2-workflow-edge-cases-test.js @@ -1,454 +1,235 @@ -/** - * Copyright (c) HashiCorp, Inc. 
- * SPDX-License-Identifier: BUSL-1.1 - */ - -import { module, test } from 'qunit'; -import { v4 as uuidv4 } from 'uuid'; -import { click, currentURL, fillIn, findAll, setupOnerror, typeIn, visit } from '@ember/test-helpers'; -import { setupApplicationTest } from 'vault/tests/helpers'; -import authPage from 'vault/tests/pages/auth'; -import { - createPolicyCmd, - deleteEngineCmd, - mountEngineCmd, - runCmd, - createTokenCmd, -} from 'vault/tests/helpers/commands'; -import { - dataPolicy, - deleteVersionsPolicy, - destroyVersionsPolicy, - metadataListPolicy, - metadataPolicy, -} from 'vault/tests/helpers/policy-generator/kv'; -import { clearRecords, writeSecret, writeVersionedSecret } from 'vault/tests/helpers/kv/kv-run-commands'; -import { FORM, PAGE } from 'vault/tests/helpers/kv/kv-selectors'; - -/** - * This test set is for testing edge cases, such as specific bug fixes or reported user workflows - */ -module('Acceptance | kv-v2 workflow | edge cases', function (hooks) { - setupApplicationTest(hooks); - - hooks.beforeEach(async function () { - const uid = uuidv4(); - this.backend = `kv-edge-${uid}`; - this.rootSecret = 'root-directory'; - this.fullSecretPath = `${this.rootSecret}/nested/child-secret`; - await authPage.login(); - await runCmd(mountEngineCmd('kv-v2', this.backend), false); - await writeSecret(this.backend, this.fullSecretPath, 'foo', 'bar'); - await writeSecret(this.backend, 'edge/one', 'foo', 'bar'); - await writeSecret(this.backend, 'edge/two', 'foo', 'bar'); - return; - }); - - hooks.afterEach(async function () { - await authPage.login(); - await runCmd(deleteEngineCmd(this.backend)); - return; - }); - - module('persona with read and list access on the secret level', function (hooks) { - // see github issue for more details https://github.com/hashicorp/vault/issues/5362 - hooks.beforeEach(async function () { - const secretPath = `${this.rootSecret}/*`; // user has LIST and READ access within this root secret directory - const capabilities = 
['list', 'read']; - const backend = this.backend; - const token = await runCmd([ - createPolicyCmd( - 'nested-secret-list-reader', - metadataPolicy({ backend, secretPath, capabilities }) + - dataPolicy({ backend, secretPath, capabilities }) - ), - createTokenCmd('nested-secret-list-reader'), - ]); - await authPage.login(token); - }); - - test('it can navigate to secrets within a secret directory', async function (assert) { - assert.expect(21); - const backend = this.backend; - const [root, subdirectory, secret] = this.fullSecretPath.split('/'); - - await visit(`/vault/secrets/${backend}/kv/list`); - assert.strictEqual(currentURL(), `/vault/secrets/${backend}/kv/list`, 'lands on secrets list page'); - - await typeIn(PAGE.list.overviewInput, `${root}/no-access/`); - assert - .dom(PAGE.list.overviewButton) - .hasText('View list', 'shows list and not secret because search is a directory'); - await click(PAGE.list.overviewButton); - assert.dom(PAGE.emptyStateTitle).hasText(`There are no secrets matching "${root}/no-access/".`); - - await visit(`/vault/secrets/${backend}/kv/list`); - await typeIn(PAGE.list.overviewInput, `${root}/`); // add slash because this is a directory - await click(PAGE.list.overviewButton); - - // URL correct - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/list/${root}/`, - 'visits list-directory of root' - ); - - // Title correct - assert.dom(PAGE.title).hasText(`${backend} Version 2`); - // Tabs correct - assert.dom(PAGE.secretTab('Secrets')).hasText('Secrets'); - assert.dom(PAGE.secretTab('Secrets')).hasClass('active'); - assert.dom(PAGE.secretTab('Configuration')).hasText('Configuration'); - assert.dom(PAGE.secretTab('Configuration')).doesNotHaveClass('active'); - // Toolbar correct - assert.dom(PAGE.toolbarAction).exists({ count: 1 }, 'toolbar only renders create secret action'); - assert.dom(PAGE.list.filter).hasValue(`${root}/`); - // List content correct - assert.dom(PAGE.list.item(`${subdirectory}/`)).exists('renders 
linked block for subdirectory'); - await click(PAGE.list.item(`${subdirectory}/`)); - assert.dom(PAGE.list.item(secret)).exists('renders linked block for child secret'); - await click(PAGE.list.item(secret)); - // Secret details visible - assert.dom(PAGE.title).hasText(this.fullSecretPath); - assert.dom(PAGE.secretTab('Secret')).hasText('Secret'); - assert.dom(PAGE.secretTab('Secret')).hasClass('active'); - assert.dom(PAGE.secretTab('Metadata')).hasText('Metadata'); - assert.dom(PAGE.secretTab('Metadata')).doesNotHaveClass('active'); - assert.dom(PAGE.secretTab('Version History')).hasText('Version History'); - assert.dom(PAGE.secretTab('Version History')).doesNotHaveClass('active'); - assert.dom(PAGE.toolbarAction).exists({ count: 5 }, 'toolbar renders all actions'); - }); - - test('it navigates back to engine index route via breadcrumbs from secret details', async function (assert) { - assert.expect(6); - const backend = this.backend; - const [root, subdirectory, secret] = this.fullSecretPath.split('/'); - - await visit(`vault/secrets/${backend}/kv/${encodeURIComponent(this.fullSecretPath)}/details?version=1`); - // navigate back through crumbs - let previousCrumb = findAll('[data-test-breadcrumbs] li').length - 2; - await click(PAGE.breadcrumbAtIdx(previousCrumb)); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/list/${root}/${subdirectory}/`, - 'goes back to subdirectory list' - ); - assert.dom(PAGE.list.filter).hasValue(`${root}/${subdirectory}/`); - assert.dom(PAGE.list.item(secret)).exists('renders linked block for child secret'); - - // back again - previousCrumb = findAll('[data-test-breadcrumbs] li').length - 2; - await click(PAGE.breadcrumbAtIdx(previousCrumb)); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/list/${root}/`, - 'goes back to root directory' - ); - assert.dom(PAGE.list.item(`${subdirectory}/`)).exists('renders linked block for subdirectory'); - - // and back to the engine list view - previousCrumb 
= findAll('[data-test-breadcrumbs] li').length - 2; - await click(PAGE.breadcrumbAtIdx(previousCrumb)); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/list`, - 'navigates back to engine list from crumbs' - ); - }); - - test('it handles errors when attempting to view details of a secret that is a directory', async function (assert) { - assert.expect(7); - const backend = this.backend; - const [root, subdirectory] = this.fullSecretPath.split('/'); - setupOnerror((error) => assert.strictEqual(error.httpStatus, 404), '404 error is thrown'); // catches error so qunit test doesn't fail - - await visit(`/vault/secrets/${backend}/kv/list`); - await typeIn(PAGE.list.overviewInput, `${root}/${subdirectory}`); // intentionally leave out trailing slash - await click(PAGE.list.overviewButton); - assert.dom(PAGE.error.title).hasText('404 Not Found'); - assert - .dom(PAGE.error.message) - .hasText(`Sorry, we were unable to find any content at /v1/${backend}/data/${root}/${subdirectory}.`); - - assert.dom(PAGE.breadcrumbAtIdx(0)).hasText('secrets'); - assert.dom(PAGE.breadcrumbAtIdx(1)).hasText(backend); - assert.dom(PAGE.secretTab('Secrets')).doesNotHaveClass('is-active'); - assert.dom(PAGE.secretTab('Configuration')).doesNotHaveClass('is-active'); - }); - }); - - module('destruction without read', function (hooks) { - hooks.beforeEach(async function () { - const backend = this.backend; - const testSecrets = [ - 'data-delete-only', - 'delete-version-only', - 'destroy-version-only', - 'destroy-metadata-only', - ]; - - // user has different permissions for each secret path - const token = await runCmd([ - createPolicyCmd( - 'destruction-no-read', - dataPolicy({ backend, secretPath: 'data-delete-only', capabilities: ['delete'] }) + - deleteVersionsPolicy({ backend, secretPath: 'delete-version-only' }) + - destroyVersionsPolicy({ backend, secretPath: 'destroy-version-only' }) + - metadataPolicy({ backend, secretPath: 'destroy-metadata-only', capabilities: 
['delete'] }) + - metadataListPolicy(backend) - ), - createTokenCmd('destruction-no-read'), - ]); - for (const secret of testSecrets) { - await writeVersionedSecret(backend, secret, 'foo', 'bar', 2); - } - await authPage.login(token); - }); - - test('it renders the delete action and disables delete this version option', async function (assert) { - assert.expect(4); - const testSecret = 'data-delete-only'; - await visit(`/vault/secrets/${this.backend}/kv/${testSecret}/details`); - - assert.dom(PAGE.detail.delete).exists('renders delete button'); - await click(PAGE.detail.delete); - assert - .dom(PAGE.detail.deleteModal) - .hasTextContaining('Delete this version This deletes a specific version of the secret'); - assert.dom(PAGE.detail.deleteOption).isDisabled('disables version specific option'); - assert.dom(PAGE.detail.deleteOptionLatest).isEnabled('enables version specific option'); - }); - - test('it renders the delete action and disables delete latest version option', async function (assert) { - assert.expect(4); - const testSecret = 'delete-version-only'; - await visit(`/vault/secrets/${this.backend}/kv/${testSecret}/details`); - - assert.dom(PAGE.detail.delete).exists('renders delete button'); - await click(PAGE.detail.delete); - assert - .dom(PAGE.detail.deleteModal) - .hasTextContaining('Delete this version This deletes a specific version of the secret'); - - assert.dom(PAGE.detail.deleteOption).isEnabled('enables version specific option'); - assert.dom(PAGE.detail.deleteOptionLatest).isDisabled('disables version specific option'); - }); - - test('it hides destroy option without version number', async function (assert) { - assert.expect(1); - const testSecret = 'destroy-version-only'; - await visit(`/vault/secrets/${this.backend}/kv/${testSecret}/details`); - - assert.dom(PAGE.detail.destroy).doesNotExist(); - }); - - test('it renders the destroy metadata action and expected modal copy', async function (assert) { - assert.expect(2); - - const testSecret = 
'destroy-metadata-only'; - await visit(`/vault/secrets/${this.backend}/kv/${testSecret}/metadata`); - assert.dom(PAGE.metadata.deleteMetadata).exists('renders delete metadata button'); - await click(PAGE.metadata.deleteMetadata); - assert - .dom(PAGE.detail.deleteModal) - .hasText( - 'Delete metadata and secret data? This will permanently delete the metadata and versions of the secret. All version history will be removed. This cannot be undone. Confirm Cancel' - ); - }); - }); - - test('no ghost item after editing metadata', async function (assert) { - await visit(`/vault/secrets/${this.backend}/kv/list/edge/`); - assert.dom(PAGE.list.item()).exists({ count: 2 }, 'two secrets are listed'); - await click(PAGE.list.item('two')); - await click(PAGE.secretTab('Metadata')); - await click(PAGE.metadata.editBtn); - await fillIn(FORM.keyInput(), 'foo'); - await fillIn(FORM.valueInput(), 'bar'); - await click(FORM.saveBtn); - await click(PAGE.breadcrumbAtIdx(2)); - assert.dom(PAGE.list.item()).exists({ count: 2 }, 'two secrets are listed'); - }); -}); - -// NAMESPACE TESTS -module('Acceptance | Enterprise | kv-v2 workflow | edge cases', function (hooks) { - setupApplicationTest(hooks); - - const navToEngine = async (backend) => { - await click('[data-test-sidebar-nav-link="Secrets Engines"]'); - return await click(PAGE.backends.link(backend)); - }; - - const assertDeleteActions = (assert, expected = ['delete', 'destroy']) => { - ['delete', 'destroy', 'undelete'].forEach((toolbar) => { - if (expected.includes(toolbar)) { - assert.dom(PAGE.detail[toolbar]).exists(`${toolbar} toolbar action exists`); - } else { - assert.dom(PAGE.detail[toolbar]).doesNotExist(`${toolbar} toolbar action not rendered`); - } - }); - }; - - const assertVersionDropdown = async (assert, deleted = [], versions = [2, 1]) => { - assert.dom(PAGE.detail.versionDropdown).hasText(`Version ${versions[0]}`); - await click(PAGE.detail.versionDropdown); - versions.forEach((num) => { - 
assert.dom(PAGE.detail.version(num)).exists(`renders version ${num} link in dropdown`); - }); - // also asserts destroyed icon - deleted.forEach((num) => { - assert.dom(`${PAGE.detail.version(num)} [data-test-icon="x-square"]`); - }); - }; - - // each test uses a different secret path - hooks.beforeEach(async function () { - const uid = uuidv4(); - this.store = this.owner.lookup('service:store'); - this.backend = `kv-enterprise-edge-${uid}`; - this.namespace = `ns-${uid}`; - await authPage.login(); - await runCmd([`write sys/namespaces/${this.namespace} -force`]); - return; - }); - - hooks.afterEach(async function () { - await authPage.login(); - await runCmd([`delete /sys/auth/${this.namespace}`]); - await runCmd(deleteEngineCmd(this.backend)); - return; - }); - - module('admin persona', function (hooks) { - hooks.beforeEach(async function () { - await authPage.loginNs(this.namespace); - // mount engine within namespace - await runCmd(mountEngineCmd('kv-v2', this.backend), false); - clearRecords(this.store); - return; - }); - hooks.afterEach(async function () { - // visit logout with namespace query param because we're transitioning from within an engine - // and navigating directly to /vault/auth caused test context routing problems :( - await visit(`/vault/logout?namespace=${this.namespace}`); - await authPage.namespaceInput(''); // clear login form namespace input - }); - - test('namespace: it can create a secret and new secret version', async function (assert) { - assert.expect(15); - const backend = this.backend; - const ns = this.namespace; - const secret = 'my-create-secret'; - await navToEngine(backend); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/list?namespace=${ns}`, - 'navigates to list' - ); - // Create first version of secret - await click(PAGE.list.createSecret); - await fillIn(FORM.inputByAttr('path'), secret); - assert.dom(FORM.toggleMetadata).exists('Shows metadata toggle when creating new secret'); - await 
fillIn(FORM.keyInput(), 'foo'); - await fillIn(FORM.maskedValueInput(), 'woahsecret'); - await click(FORM.saveBtn); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secret}/details?namespace=${ns}&version=1`, - 'navigates to details' - ); - - // Create a new version - await click(PAGE.detail.createNewVersion); - assert.dom(FORM.inputByAttr('path')).isDisabled('path input is disabled'); - assert.dom(FORM.inputByAttr('path')).hasValue(secret); - assert.dom(FORM.toggleMetadata).doesNotExist('Does not show metadata toggle when creating new version'); - assert.dom(FORM.keyInput()).hasValue('foo'); - assert.dom(FORM.maskedValueInput()).hasValue('woahsecret'); - await fillIn(FORM.keyInput(1), 'foo-two'); - await fillIn(FORM.maskedValueInput(1), 'supersecret'); - await click(FORM.saveBtn); - - // Check details - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secret}/details?namespace=${ns}&version=2`, - 'navigates to details' - ); - await assertVersionDropdown(assert); - assert - .dom(`${PAGE.detail.version(2)} [data-test-icon="check-circle"]`) - .exists('renders current version icon'); - assert.dom(PAGE.infoRowValue('foo-two')).hasText('***********'); - await click(PAGE.infoRowToggleMasked('foo-two')); - assert.dom(PAGE.infoRowValue('foo-two')).hasText('supersecret', 'secret value shows after toggle'); - }); - - test('namespace: it manages state throughout delete, destroy and undelete operations', async function (assert) { - assert.expect(34); - const backend = this.backend; - const ns = this.namespace; - const secret = 'my-delete-secret'; - await writeVersionedSecret(backend, secret, 'foo', 'bar', 2, ns); - await navToEngine(backend); - - await click(PAGE.list.item(secret)); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secret}/details?namespace=${ns}&version=2`, - 'navigates to details' - ); - - // correct toolbar options & details show - assertDeleteActions(assert); - await 
assertVersionDropdown(assert); - // delete flow - await click(PAGE.detail.delete); - await click(PAGE.detail.deleteOption); - await click(PAGE.detail.deleteConfirm); - // check empty state and toolbar - assertDeleteActions(assert, ['undelete', 'destroy']); - assert - .dom(PAGE.emptyStateTitle) - .hasText('Version 2 of this secret has been deleted', 'Shows deleted message'); - assert.dom(PAGE.detail.versionTimestamp).includesText('Version 2 deleted'); - await assertVersionDropdown(assert, [2]); // important to test dropdown versions are accurate - - // navigate to sibling route to make sure empty state remains for details tab - await click(PAGE.secretTab('Version History')); - assert.dom(PAGE.versions.linkedBlock()).exists({ count: 2 }); - - // back to secret tab to confirm deleted state - await click(PAGE.secretTab('Secret')); - // if this assertion fails, the view is rendering a stale model - assert.dom(PAGE.emptyStateTitle).exists('still renders empty state!!'); - await assertVersionDropdown(assert, [2]); - - // undelete flow - await click(PAGE.detail.undelete); - // details update accordingly - assertDeleteActions(assert, ['delete', 'destroy']); - assert.dom(PAGE.infoRow).exists('shows secret data'); - assert.dom(PAGE.detail.versionTimestamp).includesText('Version 2 created'); - - // destroy flow - await click(PAGE.detail.destroy); - await click(PAGE.detail.deleteConfirm); - assertDeleteActions(assert, []); - assert - .dom(PAGE.emptyStateTitle) - .hasText('Version 2 of this secret has been permanently destroyed', 'Shows destroyed message'); - - // navigate to sibling route to make sure empty state remains for details tab - await click(PAGE.secretTab('Version History')); - assert.dom(PAGE.versions.linkedBlock()).exists({ count: 2 }); - - // back to secret tab to confirm destroyed state - await click(PAGE.secretTab('Secret')); - // if this assertion fails, the view is rendering a stale model - assert.dom(PAGE.emptyStateTitle).exists('still renders empty state!!'); 
- await assertVersionDropdown(assert, [2]); - }); - }); -}); +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/cli" +) + +func testSSHCommand(tb testing.TB) (*cli.MockUi, *SSHCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &SSHCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestSSHCommand_Run(t *testing.T) { + t.Parallel() + t.Skip("Need a way to setup target infrastructure") +} + +func TestParseSSHCommand(t *testing.T) { + t.Parallel() + + _, cmd := testSSHCommand(t) + tests := []struct { + name string + args []string + hostname string + username string + port string + err error + }{ + { + "Parse just a hostname", + []string{ + "hostname", + }, + "hostname", + "", + "", + nil, + }, + { + "Parse the standard username@hostname", + []string{ + "username@hostname", + }, + "hostname", + "username", + "", + nil, + }, + { + "Parse the username out of -o User=username", + []string{ + "-o", "User=username", + "hostname", + }, + "hostname", + "username", + "", + nil, + }, + { + "If the username is specified with -o User=username and realname@hostname prefer realname@", + []string{ + "-o", "User=username", + "realname@hostname", + }, + "hostname", + "realname", + "", + nil, + }, + { + "Parse the port out of -o Port=2222", + []string{ + "-o", "Port=2222", + "hostname", + }, + "hostname", + "", + "2222", + nil, + }, + { + "Parse the port out of -p 2222", + []string{ + "-p", "2222", + "hostname", + }, + "hostname", + "", + "2222", + nil, + }, + { + "If port is defined with -o Port=2222 and -p 2244 prefer -p", + []string{ + "-p", "2244", + "-o", "Port=2222", + "hostname", + }, + "hostname", + "", + "2244", + nil, + }, + { + "Ssh args with a command", + []string{ + "hostname", + "command", + }, + "hostname", + "", + "", + nil, + }, + { + "Flags after the ssh command are not passed because they are part of the command", + []string{ + 
"username@hostname", + "command", + "-p 22", + }, + "hostname", + "username", + "", + nil, + }, + { + "Allow single args which don't have a value", + []string{ + "-v", + "hostname", + }, + "hostname", + "", + "", + nil, + }, + { + "Allow single args before and after the hostname and command", + []string{ + "-v", + "hostname", + "-v", + "command", + "-v", + }, + "hostname", + "", + "", + nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + hostname, username, port, err := cmd.parseSSHCommand(test.args) + if err != test.err { + t.Errorf("got error: %q want %q", err, test.err) + } + if hostname != test.hostname { + t.Errorf("got hostname: %q want %q", hostname, test.hostname) + } + if username != test.username { + t.Errorf("got username: %q want %q", username, test.username) + } + if port != test.port { + t.Errorf("got port: %q want %q", port, test.port) + } + }) + } +} + +func TestIsSingleSSHArg(t *testing.T) { + t.Parallel() + + _, cmd := testSSHCommand(t) + tests := []struct { + name string + arg string + want bool + }{ + { + "-v is a single ssh arg", + "-v", + true, + }, + { + "-o is NOT a single ssh arg", + "-o", + false, + }, + { + "Repeated args like -vvv is still a single ssh arg", + "-vvv", + true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := cmd.isSingleSSHArg(test.arg) + if got != test.want { + t.Errorf("arg %q got %v want %v", test.arg, got, test.want) + } + }) + } +} + +// TestSSHCommandOmitFlagWarning checks if flags warning messages are printed +// in the output of the CLI command or not. If so, it will fail. +func TestSSHCommandOmitFlagWarning(t *testing.T) { + t.Parallel() + + ui, cmd := testSSHCommand(t) + + _ = cmd.Run([]string{"-mode", "ca", "-role", "otp_key_role", "user@1.2.3.4", "-extraFlag", "bug"}) + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if strings.Contains(combined, "Command flags must be provided before positional arguments. 
The following arguments will not be parsed as flags") { + t.Fatalf("ssh command displayed flag warnings") + } +} diff --git a/ui/tests/acceptance/secrets/backend/kv/kv-v2-workflow-navigation-test.js b/ui/tests/acceptance/secrets/backend/kv/kv-v2-workflow-navigation-test.js index 54327e8d350c..3ce3fb96bb3b 100644 --- a/ui/tests/acceptance/secrets/backend/kv/kv-v2-workflow-navigation-test.js +++ b/ui/tests/acceptance/secrets/backend/kv/kv-v2-workflow-navigation-test.js @@ -1,1270 +1,114 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: BUSL-1.1 - */ - -import { module, test } from 'qunit'; -import { v4 as uuidv4 } from 'uuid'; -import { click, currentRouteName, currentURL, typeIn, visit, waitUntil } from '@ember/test-helpers'; -import { setupApplicationTest } from 'vault/tests/helpers'; -import authPage from 'vault/tests/pages/auth'; -import { - createPolicyCmd, - deleteEngineCmd, - mountEngineCmd, - runCmd, - createTokenCmd, - tokenWithPolicyCmd, -} from 'vault/tests/helpers/commands'; -import { personas } from 'vault/tests/helpers/policy-generator/kv'; -import { - addSecretMetadataCmd, - clearRecords, - writeSecret, - writeVersionedSecret, -} from 'vault/tests/helpers/kv/kv-run-commands'; -import { FORM, PAGE } from 'vault/tests/helpers/kv/kv-selectors'; -import { setupControlGroup, grantAccess } from 'vault/tests/helpers/control-groups'; - -const secretPath = `my-#:$=?-secret`; -// This doesn't encode in a normal way, so hardcoding it here until we sort that out -const secretPathUrlEncoded = `my-%23:$=%3F-secret`; -const navToBackend = async (backend) => { - await visit(`/vault/secrets`); - return click(PAGE.backends.link(backend)); -}; -const assertCorrectBreadcrumbs = (assert, expected) => { - assert.dom(PAGE.breadcrumb).exists({ count: expected.length }, 'correct number of breadcrumbs'); - const breadcrumbs = document.querySelectorAll(PAGE.breadcrumb); - expected.forEach((text, idx) => { - assert.dom(breadcrumbs[idx]).includesText(text, 
`position ${idx} breadcrumb includes text ${text}`); - }); -}; -const assertDetailTabs = (assert, current, hidden = []) => { - const allTabs = ['Secret', 'Metadata', 'Paths', 'Version History']; - allTabs.forEach((tab) => { - if (hidden.includes(tab)) { - assert.dom(PAGE.secretTab(tab)).doesNotExist(`${tab} tab does not render`); - return; - } - assert.dom(PAGE.secretTab(tab)).hasText(tab); - if (current === tab) { - assert.dom(PAGE.secretTab(tab)).hasClass('active'); - } else { - assert.dom(PAGE.secretTab(tab)).doesNotHaveClass('active'); - } - }); -}; -const DETAIL_TOOLBARS = ['delete', 'destroy', 'copy', 'versionDropdown', 'createNewVersion']; -const assertDetailsToolbar = (assert, expected = DETAIL_TOOLBARS) => { - assert - .dom(PAGE.toolbarAction) - .exists({ count: expected.length }, 'correct number of toolbar actions render'); - DETAIL_TOOLBARS.forEach((toolbar) => { - if (expected.includes(toolbar)) { - assert.dom(PAGE.detail[toolbar]).exists(`${toolbar} toolbar action exists`); - } else { - assert.dom(PAGE.detail[toolbar]).doesNotExist(`${toolbar} toolbar action not rendered`); - } - }); -}; - -/** - * This test set is for testing the navigation, breadcrumbs, and tabs. 
- * Letter(s) in parenthesis at the end are shorthand for the persona, - * for ease of tracking down specific tests failures from CI - */ -module('Acceptance | kv-v2 workflow | navigation', function (hooks) { - setupApplicationTest(hooks); - - hooks.beforeEach(async function () { - const uid = uuidv4(); - this.store = this.owner.lookup('service:store'); - this.emptyBackend = `kv-empty-${uid}`; - this.backend = `kv-nav-${uid}`; - await authPage.login(); - await runCmd(mountEngineCmd('kv-v2', this.emptyBackend), false); - await runCmd(mountEngineCmd('kv-v2', this.backend), false); - await writeSecret(this.backend, 'app/nested/secret', 'foo', 'bar'); - await writeVersionedSecret(this.backend, secretPath, 'foo', 'bar', 3); - await runCmd(addSecretMetadataCmd(this.backend, secretPath, { max_versions: 5, cas_required: true })); - return; - }); - - hooks.afterEach(async function () { - await authPage.login(); - await runCmd(deleteEngineCmd(this.backend)); - await runCmd(deleteEngineCmd(this.emptyBackend)); - return; - }); - - module('admin persona', function (hooks) { - hooks.beforeEach(async function () { - const token = await runCmd( - tokenWithPolicyCmd('admin', personas.admin(this.backend) + personas.admin(this.emptyBackend)) - ); - await authPage.login(token); - clearRecords(this.store); - return; - }); - test('empty backend - breadcrumbs, title, tabs, emptyState (a)', async function (assert) { - assert.expect(18); - const backend = this.emptyBackend; - await navToBackend(backend); - - // URL correct - assert.strictEqual(currentURL(), `/vault/secrets/${backend}/kv/list`, 'lands on secrets list page'); - // Breadcrumbs correct - assertCorrectBreadcrumbs(assert, ['secrets', backend]); - // Title correct - assert.dom(PAGE.title).hasText(`${backend} Version 2`); - // Tabs correct - assert.dom(PAGE.secretTab('Secrets')).hasText('Secrets'); - assert.dom(PAGE.secretTab('Secrets')).hasClass('active'); - assert.dom(PAGE.secretTab('Configuration')).hasText('Configuration'); - 
assert.dom(PAGE.secretTab('Configuration')).doesNotHaveClass('active'); - // Toolbar correct - assert.dom(PAGE.toolbar).exists({ count: 1 }, 'toolbar renders'); - assert.dom(PAGE.list.filter).doesNotExist('List filter does not show because no secrets exists.'); - // Page content correct - assert.dom(PAGE.emptyStateTitle).hasText('No secrets yet'); - assert.dom(PAGE.emptyStateActions).hasText('Create secret'); - assert.dom(PAGE.list.createSecret).hasText('Create secret'); - - // Click empty state CTA - await click(`${PAGE.emptyStateActions} a`); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/create`), - `url includes /vault/secrets/${backend}/kv/create` - ); - - // Click cancel btn - await click(FORM.cancelBtn); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list`), - `url includes /vault/secrets/${backend}/kv/list` - ); - - // click toolbar CTA - await click(PAGE.list.createSecret); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/create`), - `url includes /vault/secrets/${backend}/kv/create` - ); - - // Click cancel btn - await click(FORM.cancelBtn); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list`), - `url includes /vault/secrets/${backend}/kv/list` - ); - }); - test('can access nested secret (a)', async function (assert) { - assert.expect(40); - const backend = this.backend; - await navToBackend(backend); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'title text correct'); - assert.dom(PAGE.emptyStateTitle).doesNotExist('No empty state'); - assertCorrectBreadcrumbs(assert, ['secret', backend]); - assert.dom(PAGE.list.filter).hasNoValue('List filter input is empty'); - - // Navigate through list items - await click(PAGE.list.item('app/')); - assert.strictEqual(currentURL(), `/vault/secrets/${backend}/kv/list/app/`); - assertCorrectBreadcrumbs(assert, ['secret', backend, 'app']); - assert.dom(PAGE.title).hasText(`${backend} Version 2`); - 
assert.dom(PAGE.list.filter).hasValue('app/', 'List filter input is prefilled'); - assert.dom(PAGE.list.item('nested/')).exists('Shows nested secret'); - - await click(PAGE.list.item('nested/')); - assert.strictEqual(currentURL(), `/vault/secrets/${backend}/kv/list/app/nested/`); - assertCorrectBreadcrumbs(assert, ['secret', backend, 'app', 'nested']); - assert.dom(PAGE.title).hasText(`${backend} Version 2`); - assert.dom(PAGE.list.filter).hasValue('app/nested/', 'List filter input is prefilled'); - assert.dom(PAGE.list.item('secret')).exists('Shows deeply nested secret'); - - await click(PAGE.list.item('secret')); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/app%2Fnested%2Fsecret/details?version=1` - ); - assertCorrectBreadcrumbs(assert, ['secret', backend, 'app', 'nested', 'secret']); - assert.dom(PAGE.title).hasText('app/nested/secret', 'title is full secret path'); - assertDetailsToolbar(assert); - - await click(PAGE.breadcrumbAtIdx(3)); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list/app/nested/`), - 'links back to list directory' - ); - - await click(PAGE.breadcrumbAtIdx(2)); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list/app/`), - 'links back to list directory' - ); - - await click(PAGE.breadcrumbAtIdx(1)); - assert.ok(currentURL().startsWith(`/vault/secrets/${backend}/kv/list`), 'links back to list root'); - }); - test('versioned secret nav, tabs, breadcrumbs (a)', async function (assert) { - assert.expect(45); - const backend = this.backend; - await navToBackend(backend); - await click(PAGE.list.item(secretPath)); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details?version=3`, - 'Url includes version query param' - ); - assert.dom(PAGE.title).hasText(secretPath, 'title is correct on detail view'); - assertDetailTabs(assert, 'Secret'); - assert.dom(PAGE.detail.versionDropdown).hasText('Version 3', 'Version dropdown shows current 
version'); - assert.dom(PAGE.detail.createNewVersion).hasText('Create new version', 'Create version button shows'); - assert.dom(PAGE.detail.versionTimestamp).containsText('Version 3 created'); - assert.dom(PAGE.infoRowValue('foo')).exists('renders current data'); - - await click(PAGE.detail.createNewVersion); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details/edit?version=3`, - 'Url includes version query param' - ); - assert.dom(FORM.versionAlert).doesNotExist('Does not show version alert for current version'); - assert.dom(FORM.inputByAttr('path')).isDisabled(); - - await click(FORM.cancelBtn); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details?version=3`, - 'Goes back to detail view' - ); - - await click(PAGE.detail.versionDropdown); - await click(`${PAGE.detail.version(1)} a`); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details?version=1`, - 'Goes to detail view for version 1' - ); - assert.dom(PAGE.detail.versionDropdown).hasText('Version 1', 'Version dropdown shows selected version'); - assert.dom(PAGE.detail.versionTimestamp).containsText('Version 1 created'); - assert.dom(PAGE.infoRowValue('key-1')).exists('renders previous data'); - - await click(PAGE.detail.createNewVersion); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details/edit?version=1`, - 'Url includes version query param' - ); - assert.dom(FORM.inputByAttr('path')).isDisabled(); - assert.dom(FORM.keyInput()).hasValue('key-1', 'pre-populates form with selected version data'); - assert.dom(FORM.maskedValueInput()).hasValue('val-1', 'pre-populates form with selected version data'); - assert.dom(FORM.versionAlert).exists('Shows version alert'); - await click(FORM.cancelBtn); - - await click(PAGE.secretTab('Metadata')); - assert.strictEqual( - currentURL(), - 
`/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/metadata`, - `goes to metadata page` - ); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'metadata']); - assert.dom(PAGE.title).hasText(secretPath); - assert - .dom(`${PAGE.metadata.customMetadataSection} ${PAGE.emptyStateTitle}`) - .hasText('No custom metadata'); - assert - .dom(`${PAGE.metadata.customMetadataSection} ${PAGE.emptyStateActions}`) - .hasText('Add metadata', 'empty state has metadata CTA'); - assert.dom(PAGE.metadata.editBtn).hasText('Edit metadata'); - - await click(PAGE.metadata.editBtn); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/metadata/edit`, - `goes to metadata edit page` - ); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'metadata', 'edit']); - await click(FORM.cancelBtn); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/metadata`, - `cancel btn goes back to metadata page` - ); - }); - test('breadcrumbs & page titles are correct (a)', async function (assert) { - assert.expect(45); - const backend = this.backend; - await navToBackend(backend); - await click(PAGE.secretTab('Configuration')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, 'configuration']); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'correct page title for configuration'); - - await click(PAGE.secretTab('Secrets')); - assertCorrectBreadcrumbs(assert, ['secrets', backend]); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'correct page title for secret list'); - - await click(PAGE.list.item(secretPath)); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath]); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for secret detail'); - - await click(PAGE.detail.createNewVersion); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'edit']); - assert.dom(PAGE.title).hasText('Create New Version', 'correct page title for 
secret edit'); - - await click(PAGE.breadcrumbAtIdx(2)); - await click(PAGE.secretTab('Metadata')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'metadata']); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for metadata'); - - await click(PAGE.metadata.editBtn); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'metadata', 'edit']); - assert.dom(PAGE.title).hasText('Edit Secret Metadata', 'correct page title for metadata edit'); - - await click(PAGE.breadcrumbAtIdx(3)); - await click(PAGE.secretTab('Paths')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'paths']); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for paths'); - - await click(PAGE.secretTab('Version History')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'version history']); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for version history'); - }); - }); - - module('data-reader persona', function (hooks) { - hooks.beforeEach(async function () { - const token = await runCmd([ - createPolicyCmd( - 'data-reader', - personas.dataReader(this.backend) + personas.dataReader(this.emptyBackend) - ), - createTokenCmd('data-reader'), - ]); - await authPage.login(token); - clearRecords(this.store); - return; - }); - test('empty backend - breadcrumbs, title, tabs, emptyState (dr)', async function (assert) { - assert.expect(16); - const backend = this.emptyBackend; - await navToBackend(backend); - - // URL correct - assert.strictEqual(currentURL(), `/vault/secrets/${backend}/kv/list`, 'lands on secrets list page'); - // Breadcrumbs correct - assertCorrectBreadcrumbs(assert, ['secrets', backend]); - // Title correct - assert.dom(PAGE.title).hasText(`${backend} Version 2`); - // Tabs correct - assert.dom(PAGE.secretTab('Secrets')).hasText('Secrets'); - assert.dom(PAGE.secretTab('Secrets')).hasClass('active'); - 
assert.dom(PAGE.secretTab('Configuration')).hasText('Configuration'); - assert.dom(PAGE.secretTab('Configuration')).doesNotHaveClass('active'); - // Toolbar correct - assert.dom(PAGE.toolbar).exists({ count: 1 }, 'toolbar renders'); - assert - .dom(PAGE.list.filter) - .doesNotExist('list filter input does not render because no list capabilities'); - // Page content correct - assert - .dom(PAGE.emptyStateTitle) - .doesNotExist('empty state does not render because no metadata access to list'); - assert.dom(PAGE.list.overviewCard).exists('renders overview card'); - - await typeIn(PAGE.list.overviewInput, 'directory/'); - await click(PAGE.list.overviewButton); - assert - .dom('[data-test-inline-error-message]') - .hasText('You do not have the required permissions or the directory does not exist.'); - - // click toolbar CTA - await visit(`/vault/secrets/${backend}/kv/list`); - await click(PAGE.list.createSecret); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/create`, - `url includes /vault/secrets/${backend}/kv/create` - ); - - // Click cancel btn - await click(FORM.cancelBtn); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/list`, - `url includes /vault/secrets/${backend}/kv/list` - ); - }); - test('can access nested secret (dr)', async function (assert) { - assert.expect(23); - const backend = this.backend; - await navToBackend(backend); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'title text correct'); - assert.dom(PAGE.emptyStateTitle).doesNotExist('No empty state'); - assertCorrectBreadcrumbs(assert, ['secret', backend]); - assert - .dom(PAGE.list.filter) - .doesNotExist('List filter input does not render because no list capabilities'); - - await typeIn(PAGE.list.overviewInput, 'app/nested/secret'); - await click(PAGE.list.overviewButton); - - // Goes to correct detail view - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/app%2Fnested%2Fsecret/details?version=1` - ); - 
assertCorrectBreadcrumbs(assert, ['secret', backend, 'app', 'nested', 'secret']); - assert.dom(PAGE.title).hasText('app/nested/secret', 'title is full secret path'); - assertDetailsToolbar(assert, ['copy']); - - await click(PAGE.breadcrumbAtIdx(3)); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list/app/nested/`), - 'links back to list directory' - ); - - await click(PAGE.breadcrumbAtIdx(2)); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list/app/`), - 'links back to list directory' - ); - - await click(PAGE.breadcrumbAtIdx(1)); - assert.ok(currentURL().startsWith(`/vault/secrets/${backend}/kv/list`), 'links back to list root'); - }); - test('versioned secret nav, tabs, breadcrumbs (dr)', async function (assert) { - assert.expect(28); - const backend = this.backend; - await navToBackend(backend); - - // Navigate to secret - await typeIn(PAGE.list.overviewInput, secretPath); - await click(PAGE.list.overviewButton); - - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details?version=3`, - 'Url includes version query param' - ); - assert.dom(PAGE.title).hasText(secretPath, 'Goes to secret detail view'); - assertDetailTabs(assert, 'Secret', ['Version History']); - assert.dom(PAGE.detail.versionDropdown).doesNotExist('Version dropdown hidden'); - assert.dom(PAGE.detail.createNewVersion).doesNotExist('unable to create a new version'); - assert.dom(PAGE.detail.versionTimestamp).containsText('Version 3 created'); - assert.dom(PAGE.infoRowValue('foo')).exists('renders current data'); - - // data-reader can't navigate to older versions, but they can go to page directly - await visit(`/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details?version=1`); - assert.dom(PAGE.detail.versionDropdown).doesNotExist('Version dropdown does not exist'); - assert.dom(PAGE.detail.versionTimestamp).containsText('Version 1 created'); - assert.dom(PAGE.infoRowValue('key-1')).exists('renders previous 
data'); - - assert.dom(PAGE.detail.createNewVersion).doesNotExist('cannot create new version'); - - await click(PAGE.secretTab('Metadata')); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/metadata`, - `goes to metadata page` - ); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'metadata']); - assert.dom(PAGE.title).hasText(secretPath); - assert.dom(PAGE.toolbarAction).doesNotExist('no toolbar actions available on metadata'); - assert - .dom(`${PAGE.metadata.customMetadataSection} ${PAGE.emptyStateTitle}`) - .hasText('No custom metadata'); - assert - .dom(`${PAGE.metadata.secretMetadataSection} ${PAGE.emptyStateTitle}`) - .hasText('You do not have access to secret metadata'); - assert.dom(PAGE.metadata.editBtn).doesNotExist('edit button hidden'); - }); - test('breadcrumbs & page titles are correct (dr)', async function (assert) { - assert.expect(35); - const backend = this.backend; - await navToBackend(backend); - await click(PAGE.secretTab('Configuration')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, 'configuration']); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'title correct on config page'); - - await click(PAGE.secretTab('Secrets')); - assertCorrectBreadcrumbs(assert, ['secrets', backend]); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'title correct on secrets list'); - - await typeIn(PAGE.list.overviewInput, 'app/nested/secret'); - await click(PAGE.list.overviewButton); - assertCorrectBreadcrumbs(assert, ['secrets', backend, 'app', 'nested', 'secret']); - assert.dom(PAGE.title).hasText('app/nested/secret', 'title correct on secret detail'); - - assert.dom(PAGE.detail.createNewVersion).doesNotExist('cannot create new version'); - - await click(PAGE.secretTab('Metadata')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, 'app', 'nested', 'secret', 'metadata']); - assert.dom(PAGE.title).hasText('app/nested/secret', 'title correct on metadata'); - - 
assert.dom(PAGE.metadata.editBtn).doesNotExist('cannot edit metadata'); - - await click(PAGE.secretTab('Paths')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, 'app', 'nested', 'secret', 'paths']); - assert.dom(PAGE.title).hasText('app/nested/secret', 'correct page title for paths'); - - assert.dom(PAGE.secretTab('Version History')).doesNotExist('Version History tab not shown'); - }); - }); - - module('data-list-reader persona', function (hooks) { - hooks.beforeEach(async function () { - const token = await runCmd([ - createPolicyCmd( - 'data-reader-list', - personas.dataListReader(this.backend) + personas.dataListReader(this.emptyBackend) - ), - createTokenCmd('data-reader-list'), - ]); - - await authPage.login(token); - clearRecords(this.store); - return; - }); - test('empty backend - breadcrumbs, title, tabs, emptyState (dlr)', async function (assert) { - assert.expect(18); - const backend = this.emptyBackend; - await navToBackend(backend); - - // URL correct - assert.strictEqual(currentURL(), `/vault/secrets/${backend}/kv/list`, 'lands on secrets list page'); - // Breadcrumbs correct - assertCorrectBreadcrumbs(assert, ['secrets', backend]); - // Title correct - assert.dom(PAGE.title).hasText(`${backend} Version 2`); - // Tabs correct - assert.dom(PAGE.secretTab('Secrets')).hasText('Secrets'); - assert.dom(PAGE.secretTab('Secrets')).hasClass('active'); - assert.dom(PAGE.secretTab('Configuration')).hasText('Configuration'); - assert.dom(PAGE.secretTab('Configuration')).doesNotHaveClass('active'); - // Toolbar correct - assert.dom(PAGE.toolbar).exists({ count: 1 }, 'toolbar renders'); - assert.dom(PAGE.list.filter).doesNotExist('List filter does not show because no secrets exists.'); - // Page content correct - assert.dom(PAGE.emptyStateTitle).hasText('No secrets yet'); - assert.dom(PAGE.emptyStateActions).hasText('Create secret'); - assert.dom(PAGE.list.createSecret).hasText('Create secret'); - - // Click empty state CTA - await 
click(`${PAGE.emptyStateActions} a`); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/create`), - `url includes /vault/secrets/${backend}/kv/create` - ); - - // Click cancel btn - await click(FORM.cancelBtn); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list`), - `url includes /vault/secrets/${backend}/kv/list` - ); - - // click toolbar CTA - await click(PAGE.list.createSecret); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/create`), - `url includes /vault/secrets/${backend}/kv/create` - ); - - // Click cancel btn - await click(FORM.cancelBtn); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list`), - `url includes /vault/secrets/${backend}/kv/list` - ); - }); - test('can access nested secret (dlr)', async function (assert) { - assert.expect(31); - const backend = this.backend; - await navToBackend(backend); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'title text correct'); - assert.dom(PAGE.emptyStateTitle).doesNotExist('No empty state'); - assertCorrectBreadcrumbs(assert, ['secret', backend]); - assert.dom(PAGE.list.filter).hasNoValue('List filter input is empty'); - - // Navigate through list items - await click(PAGE.list.item('app/')); - assert.strictEqual(currentURL(), `/vault/secrets/${backend}/kv/list/app/`); - assertCorrectBreadcrumbs(assert, ['secret', backend, 'app']); - assert.dom(PAGE.title).hasText(`${backend} Version 2`); - assert.dom(PAGE.list.filter).doesNotExist('List filter hidden since no nested list access'); - - assert - .dom(PAGE.list.overviewInput) - .hasValue('app/', 'overview card is pre-filled with directory param'); - await typeIn(PAGE.list.overviewInput, 'nested/secret'); - await click(PAGE.list.overviewButton); - - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/app%2Fnested%2Fsecret/details?version=1` - ); - assertCorrectBreadcrumbs(assert, ['secret', backend, 'app', 'nested', 'secret']); - 
assert.dom(PAGE.title).hasText('app/nested/secret', 'title is full secret path'); - assertDetailsToolbar(assert, ['delete', 'copy']); - - await click(PAGE.breadcrumbAtIdx(3)); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list/app/nested/`), - 'links back to list directory' - ); - - await click(PAGE.breadcrumbAtIdx(2)); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list/app/`), - 'links back to list directory' - ); - - await click(PAGE.breadcrumbAtIdx(1)); - assert.ok(currentURL().startsWith(`/vault/secrets/${backend}/kv/list`), 'links back to list root'); - }); - test('versioned secret nav, tabs, breadcrumbs (dlr)', async function (assert) { - assert.expect(28); - const backend = this.backend; - await navToBackend(backend); - await click(PAGE.list.item(secretPath)); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details?version=3`, - 'Url includes version query param' - ); - assert.dom(PAGE.title).hasText(secretPath, 'Goes to secret detail view'); - assertDetailTabs(assert, 'Secret', ['Version History']); - assert.dom(PAGE.detail.versionDropdown).doesNotExist('does not show version dropdown'); - assert.dom(PAGE.detail.createNewVersion).doesNotExist('unable to create a new version'); - assert.dom(PAGE.detail.versionTimestamp).containsText('Version 3 created'); - assert.dom(PAGE.infoRowValue('foo')).exists('renders current data'); - - assert.dom(PAGE.detail.createNewVersion).doesNotExist('cannot create new version'); - - // data-list-reader can't navigate to older versions, but they can go to page directly - await visit(`/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details?version=1`); - assert.dom(PAGE.detail.versionDropdown).doesNotExist('no version dropdown'); - assert.dom(PAGE.detail.versionTimestamp).containsText('Version 1 created'); - assert.dom(PAGE.infoRowValue('key-1')).exists('renders previous data'); - - 
assert.dom(PAGE.detail.createNewVersion).doesNotExist('cannot create new version from old version'); - - await click(PAGE.secretTab('Metadata')); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/metadata`, - `goes to metadata page` - ); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'metadata']); - assert.dom(PAGE.title).hasText(secretPath); - assert - .dom(`${PAGE.metadata.customMetadataSection} ${PAGE.emptyStateTitle}`) - .hasText('No custom metadata'); - assert - .dom(`${PAGE.metadata.secretMetadataSection} ${PAGE.emptyStateTitle}`) - .hasText('You do not have access to secret metadata'); - assert.dom(PAGE.metadata.editBtn).doesNotExist('edit button hidden'); - }); - test('breadcrumbs & page titles are correct (dlr)', async function (assert) { - assert.expect(29); - const backend = this.backend; - await navToBackend(backend); - - await click(PAGE.secretTab('Configuration')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, 'configuration']); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'correct page title for configuration'); - - await click(PAGE.secretTab('Secrets')); - assertCorrectBreadcrumbs(assert, ['secrets', backend]); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'correct page title for secret list'); - - await click(PAGE.list.item(secretPath)); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath]); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for secret detail'); - - assert.dom(PAGE.detail.createNewVersion).doesNotExist('cannot create new version'); - - await click(PAGE.secretTab('Metadata')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'metadata']); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for metadata'); - - assert.dom(PAGE.metadata.editBtn).doesNotExist('cannot edit metadata'); - - await click(PAGE.secretTab('Paths')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, 
secretPath, 'paths']); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for paths'); - - assert.dom(PAGE.secretTab('Version History')).doesNotExist('Version History tab not shown'); - }); - }); - - module('metadata-maintainer persona', function (hooks) { - hooks.beforeEach(async function () { - const token = await runCmd([ - createPolicyCmd( - 'metadata-maintainer', - personas.metadataMaintainer(this.backend) + personas.metadataMaintainer(this.emptyBackend) - ), - createTokenCmd('metadata-maintainer'), - ]); - await authPage.login(token); - clearRecords(this.store); - return; - }); - test('empty backend - breadcrumbs, title, tabs, emptyState (mm)', async function (assert) { - assert.expect(18); - const backend = this.emptyBackend; - await navToBackend(backend); - - // URL correct - assert.strictEqual(currentURL(), `/vault/secrets/${backend}/kv/list`, 'lands on secrets list page'); - // Breadcrumbs correct - assertCorrectBreadcrumbs(assert, ['secrets', backend]); - // Title correct - assert.dom(PAGE.title).hasText(`${backend} Version 2`); - // Tabs correct - assert.dom(PAGE.secretTab('Secrets')).hasText('Secrets'); - assert.dom(PAGE.secretTab('Secrets')).hasClass('active'); - assert.dom(PAGE.secretTab('Configuration')).hasText('Configuration'); - assert.dom(PAGE.secretTab('Configuration')).doesNotHaveClass('active'); - // Toolbar correct - assert.dom(PAGE.toolbar).exists({ count: 1 }, 'toolbar only renders create secret action'); - assert.dom(PAGE.list.filter).doesNotExist('List filter does not show because no secrets exists.'); - // Page content correct - assert.dom(PAGE.emptyStateTitle).hasText('No secrets yet'); - assert.dom(PAGE.emptyStateActions).hasText('Create secret'); - assert.dom(PAGE.list.createSecret).hasText('Create secret'); - - // Click empty state CTA - await click(`${PAGE.emptyStateActions} a`); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/create`), - `url includes /vault/secrets/${backend}/kv/create` - ); - - 
// Click cancel btn - await click(FORM.cancelBtn); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list`), - `url includes /vault/secrets/${backend}/kv/list` - ); - - // click toolbar CTA - await click(PAGE.list.createSecret); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/create`), - `url includes /vault/secrets/${backend}/kv/create` - ); - - // Click cancel btn - await click(FORM.cancelBtn); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list`), - `url includes /vault/secrets/${backend}/kv/list` - ); - }); - test('can access nested secret (mm)', async function (assert) { - assert.expect(41); - const backend = this.backend; - await navToBackend(backend); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'title text correct'); - assert.dom(PAGE.emptyStateTitle).doesNotExist('No empty state'); - assertCorrectBreadcrumbs(assert, ['secret', backend]); - assert.dom(PAGE.list.filter).hasNoValue('List filter input is empty'); - - // Navigate through list items - await click(PAGE.list.item('app/')); - assert.strictEqual(currentURL(), `/vault/secrets/${backend}/kv/list/app/`); - assertCorrectBreadcrumbs(assert, ['secret', backend, 'app']); - assert.dom(PAGE.title).hasText(`${backend} Version 2`); - assert.dom(PAGE.list.filter).hasValue('app/', 'List filter input is prefilled'); - assert.dom(PAGE.list.item('nested/')).exists('Shows nested secret'); - - await click(PAGE.list.item('nested/')); - assert.strictEqual(currentURL(), `/vault/secrets/${backend}/kv/list/app/nested/`); - assertCorrectBreadcrumbs(assert, ['secret', backend, 'app', 'nested']); - assert.dom(PAGE.title).hasText(`${backend} Version 2`); - assert.dom(PAGE.list.filter).hasValue('app/nested/', 'List filter input is prefilled'); - assert.dom(PAGE.list.item('secret')).exists('Shows deeply nested secret'); - - await click(PAGE.list.item('secret')); - assert.strictEqual( - currentURL(), - 
`/vault/secrets/${backend}/kv/app%2Fnested%2Fsecret/details`, - `Goes to URL with version` - ); - assertCorrectBreadcrumbs(assert, ['secret', backend, 'app', 'nested', 'secret']); - assert.dom(PAGE.title).hasText('app/nested/secret', 'title is full secret path'); - assertDetailsToolbar(assert, ['delete', 'destroy', 'versionDropdown']); - assert.dom(PAGE.detail.versionDropdown).hasText('Version 1', 'Shows version timestamp'); - - await click(PAGE.breadcrumbAtIdx(3)); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list/app/nested/`), - 'links back to list directory' - ); - - await click(PAGE.breadcrumbAtIdx(2)); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list/app/`), - 'links back to list directory' - ); - - await click(PAGE.breadcrumbAtIdx(1)); - assert.ok(currentURL().startsWith(`/vault/secrets/${backend}/kv/list`), 'links back to list root'); - }); - test('versioned secret nav, tabs, breadcrumbs (mm)', async function (assert) { - assert.expect(37); - const backend = this.backend; - await navToBackend(backend); - await click(PAGE.list.item(secretPath)); - - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details`, - 'Url includes version query param' - ); - assert.dom(PAGE.title).hasText(secretPath, 'Goes to secret detail view'); - assertDetailTabs(assert, 'Secret'); - assert.dom(PAGE.detail.versionDropdown).hasText('Version 3', 'Version dropdown shows current version'); - assert.dom(PAGE.detail.createNewVersion).doesNotExist('Create new version button not shown'); - assert.dom(PAGE.detail.versionTimestamp).doesNotExist('Version created text not shown'); - assert.dom(PAGE.infoRowValue('foo')).doesNotExist('does not render current data'); - assert - .dom(PAGE.emptyStateTitle) - .hasText('You do not have permission to read this secret', 'Shows empty state on secret detail'); - - await click(PAGE.detail.versionDropdown); - await click(`${PAGE.detail.version(1)} a`); - 
assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details?version=1`, - 'Goes to detail view for version 1' - ); - assert.dom(PAGE.detail.versionDropdown).hasText('Version 1', 'Version dropdown shows selected version'); - - assert.dom(PAGE.infoRowValue('key-1')).doesNotExist('does not render previous data'); - assert - .dom(PAGE.emptyStateTitle) - .hasText( - 'You do not have permission to read this secret', - 'Shows empty state on secret detail for older version' - ); - - await click(PAGE.secretTab('Metadata')); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/metadata`, - `goes to metadata page` - ); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'metadata']); - assert.dom(PAGE.title).hasText(secretPath); - assert - .dom(`${PAGE.metadata.customMetadataSection} ${PAGE.emptyStateTitle}`) - .hasText('No custom metadata'); - assert - .dom(`${PAGE.metadata.customMetadataSection} ${PAGE.emptyStateActions}`) - .hasText('Add metadata', 'empty state has metadata CTA'); - assert.dom(PAGE.metadata.editBtn).hasText('Edit metadata'); - - await click(PAGE.metadata.editBtn); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/metadata/edit`, - `goes to metadata edit page` - ); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'metadata', 'edit']); - await click(FORM.cancelBtn); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/metadata`, - `cancel btn goes back to metadata page` - ); - }); - test('breadcrumbs & page titles are correct (mm)', async function (assert) { - assert.expect(39); - const backend = this.backend; - await navToBackend(backend); - await click(PAGE.secretTab('Configuration')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, 'configuration']); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'correct page title for configuration'); - - 
await click(PAGE.secretTab('Secrets')); - assertCorrectBreadcrumbs(assert, ['secrets', backend]); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'correct page title for secret list'); - - await click(PAGE.list.item(secretPath)); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath]); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for secret detail'); - - await click(PAGE.secretTab('Metadata')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'metadata']); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for metadata'); - - await click(PAGE.metadata.editBtn); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'metadata', 'edit']); - assert.dom(PAGE.title).hasText('Edit Secret Metadata', 'correct page title for metadata edit'); - - await click(PAGE.breadcrumbAtIdx(3)); - await click(PAGE.secretTab('Paths')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'paths']); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for paths'); - - await click(PAGE.secretTab('Version History')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'version history']); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for version history'); - }); - }); - - module('secret-creator persona', function (hooks) { - hooks.beforeEach(async function () { - const token = await runCmd([ - createPolicyCmd( - 'secret-creator', - personas.secretCreator(this.backend) + personas.secretCreator(this.emptyBackend) - ), - createTokenCmd('secret-creator'), - ]); - await authPage.login(token); - clearRecords(this.store); - return; - }); - test('empty backend - breadcrumbs, title, tabs, emptyState (sc)', async function (assert) { - assert.expect(15); - const backend = this.emptyBackend; - await navToBackend(backend); - - // URL correct - assert.strictEqual(currentURL(), `/vault/secrets/${backend}/kv/list`, 'lands on secrets list page'); - // 
Breadcrumbs correct - assertCorrectBreadcrumbs(assert, ['secrets', backend]); - // Title correct - assert.dom(PAGE.title).hasText(`${backend} Version 2`); - // Tabs correct - assert.dom(PAGE.secretTab('Secrets')).hasText('Secrets'); - assert.dom(PAGE.secretTab('Secrets')).hasClass('active'); - assert.dom(PAGE.secretTab('Configuration')).hasText('Configuration'); - assert.dom(PAGE.secretTab('Configuration')).doesNotHaveClass('active'); - // Toolbar correct - assert.dom(PAGE.toolbar).exists({ count: 1 }, 'toolbar only renders create secret action'); - assert.dom(PAGE.list.filter).doesNotExist('List filter input is not rendered'); - // Page content correct - assert.dom(PAGE.list.overviewCard).exists('Overview card renders'); - assert.dom(PAGE.list.createSecret).hasText('Create secret'); - - // click toolbar CTA - await click(PAGE.list.createSecret); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/create`, - `goes to /vault/secrets/${backend}/kv/create` - ); - - // Click cancel btn - await click(FORM.cancelBtn); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/list`, - `url includes /vault/secrets/${backend}/kv/list` - ); - }); - test('can access nested secret (sc)', async function (assert) { - assert.expect(23); - const backend = this.backend; - await navToBackend(backend); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'title text correct'); - assert.dom(PAGE.emptyStateTitle).doesNotExist('No empty state'); - assertCorrectBreadcrumbs(assert, ['secret', backend]); - assert.dom(PAGE.list.filter).doesNotExist('List filter input is not rendered'); - - // Navigate to secret - await typeIn(PAGE.list.overviewInput, 'app/nested/secret'); - await click(PAGE.list.overviewButton); - - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/app%2Fnested%2Fsecret/details`, - 'goes to secret detail page' - ); - assertCorrectBreadcrumbs(assert, ['secret', backend, 'app', 'nested', 'secret']); - 
assert.dom(PAGE.title).hasText('app/nested/secret', 'title is full secret path'); - assertDetailsToolbar(assert, ['createNewVersion']); - - await click(PAGE.breadcrumbAtIdx(3)); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list/app/nested/`), - 'links back to list directory' - ); - - await click(PAGE.breadcrumbAtIdx(2)); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list/app/`), - 'links back to list directory' - ); - - await click(PAGE.breadcrumbAtIdx(1)); - assert.ok(currentURL().startsWith(`/vault/secrets/${backend}/kv/list`), 'links back to list root'); - }); - test('versioned secret nav, tabs, breadcrumbs (sc)', async function (assert) { - assert.expect(36); - const backend = this.backend; - await navToBackend(backend); - - await typeIn(PAGE.list.overviewInput, secretPath); - await click(PAGE.list.overviewButton); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details`, - 'Goes to detail view' - ); - assert.dom(PAGE.title).hasText(secretPath, 'Goes to secret detail view'); - assertDetailTabs(assert, 'Secret', ['Version History']); - assert.dom(PAGE.detail.versionDropdown).doesNotExist('Version dropdown does not render'); - assert.dom(PAGE.detail.createNewVersion).hasText('Create new version', 'Create version button shows'); - assert.dom(PAGE.detail.versionTimestamp).doesNotExist('Version created info is not rendered'); - assert.dom(PAGE.infoRowValue('foo')).doesNotExist('current data not rendered'); - assert - .dom(PAGE.emptyStateTitle) - .hasText('You do not have permission to read this secret', 'empty state shows'); - - await click(PAGE.detail.createNewVersion); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details/edit`, - 'Goes to edit page' - ); - assert.dom(FORM.versionAlert).doesNotExist('Does not show version alert for current version'); - assert - .dom(FORM.noReadAlert) - .hasText( - 'Warning You do not have 
read permissions for this secret data. Saving will overwrite the existing secret.', - 'Shows warning about no read permissions' - ); - assert.dom(FORM.inputByAttr('path')).isDisabled(); - - await click(FORM.cancelBtn); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details`, - 'Goes back to detail view' - ); - - await visit(`/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details?version=1`); - assert.dom(PAGE.detail.versionDropdown).doesNotExist('Version dropdown does not exist'); - assert.dom(PAGE.detail.versionTimestamp).doesNotExist('version created data not rendered'); - assert.dom(PAGE.infoRowValue('key-1')).doesNotExist('does not render previous data'); - - await click(PAGE.detail.createNewVersion); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details/edit?version=1`, - 'Url includes version query param' - ); - assert.dom(FORM.inputByAttr('path')).isDisabled(); - assert.dom(FORM.keyInput()).hasValue('', 'form does not pre-populate'); - assert.dom(FORM.maskedValueInput()).hasValue('', 'form does not pre-populate'); - assert.dom(FORM.noReadAlert).exists('Shows no read alert'); - await click(FORM.cancelBtn); - - await click(PAGE.secretTab('Metadata')); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/metadata`, - `goes to metadata page` - ); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'metadata']); - assert.dom(PAGE.title).hasText(secretPath); - assert - .dom(`${PAGE.metadata.customMetadataSection} ${PAGE.emptyStateTitle}`) - .hasText('You do not have access to read custom metadata', 'shows correct empty state'); - assert.dom(PAGE.metadata.editBtn).doesNotExist('edit metadata button does not render'); - }); - test('breadcrumbs & page titles are correct (sc)', async function (assert) { - assert.expect(34); - const backend = this.backend; - await navToBackend(backend); - await 
click(PAGE.secretTab('Configuration')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, 'configuration']); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'correct page title for configuration'); - - await click(PAGE.secretTab('Secrets')); - assertCorrectBreadcrumbs(assert, ['secrets', backend]); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'correct page title for secret list'); - - await typeIn(PAGE.list.overviewInput, secretPath); - await click(PAGE.list.overviewButton); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath]); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for secret detail'); - - await click(PAGE.detail.createNewVersion); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'edit']); - assert.dom(PAGE.title).hasText('Create New Version', 'correct page title for secret edit'); - - await click(PAGE.breadcrumbAtIdx(2)); - await click(PAGE.secretTab('Metadata')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'metadata']); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for metadata'); - - assert.dom(PAGE.metadata.editBtn).doesNotExist('cannot edit metadata'); - - await click(PAGE.breadcrumbAtIdx(2)); - await click(PAGE.secretTab('Paths')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'paths']); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for paths'); - - assert.dom(PAGE.secretTab('Version History')).doesNotExist('Version History tab not shown'); - }); - }); - - module('enterprise controlled access persona', function (hooks) { - hooks.beforeEach(async function () { - // Set up control group scenario - const userPolicy = ` -path "${this.backend}/data/*" { - capabilities = ["create", "read", "update", "delete", "list"] - control_group = { - max_ttl = "24h" - factor "ops_manager" { - controlled_capabilities = ["read"] - identity { - group_names = ["managers"] - approvals = 1 - } - } - } -} - -path 
"${this.backend}/*" { - capabilities = ["list"] +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: BUSL-1.1 + +package standby + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/helper/testhelpers/corehelpers" + "github.com/hashicorp/vault/helper/testhelpers/teststorage" + "github.com/hashicorp/vault/sdk/helper/testcluster" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/vault/vault/cluster" +) + +// Test_Echo_Duration_Skew tests that the sys/health and sys/ha-status endpoints +// report reasonable values for echo duration and clock skew. +func Test_Echo_Duration_Skew(t *testing.T) { + t.Parallel() + cases := []struct { + name string + perfstandby bool + }{ + {"standby", false}, + {"perfstandby", true}, + } + for i := range cases { + perfstandby := cases[i].perfstandby + if perfstandby && !constants.IsEnterprise { + continue + } + t.Run(cases[i].name, func(t *testing.T) { + t.Parallel() + conf, opts := teststorage.ClusterSetup(nil, nil, nil) + name := strings.Replace(t.Name(), "/", "_", -1) + logger := corehelpers.NewTestLogger(t) + layers, err := cluster.NewInmemLayerCluster(name, 3, logger) + if err != nil { + t.Fatal(err) + } + opts.ClusterLayers = layers + opts.Logger = logger + conf.DisablePerformanceStandby = !perfstandby + cluster := vault.NewTestCluster(t, conf, opts) + defer cluster.Cleanup() + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + leaderIdx, err := testcluster.WaitForActiveNodeAndStandbys(ctx, cluster) + if err != nil { + t.Fatal(err) + } + leader := cluster.Nodes()[leaderIdx] + + // The delay applies in both directions, hence a 0.25s delay implies a 0.5s roundtrip delay + layers.SetReaderDelay(time.Second / 4) + + check := func(echoDuration int64, clockSkew int64) error { + if echoDuration < time.Second.Milliseconds()/2 { + return fmt.Errorf("echo duration must exceed 0.5s, got: 
%dms", echoDuration) + } + // Because we're using the same clock for all nodes, any clock skew will + // be negative, as it's based on the delta of server time across both nodes, + // but it doesn't factor in the round-trip time of the echo request. + if clockSkew == 0 || -clockSkew < time.Second.Milliseconds()/2 { + return fmt.Errorf("clock skew must be nonzero and exceed -0.5s, got: %dms", clockSkew) + } + + return nil + } + + // We need to wait for at least 2 heartbeats to happen (2s intervals) + corehelpers.RetryUntil(t, 5*time.Second, func() error { + haStatus, err := leader.APIClient().Sys().HAStatus() + if err != nil { + t.Fatal(err) + } + if len(haStatus.Nodes) < 3 { + return fmt.Errorf("expected 3 nodes, got %d", len(haStatus.Nodes)) + } + for _, node := range haStatus.Nodes { + if node.ActiveNode { + continue + } + + if err := check(node.EchoDurationMillis, node.ClockSkewMillis); err != nil { + return fmt.Errorf("ha-status node %s: %w", node.Hostname, err) + } + } + + for i, node := range cluster.Nodes() { + if i == leaderIdx { + continue + } + + h, err := node.APIClient().Sys().Health() + if err != nil { + t.Fatal(err) + } + + if err := check(h.EchoDurationMillis, h.ClockSkewMillis); err != nil { + return fmt.Errorf("health node %s: %w", node.APIClient().Address(), err) + } + } + return nil + }) + }) + } } -`; - const { userToken } = await setupControlGroup({ userPolicy }); - this.userToken = userToken; - await authPage.login(userToken); - clearRecords(this.store); - return; - }); - test('can access nested secret (cg)', async function (assert) { - assert.expect(42); - const backend = this.backend; - await navToBackend(backend); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'title text correct'); - assert.dom(PAGE.emptyStateTitle).doesNotExist('No empty state'); - assertCorrectBreadcrumbs(assert, ['secret', backend]); - assert.dom(PAGE.list.filter).hasNoValue('List filter input is empty'); - - // Navigate through list items - await 
click(PAGE.list.item('app/')); - assert.strictEqual(currentURL(), `/vault/secrets/${backend}/kv/list/app/`); - assertCorrectBreadcrumbs(assert, ['secret', backend, 'app']); - assert.dom(PAGE.title).hasText(`${backend} Version 2`); - assert.dom(PAGE.list.filter).hasValue('app/', 'List filter input is prefilled'); - assert.dom(PAGE.list.item('nested/')).exists('Shows nested secret'); - - await click(PAGE.list.item('nested/')); - assert.strictEqual(currentURL(), `/vault/secrets/${backend}/kv/list/app/nested/`); - assertCorrectBreadcrumbs(assert, ['secret', backend, 'app', 'nested']); - assert.dom(PAGE.title).hasText(`${backend} Version 2`); - assert.dom(PAGE.list.filter).hasValue('app/nested/', 'List filter input is prefilled'); - assert.dom(PAGE.list.item('secret')).exists('Shows deeply nested secret'); - - // For some reason when we click on the item in tests it throws a global control group error - // But not when we visit the page directly - await visit(`/vault/secrets/${backend}/kv/app%2Fnested%2Fsecret/details`); - assert.ok( - await waitUntil(() => currentRouteName() === 'vault.cluster.access.control-group-accessor'), - 'redirects to access control group route' - ); - await grantAccess({ - apiPath: `${backend}/data/app/nested/secret`, - originUrl: `/vault/secrets/${backend}/kv/list/app/nested/`, - userToken: this.userToken, - }); - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/list/app/nested/`, - 'navigates to list url where secret is' - ); - await click(PAGE.list.item('secret')); - - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/app%2Fnested%2Fsecret/details?version=1`, - 'goes to secret details' - ); - assertCorrectBreadcrumbs(assert, ['secret', backend, 'app', 'nested', 'secret']); - assert.dom(PAGE.title).hasText('app/nested/secret', 'title is full secret path'); - assertDetailsToolbar(assert, ['delete', 'copy', 'createNewVersion']); - - await click(PAGE.breadcrumbAtIdx(3)); - assert.ok( - 
currentURL().startsWith(`/vault/secrets/${backend}/kv/list/app/nested/`), - 'links back to list directory' - ); - - await click(PAGE.breadcrumbAtIdx(2)); - assert.ok( - currentURL().startsWith(`/vault/secrets/${backend}/kv/list/app/`), - 'links back to list directory' - ); - - await click(PAGE.breadcrumbAtIdx(1)); - assert.ok(currentURL().startsWith(`/vault/secrets/${backend}/kv/list`), 'links back to list root'); - }); - test('breadcrumbs & page titles are correct (cg)', async function (assert) { - assert.expect(36); - const backend = this.backend; - await navToBackend(backend); - await click(PAGE.secretTab('Configuration')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, 'configuration']); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'correct page title for configuration'); - - await click(PAGE.secretTab('Secrets')); - assertCorrectBreadcrumbs(assert, ['secrets', backend]); - assert.dom(PAGE.title).hasText(`${backend} Version 2`, 'correct page title for secret list'); - - await visit(`/vault/secrets/${backend}/kv/${secretPathUrlEncoded}/details`); - - assert.ok( - await waitUntil(() => currentRouteName() === 'vault.cluster.access.control-group-accessor'), - 'redirects to access control group route' - ); - - await grantAccess({ - apiPath: `${backend}/data/${encodeURIComponent(secretPath)}`, - originUrl: `/vault/secrets/${backend}/kv/list`, - userToken: this.userToken, - }); - - assert.strictEqual( - currentURL(), - `/vault/secrets/${backend}/kv/list`, - 'navigates back to list url after authorized' - ); - await click(PAGE.list.item(secretPath)); - - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath]); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for secret detail'); - - await click(PAGE.secretTab('Metadata')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'metadata']); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for metadata'); - - 
assert.dom(PAGE.metadata.editBtn).doesNotExist('cannot edit metadata'); - - await click(PAGE.breadcrumbAtIdx(2)); - await click(PAGE.secretTab('Paths')); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'paths']); - assert.dom(PAGE.title).hasText(secretPath, 'correct page title for paths'); - - assert.dom(PAGE.secretTab('Version History')).doesNotExist('Version History tab not shown'); - - await click(PAGE.secretTab('Secret')); - await click(PAGE.detail.createNewVersion); - assertCorrectBreadcrumbs(assert, ['secrets', backend, secretPath, 'edit']); - assert.dom(PAGE.title).hasText('Create New Version', 'correct page title for secret edit'); - }); - }); -}); diff --git a/ui/tests/helpers/openapi/auth-model-attributes.js b/ui/tests/helpers/openapi/auth-model-attributes.js index 50c86000057d..3a16a1a9b919 100644 --- a/ui/tests/helpers/openapi/auth-model-attributes.js +++ b/ui/tests/helpers/openapi/auth-model-attributes.js @@ -1,1294 +1,183 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: BUSL-1.1 - */ +--- +layout: docs +page_title: Vault Proxy Static Secret Caching +description: |- + Vault Proxy's static secret caching functionality allows you to cache KVv1 and KVv2 secrets for calling clients. + The secrets will be automatically updated by Proxy, minimizing requests made to Vault, and offering resiliency. +--- -const userpass = { - user: { - username: { - editType: 'string', - helpText: 'Username for this user.', - fieldValue: 'mutableId', - fieldGroup: 'default', - readOnly: true, - label: 'Username', - type: 'string', - }, - password: { - editType: 'string', - helpText: 'Password for this user.', - fieldGroup: 'default', - sensitive: true, - type: 'string', - }, - tokenBoundCidrs: { - editType: 'stringArray', - helpText: - 'A list of CIDR blocks. 
If set, specifies the blocks of IP addresses which are allowed to use the generated token.', - fieldGroup: 'Tokens', - label: "Generated Token's Bound CIDRs", - }, - tokenExplicitMaxTtl: { - editType: 'ttl', - helpText: - 'If set, tokens created via this role carry an explicit maximum TTL. During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.', - fieldGroup: 'Tokens', - label: "Generated Token's Explicit Maximum TTL", - }, - tokenMaxTtl: { - editType: 'ttl', - helpText: 'The maximum lifetime of the generated token', - fieldGroup: 'Tokens', - label: "Generated Token's Maximum TTL", - }, - tokenNoDefaultPolicy: { - editType: 'boolean', - helpText: "If true, the 'default' policy will not automatically be added to generated tokens", - fieldGroup: 'Tokens', - label: "Do Not Attach 'default' Policy To Generated Tokens", - type: 'boolean', - }, - tokenNumUses: { - editType: 'number', - helpText: 'The maximum number of times a token may be used, a value of zero means unlimited', - fieldGroup: 'Tokens', - label: 'Maximum Uses of Generated Tokens', - type: 'number', - }, - tokenPeriod: { - editType: 'ttl', - helpText: - 'If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value. This takes an integer number of seconds, or a string duration (e.g. 
"24h").', - fieldGroup: 'Tokens', - label: "Generated Token's Period", - }, - tokenPolicies: { - editType: 'stringArray', - helpText: 'A list of policies that will apply to the generated token for this user.', - fieldGroup: 'Tokens', - label: "Generated Token's Policies", - }, - tokenTtl: { - editType: 'ttl', - helpText: 'The initial ttl of the token to generate', - fieldGroup: 'Tokens', - label: "Generated Token's Initial TTL", - }, - tokenType: { - editType: 'string', - helpText: 'The type of token to generate, service or batch', - fieldGroup: 'Tokens', - label: "Generated Token's Type", - type: 'string', - }, - }, -}; +# Vault Proxy static secret caching -const azure = { - 'auth-config/azure': { - clientId: { - editType: 'string', - fieldGroup: 'default', - helpText: - 'The OAuth2 client id to connection to Azure. This value can also be provided with the AZURE_CLIENT_ID environment variable.', - label: 'Client ID', - type: 'string', - }, - clientSecret: { - editType: 'string', - fieldGroup: 'default', - helpText: - 'The OAuth2 client secret to connection to Azure. This value can also be provided with the AZURE_CLIENT_SECRET environment variable.', - type: 'string', - }, - environment: { - editType: 'string', - fieldGroup: 'default', - helpText: - 'The Azure environment name. If not provided, AzurePublicCloud is used. This value can also be provided with the AZURE_ENVIRONMENT environment variable.', - type: 'string', - }, - maxRetries: { - editType: 'number', - fieldGroup: 'default', - helpText: - 'The maximum number of attempts a failed operation will be retried before producing an error.', - type: 'number', - }, - maxRetryDelay: { - editType: 'ttl', - fieldGroup: 'default', - helpText: 'The maximum delay allowed before retrying an operation.', - }, - resource: { - editType: 'string', - fieldGroup: 'default', - helpText: - 'The resource URL for the vault application in Azure Active Directory. 
This value can also be provided with the AZURE_AD_RESOURCE environment variable.', - type: 'string', - }, - retryDelay: { - editType: 'ttl', - fieldGroup: 'default', - helpText: 'The initial amount of delay to use before retrying an operation, increasing exponentially.', - }, - rootPasswordTtl: { - editType: 'ttl', - fieldGroup: 'default', - helpText: - 'The TTL of the root password in Azure. This can be either a number of seconds or a time formatted duration (ex: 24h, 48ds)', - }, - tenantId: { - editType: 'string', - fieldGroup: 'default', - helpText: - 'The tenant id for the Azure Active Directory. This is sometimes referred to as Directory ID in AD. This value can also be provided with the AZURE_TENANT_ID environment variable.', - label: 'Tenant ID', - type: 'string', - }, - }, -}; +Use static secret caching with Vault Proxy to cache KVv1 and KVv2 secrets to +minimize requests made to Vault and provide resilient connections for clients. -const cert = { - 'auth-config/cert': { - disableBinding: { - editType: 'boolean', - helpText: - 'If set, during renewal, skips the matching of presented client identity with the client identity used during login. Defaults to false.', - fieldGroup: 'default', - type: 'boolean', - }, - enableIdentityAliasMetadata: { - editType: 'boolean', - helpText: - 'If set, metadata of the certificate including the metadata corresponding to allowed_metadata_extensions will be stored in the alias. Defaults to false.', - fieldGroup: 'default', - type: 'boolean', - }, - ocspCacheSize: { - editType: 'number', - helpText: 'The size of the in memory OCSP response cache, shared by all configured certs', - fieldGroup: 'default', - type: 'number', - }, - }, - cert: { - name: { - editType: 'string', - helpText: 'The name of the certificate', - fieldValue: 'mutableId', - fieldGroup: 'default', - readOnly: true, - label: 'Name', - type: 'string', - }, - allowedCommonNames: { - editType: 'stringArray', - helpText: 'A list of names. 
At least one must exist in the Common Name. Supports globbing.', - fieldGroup: 'Constraints', - }, - allowedDnsSans: { - editType: 'stringArray', - helpText: 'A list of DNS names. At least one must exist in the SANs. Supports globbing.', - fieldGroup: 'Constraints', - label: 'Allowed DNS SANs', - }, - allowedEmailSans: { - editType: 'stringArray', - helpText: 'A list of Email Addresses. At least one must exist in the SANs. Supports globbing.', - fieldGroup: 'Constraints', - label: 'Allowed Email SANs', - }, - allowedMetadataExtensions: { - editType: 'stringArray', - helpText: - 'A list of OID extensions. Upon successful authentication, these extensions will be added as metadata if they are present in the certificate. The metadata key will be the string consisting of the OID numbers separated by a dash (-) instead of a dot (.) to allow usage in ACL templates.', - fieldGroup: 'default', - }, - allowedNames: { - editType: 'stringArray', - helpText: - 'A list of names. At least one must exist in either the Common Name or SANs. Supports globbing. This parameter is deprecated, please use allowed_common_names, allowed_dns_sans, allowed_email_sans, allowed_uri_sans.', - fieldGroup: 'Constraints', - }, - allowedOrganizationalUnits: { - editType: 'stringArray', - helpText: 'A list of Organizational Units names. At least one must exist in the OU field.', - fieldGroup: 'Constraints', - }, - allowedUriSans: { - editType: 'stringArray', - helpText: 'A list of URIs. At least one must exist in the SANs. Supports globbing.', - fieldGroup: 'Constraints', - label: 'Allowed URI SANs', - }, - certificate: { - editType: 'file', - helpText: 'The public certificate that should be trusted. 
Must be x509 PEM encoded.', - fieldGroup: 'default', - type: 'string', - }, - displayName: { - editType: 'string', - helpText: 'The display name to use for clients using this certificate.', - fieldGroup: 'default', - type: 'string', - }, - ocspCaCertificates: { - editType: 'file', - helpText: 'Any additional CA certificates needed to communicate with OCSP servers', - fieldGroup: 'default', - type: 'string', - }, - ocspEnabled: { - editType: 'boolean', - helpText: 'Whether to attempt OCSP verification of certificates at login', - fieldGroup: 'default', - type: 'boolean', - }, - ocspFailOpen: { - editType: 'boolean', - helpText: - 'If set to true, if an OCSP revocation cannot be made successfully, login will proceed rather than failing. If false, failing to get an OCSP status fails the request.', - fieldGroup: 'default', - type: 'boolean', - }, - ocspQueryAllServers: { - editType: 'boolean', - helpText: - 'If set to true, rather than accepting the first successful OCSP response, query all servers and consider the certificate valid only if all servers agree.', - fieldGroup: 'default', - type: 'boolean', - }, - ocspServersOverride: { - editType: 'stringArray', - helpText: - 'A list of OCSP server addresses. If unset, the OCSP server is determined from the AuthorityInformationAccess extension on the certificate being inspected.', - fieldGroup: 'default', - }, - requiredExtensions: { - editType: 'stringArray', - helpText: - "A list of extensions formatted as 'oid:value'. Expects the extension value to be some type of ASN1 encoded string. All values much match. Supports globbing on 'value'.", - fieldGroup: 'default', - }, - tokenBoundCidrs: { - editType: 'stringArray', - helpText: - 'A list of CIDR blocks. 
If set, specifies the blocks of IP addresses which are allowed to use the generated token.', - fieldGroup: 'Tokens', - label: "Generated Token's Bound CIDRs", - }, - tokenExplicitMaxTtl: { - editType: 'ttl', - helpText: - 'If set, tokens created via this role carry an explicit maximum TTL. During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.', - fieldGroup: 'Tokens', - label: "Generated Token's Explicit Maximum TTL", - }, - tokenMaxTtl: { - editType: 'ttl', - helpText: 'The maximum lifetime of the generated token', - fieldGroup: 'Tokens', - label: "Generated Token's Maximum TTL", - }, - tokenNoDefaultPolicy: { - editType: 'boolean', - helpText: "If true, the 'default' policy will not automatically be added to generated tokens", - fieldGroup: 'Tokens', - label: "Do Not Attach 'default' Policy To Generated Tokens", - type: 'boolean', - }, - tokenNumUses: { - editType: 'number', - helpText: 'The maximum number of times a token may be used, a value of zero means unlimited', - fieldGroup: 'Tokens', - label: 'Maximum Uses of Generated Tokens', - type: 'number', - }, - tokenPeriod: { - editType: 'ttl', - helpText: - 'If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value. This takes an integer number of seconds, or a string duration (e.g. 
"24h").', - fieldGroup: 'Tokens', - label: "Generated Token's Period", - }, - tokenPolicies: { - editType: 'stringArray', - helpText: 'A list of policies that will apply to the generated token for this user.', - fieldGroup: 'Tokens', - label: "Generated Token's Policies", - }, - tokenTtl: { - editType: 'ttl', - helpText: 'The initial ttl of the token to generate', - fieldGroup: 'Tokens', - label: "Generated Token's Initial TTL", - }, - tokenType: { - editType: 'string', - helpText: 'The type of token to generate, service or batch', - fieldGroup: 'Tokens', - label: "Generated Token's Type", - type: 'string', - }, - }, -}; +## Step 1: Subscribe Vault Proxy to KV events -const gcp = { - 'auth-config/gcp': { - credentials: { - editType: 'string', - helpText: - 'Google credentials JSON that Vault will use to verify users against GCP APIs. If not specified, will use application default credentials', - fieldGroup: 'default', - label: 'Credentials', - type: 'string', - }, - customEndpoint: { - editType: 'object', - helpText: 'Specifies overrides for various Google API Service Endpoints used in requests.', - fieldGroup: 'default', - type: 'object', - }, - gceAlias: { - editType: 'string', - helpText: 'Indicates what value to use when generating an alias for GCE authentications.', - fieldGroup: 'default', - type: 'string', - }, - gceMetadata: { - editType: 'stringArray', - helpText: - "The metadata to include on the aliases and audit logs generated by this plugin. When set to 'default', includes: instance_creation_timestamp, instance_id, instance_name, project_id, project_number, role, service_account_id, service_account_email, zone. Not editing this field means the 'default' fields are included. Explicitly setting this field to empty overrides the 'default' and means no metadata will be included. 
If not using 'default', explicit fields must be sent like: 'field1,field2'.", - fieldGroup: 'default', - defaultValue: 'field1,field2', - label: 'gce_metadata', - }, - iamAlias: { - editType: 'string', - helpText: 'Indicates what value to use when generating an alias for IAM authentications.', - fieldGroup: 'default', - type: 'string', - }, - iamMetadata: { - editType: 'stringArray', - helpText: - "The metadata to include on the aliases and audit logs generated by this plugin. When set to 'default', includes: project_id, role, service_account_id, service_account_email. Not editing this field means the 'default' fields are included. Explicitly setting this field to empty overrides the 'default' and means no metadata will be included. If not using 'default', explicit fields must be sent like: 'field1,field2'.", - fieldGroup: 'default', - defaultValue: 'field1,field2', - label: 'iam_metadata', - }, - }, -}; +Vault Proxy uses Vault events and auto-auth to monitor secret status and make +appropriate cache updates. +1. Enable [auto-auth](/vault/docs/agent-and-proxy/autoauth). +1. Create an auto-auth token with permission to subscribe to KV event updates +with the [Vault event system](/vault/docs/concepts/events). For example, to +create a policy that grants access to static secret (KVv1 and KVv2) events, +we need permission to subscribe to the `events` endpoint, as well as the +`list` and `subscribe` permissions on KV secrets we want to get secrets +from: + ```hcl + path "sys/events/subscribe/kv*" { + capabilities = ["read"] + } -const github = { - 'auth-config/github': { - baseUrl: { - editType: 'string', - helpText: - 'The API endpoint to use. 
Useful if you are running GitHub Enterprise or an API-compatible authentication server.', - fieldGroup: 'GitHub Options', - label: 'Base URL', - type: 'string', - }, - organization: { - editType: 'string', - helpText: 'The organization users must be part of', - fieldGroup: 'default', - type: 'string', - }, - organizationId: { - editType: 'number', - helpText: 'The ID of the organization users must be part of', - fieldGroup: 'default', - type: 'number', - }, - tokenBoundCidrs: { - editType: 'stringArray', - helpText: - 'A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.', - fieldGroup: 'Tokens', - label: "Generated Token's Bound CIDRs", - }, - tokenExplicitMaxTtl: { - editType: 'ttl', - helpText: - 'If set, tokens created via this role carry an explicit maximum TTL. During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.', - fieldGroup: 'Tokens', - label: "Generated Token's Explicit Maximum TTL", - }, - tokenMaxTtl: { - editType: 'ttl', - helpText: 'The maximum lifetime of the generated token', - fieldGroup: 'Tokens', - label: "Generated Token's Maximum TTL", - }, - tokenNoDefaultPolicy: { - editType: 'boolean', - helpText: "If true, the 'default' policy will not automatically be added to generated tokens", - fieldGroup: 'Tokens', - label: "Do Not Attach 'default' Policy To Generated Tokens", - type: 'boolean', - }, - tokenNumUses: { - editType: 'number', - helpText: 'The maximum number of times a token may be used, a value of zero means unlimited', - fieldGroup: 'Tokens', - label: 'Maximum Uses of Generated Tokens', - type: 'number', - }, - tokenPeriod: { - editType: 'ttl', - helpText: - 'If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value. This takes an integer number of seconds, or a string duration (e.g. 
"24h").', - fieldGroup: 'Tokens', - label: "Generated Token's Period", - }, - tokenPolicies: { - editType: 'stringArray', - helpText: 'A list of policies that will apply to the generated token for this user.', - fieldGroup: 'Tokens', - label: "Generated Token's Policies", - }, - tokenTtl: { - editType: 'ttl', - helpText: 'The initial ttl of the token to generate', - fieldGroup: 'Tokens', - label: "Generated Token's Initial TTL", - }, - tokenType: { - editType: 'string', - helpText: 'The type of token to generate, service or batch', - fieldGroup: 'Tokens', - label: "Generated Token's Type", - type: 'string', - }, - }, -}; + path "*" { + capabilities = ["list", "subscribe"] + subscribe_event_types = ["kv*"] + } + ``` -const jwt = { - 'auth-config/jwt': { - boundIssuer: { - editType: 'string', - helpText: "The value against which to match the 'iss' claim in a JWT. Optional.", - fieldGroup: 'default', - type: 'string', - }, - defaultRole: { - editType: 'string', - helpText: - 'The default role to use if none is provided during login. If not set, a role is required during login.', - fieldGroup: 'default', - type: 'string', - }, - jwksCaPem: { - editType: 'string', - helpText: - 'The CA certificate or chain of certificates, in PEM format, to use to validate connections to the JWKS URL. If not set, system certificates are used.', - fieldGroup: 'default', - type: 'string', - }, - jwksUrl: { - editType: 'string', - helpText: - 'JWKS URL to use to authenticate signatures. Cannot be used with "oidc_discovery_url" or "jwt_validation_pubkeys".', - fieldGroup: 'default', - type: 'string', - }, - jwtSupportedAlgs: { - editType: 'stringArray', - helpText: 'A list of supported signing algorithms. Defaults to RS256.', - fieldGroup: 'default', - }, - jwtValidationPubkeys: { - editType: 'stringArray', - helpText: - 'A list of PEM-encoded public keys to use to authenticate signatures locally. 
Cannot be used with "jwks_url" or "oidc_discovery_url".', - fieldGroup: 'default', - }, - namespaceInState: { - editType: 'boolean', - helpText: - 'Pass namespace in the OIDC state parameter instead of as a separate query parameter. With this setting, the allowed redirect URL(s) in Vault and on the provider side should not contain a namespace query parameter. This means only one redirect URL entry needs to be maintained on the provider side for all vault namespaces that will be authenticating against it. Defaults to true for new configs.', - fieldGroup: 'default', - defaultValue: true, - label: 'Namespace in OIDC state', - type: 'boolean', - }, - oidcClientId: { - editType: 'string', - helpText: 'The OAuth Client ID configured with your OIDC provider.', - fieldGroup: 'default', - type: 'string', - }, - oidcClientSecret: { - editType: 'string', - helpText: 'The OAuth Client Secret configured with your OIDC provider.', - fieldGroup: 'default', - sensitive: true, - type: 'string', - }, - oidcDiscoveryCaPem: { - editType: 'string', - helpText: - 'The CA certificate or chain of certificates, in PEM format, to use to validate connections to the OIDC Discovery URL. If not set, system certificates are used.', - fieldGroup: 'default', - type: 'string', - }, - oidcDiscoveryUrl: { - editType: 'string', - helpText: - 'OIDC Discovery URL, without any .well-known component (base path). Cannot be used with "jwks_url" or "jwt_validation_pubkeys".', - fieldGroup: 'default', - type: 'string', - }, - oidcResponseMode: { - editType: 'string', - helpText: - "The response mode to be used in the OAuth2 request. Allowed values are 'query' and 'form_post'.", - fieldGroup: 'default', - type: 'string', - }, - oidcResponseTypes: { - editType: 'stringArray', - helpText: - "The response types to request. Allowed values are 'code' and 'id_token'. Defaults to 'code'.", - fieldGroup: 'default', - }, - providerConfig: { - editType: 'object', - helpText: 'Provider-specific configuration. 
Optional.', - fieldGroup: 'default', - label: 'Provider Config', - type: 'object', - }, - }, -}; +Subscribing to KV events means that Proxy receives updates as soon as a secret +changes, which reduces staleness in the cache. Vault Proxy only checks for a +secret update if an event notification indicates that the related secret was +updated. -const kubernetes = { - 'auth-config/kubernetes': { - disableLocalCaJwt: { - editType: 'boolean', - helpText: - 'Disable defaulting to the local CA cert and service account JWT when running in a Kubernetes pod', - fieldGroup: 'default', - label: 'Disable use of local CA and service account JWT', - type: 'boolean', - }, - kubernetesCaCert: { - editType: 'string', - helpText: 'PEM encoded CA cert for use by the TLS client used to talk with the API.', - fieldGroup: 'default', - label: 'Kubernetes CA Certificate', - type: 'string', - }, - kubernetesHost: { - editType: 'string', - helpText: - 'Host must be a host string, a host:port pair, or a URL to the base of the Kubernetes API server.', - fieldGroup: 'default', - type: 'string', - }, - pemKeys: { - editType: 'stringArray', - helpText: - 'Optional list of PEM-formated public keys or certificates used to verify the signatures of kubernetes service account JWTs. If a certificate is given, its public key will be extracted. Not every installation of Kubernetes exposes these keys.', - fieldGroup: 'default', - label: 'Service account verification keys', - }, - tokenReviewerJwt: { - editType: 'string', - helpText: - 'A service account JWT (or other token) used as a bearer token to access the TokenReview API to validate other JWTs during login. 
If not set the JWT used for login will be used to access the API.', - fieldGroup: 'default', - label: 'Token Reviewer JWT', - type: 'string', - }, - }, - role: { - name: { - editType: 'string', - helpText: 'Name of the role.', - fieldValue: 'mutableId', - fieldGroup: 'default', - readOnly: true, - label: 'Name', - type: 'string', - }, - aliasNameSource: { - editType: 'string', - helpText: - 'Source to use when deriving the Alias name. valid choices: "serviceaccount_uid" : e.g. 474b11b5-0f20-4f9d-8ca5-65715ab325e0 (most secure choice) "serviceaccount_name" : / e.g. vault/vault-agent default: "serviceaccount_uid"', - fieldGroup: 'default', - type: 'string', - }, - audience: { - editType: 'string', - helpText: 'Optional Audience claim to verify in the jwt.', - fieldGroup: 'default', - type: 'string', - }, - boundServiceAccountNames: { - editType: 'stringArray', - helpText: - 'List of service account names able to access this role. If set to "*" all names are allowed.', - fieldGroup: 'default', - }, - boundServiceAccountNamespaces: { - editType: 'stringArray', - helpText: 'List of namespaces allowed to access this role. If set to "*" all namespaces are allowed.', - fieldGroup: 'default', - }, - tokenBoundCidrs: { - editType: 'stringArray', - helpText: - 'A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.', - fieldGroup: 'Tokens', - label: "Generated Token's Bound CIDRs", - }, - tokenExplicitMaxTtl: { - editType: 'ttl', - helpText: - 'If set, tokens created via this role carry an explicit maximum TTL. 
During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.', - fieldGroup: 'Tokens', - label: "Generated Token's Explicit Maximum TTL", - }, - tokenMaxTtl: { - editType: 'ttl', - helpText: 'The maximum lifetime of the generated token', - fieldGroup: 'Tokens', - label: "Generated Token's Maximum TTL", - }, - tokenNoDefaultPolicy: { - editType: 'boolean', - helpText: "If true, the 'default' policy will not automatically be added to generated tokens", - fieldGroup: 'Tokens', - label: "Do Not Attach 'default' Policy To Generated Tokens", - type: 'boolean', - }, - tokenNumUses: { - editType: 'number', - helpText: 'The maximum number of times a token may be used, a value of zero means unlimited', - fieldGroup: 'Tokens', - label: 'Maximum Uses of Generated Tokens', - type: 'number', - }, - tokenPeriod: { - editType: 'ttl', - helpText: - 'If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value. This takes an integer number of seconds, or a string duration (e.g. 
"24h").', - fieldGroup: 'Tokens', - label: "Generated Token's Period", - }, - tokenPolicies: { - editType: 'stringArray', - helpText: 'A list of policies that will apply to the generated token for this user.', - fieldGroup: 'Tokens', - label: "Generated Token's Policies", - }, - tokenTtl: { - editType: 'ttl', - helpText: 'The initial ttl of the token to generate', - fieldGroup: 'Tokens', - label: "Generated Token's Initial TTL", - }, - tokenType: { - editType: 'string', - helpText: 'The type of token to generate, service or batch', - fieldGroup: 'Tokens', - label: "Generated Token's Type", - type: 'string', - }, - }, -}; +## Step 2: Ensure tokens have `capabilities-self` access -const ldap = { - 'auth-config/ldap': { - anonymousGroupSearch: { - editType: 'boolean', - helpText: - 'Use anonymous binds when performing LDAP group searches (if true the initial credentials will still be used for the initial connection test).', - fieldGroup: 'default', - label: 'Anonymous group search', - type: 'boolean', - }, - binddn: { - editType: 'string', - helpText: 'LDAP DN for searching for the user DN (optional)', - fieldGroup: 'default', - label: 'Name of Object to bind (binddn)', - type: 'string', - }, - bindpass: { - editType: 'string', - helpText: 'LDAP password for searching for the user DN (optional)', - fieldGroup: 'default', - sensitive: true, - type: 'string', - }, - caseSensitiveNames: { - editType: 'boolean', - helpText: - 'If true, case sensitivity will be used when comparing usernames and groups for matching policies.', - fieldGroup: 'default', - type: 'boolean', - }, - certificate: { - editType: 'file', - helpText: - 'CA certificate to use when verifying LDAP server certificate, must be x509 PEM encoded (optional)', - fieldGroup: 'default', - label: 'CA certificate', - type: 'string', - }, - clientTlsCert: { - editType: 'file', - helpText: 'Client certificate to provide to the LDAP server, must be x509 PEM encoded (optional)', - fieldGroup: 'default', - label: 
'Client certificate', - type: 'string', - }, - clientTlsKey: { - editType: 'file', - helpText: 'Client certificate key to provide to the LDAP server, must be x509 PEM encoded (optional)', - fieldGroup: 'default', - label: 'Client key', - type: 'string', - }, - connectionTimeout: { - editType: 'ttl', - helpText: - 'Timeout, in seconds, when attempting to connect to the LDAP server before trying the next URL in the configuration.', - fieldGroup: 'default', - }, - denyNullBind: { - editType: 'boolean', - helpText: - "Denies an unauthenticated LDAP bind request if the user's password is empty; defaults to true", - fieldGroup: 'default', - type: 'boolean', - }, - dereferenceAliases: { - editType: 'string', - helpText: - "When aliases should be dereferenced on search operations. Accepted values are 'never', 'finding', 'searching', 'always'. Defaults to 'never'.", - possibleValues: ['never', 'finding', 'searching', 'always'], - fieldGroup: 'default', - type: 'string', - }, - discoverdn: { - editType: 'boolean', - helpText: 'Use anonymous bind to discover the bind DN of a user (optional)', - fieldGroup: 'default', - label: 'Discover DN', - type: 'boolean', - }, - groupattr: { - editType: 'string', - helpText: - 'LDAP attribute to follow on objects returned by in order to enumerate user group membership. Examples: "cn" or "memberOf", etc. 
Default: cn', - fieldGroup: 'default', - defaultValue: 'cn', - label: 'Group Attribute', - type: 'string', - }, - groupdn: { - editType: 'string', - helpText: 'LDAP search base to use for group membership search (eg: ou=Groups,dc=example,dc=org)', - fieldGroup: 'default', - label: 'Group DN', - type: 'string', - }, - groupfilter: { - editType: 'string', - helpText: - 'Go template for querying group membership of user (optional) The template can access the following context variables: UserDN, Username Example: (&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}})) Default: (|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))', - fieldGroup: 'default', - label: 'Group Filter', - type: 'string', - }, - insecureTls: { - editType: 'boolean', - helpText: 'Skip LDAP server SSL Certificate verification - VERY insecure (optional)', - fieldGroup: 'default', - label: 'Insecure TLS', - type: 'boolean', - }, - maxPageSize: { - editType: 'number', - helpText: - "If set to a value greater than 0, the LDAP backend will use the LDAP server's paged search control to request pages of up to the given size. This can be used to avoid hitting the LDAP server's maximum result size limit. Otherwise, the LDAP backend will not use the paged search control.", - fieldGroup: 'default', - type: 'number', - }, - requestTimeout: { - editType: 'ttl', - helpText: - 'Timeout, in seconds, for the connection when making requests against the server before returning back an error.', - fieldGroup: 'default', - }, - starttls: { - editType: 'boolean', - helpText: 'Issue a StartTLS command after establishing unencrypted connection (optional)', - fieldGroup: 'default', - label: 'Issue StartTLS', - type: 'boolean', - }, - tlsMaxVersion: { - editType: 'string', - helpText: - "Maximum TLS version to use. Accepted values are 'tls10', 'tls11', 'tls12' or 'tls13'. 
Defaults to 'tls12'", - possibleValues: ['tls10', 'tls11', 'tls12', 'tls13'], - fieldGroup: 'default', - label: 'Maximum TLS Version', - type: 'string', - }, - tlsMinVersion: { - editType: 'string', - helpText: - "Minimum TLS version to use. Accepted values are 'tls10', 'tls11', 'tls12' or 'tls13'. Defaults to 'tls12'", - possibleValues: ['tls10', 'tls11', 'tls12', 'tls13'], - fieldGroup: 'default', - label: 'Minimum TLS Version', - type: 'string', - }, - tokenBoundCidrs: { - editType: 'stringArray', - helpText: - 'A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.', - fieldGroup: 'Tokens', - label: "Generated Token's Bound CIDRs", - }, - tokenExplicitMaxTtl: { - editType: 'ttl', - helpText: - 'If set, tokens created via this role carry an explicit maximum TTL. During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.', - fieldGroup: 'Tokens', - label: "Generated Token's Explicit Maximum TTL", - }, - tokenMaxTtl: { - editType: 'ttl', - helpText: 'The maximum lifetime of the generated token', - fieldGroup: 'Tokens', - label: "Generated Token's Maximum TTL", - }, - tokenNoDefaultPolicy: { - editType: 'boolean', - helpText: "If true, the 'default' policy will not automatically be added to generated tokens", - fieldGroup: 'Tokens', - label: "Do Not Attach 'default' Policy To Generated Tokens", - type: 'boolean', - }, - tokenNumUses: { - editType: 'number', - helpText: 'The maximum number of times a token may be used, a value of zero means unlimited', - fieldGroup: 'Tokens', - label: 'Maximum Uses of Generated Tokens', - type: 'number', - }, - tokenPeriod: { - editType: 'ttl', - helpText: - 'If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value. This takes an integer number of seconds, or a string duration (e.g. 
"24h").', - fieldGroup: 'Tokens', - label: "Generated Token's Period", - }, - tokenPolicies: { - editType: 'stringArray', - helpText: 'A list of policies that will apply to the generated token for this user.', - fieldGroup: 'Tokens', - label: "Generated Token's Policies", - }, - tokenTtl: { - editType: 'ttl', - helpText: 'The initial ttl of the token to generate', - fieldGroup: 'Tokens', - label: "Generated Token's Initial TTL", - }, - tokenType: { - editType: 'string', - helpText: 'The type of token to generate, service or batch', - fieldGroup: 'Tokens', - label: "Generated Token's Type", - type: 'string', - }, - upndomain: { - editType: 'string', - helpText: 'Enables userPrincipalDomain login with [username]@UPNDomain (optional)', - fieldGroup: 'default', - label: 'User Principal (UPN) Domain', - type: 'string', - }, - url: { - editType: 'string', - helpText: - 'LDAP URL to connect to (default: ldap://127.0.0.1). Multiple URLs can be specified by concatenating them with commas; they will be tried in-order.', - fieldGroup: 'default', - label: 'URL', - type: 'string', - }, - usePre111GroupCnBehavior: { - editType: 'boolean', - helpText: - 'In Vault 1.1.1 a fix for handling group CN values of different cases unfortunately introduced a regression that could cause previously defined groups to not be found due to a change in the resulting name. If set true, the pre-1.1.1 behavior for matching group CNs will be used. This is only needed in some upgrade scenarios for backwards compatibility. It is enabled by default if the config is upgraded but disabled by default on new configurations.', - fieldGroup: 'default', - type: 'boolean', - }, - useTokenGroups: { - editType: 'boolean', - helpText: - 'If true, use the Active Directory tokenGroups constructed attribute of the user to find the group memberships. 
This will find all security groups including nested ones.', - fieldGroup: 'default', - type: 'boolean', - }, - userattr: { - editType: 'string', - helpText: 'Attribute used for users (default: cn)', - fieldGroup: 'default', - defaultValue: 'cn', - label: 'User Attribute', - type: 'string', - }, - userdn: { - editType: 'string', - helpText: 'LDAP domain to use for users (eg: ou=People,dc=example,dc=org)', - fieldGroup: 'default', - label: 'User DN', - type: 'string', - }, - userfilter: { - editType: 'string', - helpText: - 'Go template for LDAP user search filer (optional) The template can access the following context variables: UserAttr, Username Default: ({{.UserAttr}}={{.Username}})', - fieldGroup: 'default', - label: 'User Search Filter', - type: 'string', - }, - usernameAsAlias: { - editType: 'boolean', - helpText: 'If true, sets the alias name to the username', - fieldGroup: 'default', - type: 'boolean', - }, - }, - group: { - name: { - editType: 'string', - helpText: 'Name of the LDAP group.', - fieldValue: 'mutableId', - fieldGroup: 'default', - readOnly: true, - label: 'Name', - type: 'string', - }, - policies: { - editType: 'stringArray', - helpText: 'A list of policies associated to the group.', - fieldGroup: 'default', - }, - }, - user: { - name: { - editType: 'string', - helpText: 'Name of the LDAP user.', - fieldValue: 'mutableId', - fieldGroup: 'default', - readOnly: true, - label: 'Name', - type: 'string', - }, - groups: { - editType: 'stringArray', - helpText: 'A list of additional groups associated with the user.', - fieldGroup: 'default', - }, - policies: { - editType: 'stringArray', - helpText: 'A list of policies associated with the user.', - fieldGroup: 'default', - }, - }, -}; +Tokens require `update` access to the +[`sys/capabilies-self`](/vault/api-docs/system/capabilities-self) endpoint to +request cached secrets. Vault tokens receive `update` permissions +[by default](/vault/docs/concepts/policies#default-policy). 
If you have modified +or removed the default policy, you must explicitly create a policy with the +appropriate permissions. For example: +```hcl + path "sys/capabilities-self" { + capabilities = ["update"] + } +``` -const okta = { - 'auth-config/okta': { - apiToken: { - editType: 'string', - helpText: 'Okta API key.', - fieldGroup: 'default', - label: 'API Token', - type: 'string', - }, - baseUrl: { - editType: 'string', - helpText: - 'The base domain to use for the Okta API. When not specified in the configuration, "okta.com" is used.', - fieldGroup: 'default', - label: 'Base URL', - type: 'string', - }, - bypassOktaMfa: { - editType: 'boolean', - helpText: - 'When set true, requests by Okta for a MFA check will be bypassed. This also disallows certain status checks on the account, such as whether the password is expired.', - fieldGroup: 'default', - label: 'Bypass Okta MFA', - type: 'boolean', - }, - orgName: { - editType: 'string', - helpText: 'Name of the organization to be used in the Okta API.', - fieldGroup: 'default', - label: 'Organization Name', - type: 'string', - }, - tokenBoundCidrs: { - editType: 'stringArray', - helpText: - 'A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.', - fieldGroup: 'Tokens', - label: "Generated Token's Bound CIDRs", - }, - tokenExplicitMaxTtl: { - editType: 'ttl', - helpText: - 'If set, tokens created via this role carry an explicit maximum TTL. 
During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.', - fieldGroup: 'Tokens', - label: "Generated Token's Explicit Maximum TTL", - }, - tokenMaxTtl: { - editType: 'ttl', - helpText: 'The maximum lifetime of the generated token', - fieldGroup: 'Tokens', - label: "Generated Token's Maximum TTL", - }, - tokenNoDefaultPolicy: { - editType: 'boolean', - helpText: "If true, the 'default' policy will not automatically be added to generated tokens", - fieldGroup: 'Tokens', - label: "Do Not Attach 'default' Policy To Generated Tokens", - type: 'boolean', - }, - tokenNumUses: { - editType: 'number', - helpText: 'The maximum number of times a token may be used, a value of zero means unlimited', - fieldGroup: 'Tokens', - label: 'Maximum Uses of Generated Tokens', - type: 'number', - }, - tokenPeriod: { - editType: 'ttl', - helpText: - 'If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value. This takes an integer number of seconds, or a string duration (e.g. 
"24h").', - fieldGroup: 'Tokens', - label: "Generated Token's Period", - }, - tokenPolicies: { - editType: 'stringArray', - helpText: 'A list of policies that will apply to the generated token for this user.', - fieldGroup: 'Tokens', - label: "Generated Token's Policies", - }, - tokenTtl: { - editType: 'ttl', - helpText: 'The initial ttl of the token to generate', - fieldGroup: 'Tokens', - label: "Generated Token's Initial TTL", - }, - tokenType: { - editType: 'string', - helpText: 'The type of token to generate, service or batch', - fieldGroup: 'Tokens', - label: "Generated Token's Type", - type: 'string', - }, - }, - group: { - name: { - editType: 'string', - helpText: 'Name of the Okta group.', - fieldValue: 'mutableId', - fieldGroup: 'default', - readOnly: true, - label: 'Name', - type: 'string', - }, - policies: { - editType: 'stringArray', - helpText: 'A list of policies associated to the group.', - fieldGroup: 'default', - }, - }, - user: { - name: { - editType: 'string', - helpText: 'Name of the user.', - fieldValue: 'mutableId', - fieldGroup: 'default', - readOnly: true, - label: 'Name', - type: 'string', - }, - groups: { - editType: 'stringArray', - helpText: 'List of groups associated with the user.', - fieldGroup: 'default', - }, - policies: { - editType: 'stringArray', - helpText: 'List of policies associated with the user.', - fieldGroup: 'default', - }, - }, -}; +## Step 3: Configure an appropriate refresh interval +By default, Vault Proxy refreshes tokens every five minutes. You can change the +default behavior and configure Proxy to verify and update cached token +capabilities with the `static_secret_token_capability_refresh_interval` +parameter in the `cache` configuration stanza. 
For example, to set a refresh +interval of one minute: +```hcl +cache { + cache_static_secrets = true + static_secret_token_capability_refresh_interval = "1m" +} +``` -const radius = { - 'auth-config/radius': { - dialTimeout: { - editType: 'ttl', - helpText: 'Number of seconds before connect times out (default: 10)', - fieldGroup: 'default', - defaultValue: 10, - }, - host: { - editType: 'string', - helpText: 'RADIUS server host', - fieldGroup: 'default', - label: 'Host', - type: 'string', - }, - nasIdentifier: { - editType: 'string', - helpText: 'RADIUS NAS Identifier field (optional)', - fieldGroup: 'default', - label: 'NAS Identifier', - type: 'string', - }, - nasPort: { - editType: 'number', - helpText: 'RADIUS NAS port field (default: 10)', - fieldGroup: 'default', - defaultValue: 10, - label: 'NAS Port', - type: 'number', - }, - port: { - editType: 'number', - helpText: 'RADIUS server port (default: 1812)', - fieldGroup: 'default', - defaultValue: 1812, - type: 'number', - }, - readTimeout: { - editType: 'ttl', - helpText: 'Number of seconds before response times out (default: 10)', - fieldGroup: 'default', - defaultValue: 10, - }, - secret: { - editType: 'string', - helpText: 'Secret shared with the RADIUS server', - fieldGroup: 'default', - type: 'string', - }, - tokenBoundCidrs: { - editType: 'stringArray', - helpText: - 'A list of CIDR blocks. If set, specifies the blocks of IP addresses which are allowed to use the generated token.', - fieldGroup: 'Tokens', - label: "Generated Token's Bound CIDRs", - }, - tokenExplicitMaxTtl: { - editType: 'ttl', - helpText: - 'If set, tokens created via this role carry an explicit maximum TTL. 
During renewal, the current maximum TTL values of the role and the mount are not checked for changes, and any updates to these values will have no effect on the token being renewed.', - fieldGroup: 'Tokens', - label: "Generated Token's Explicit Maximum TTL", - }, - tokenMaxTtl: { - editType: 'ttl', - helpText: 'The maximum lifetime of the generated token', - fieldGroup: 'Tokens', - label: "Generated Token's Maximum TTL", - }, - tokenNoDefaultPolicy: { - editType: 'boolean', - helpText: "If true, the 'default' policy will not automatically be added to generated tokens", - fieldGroup: 'Tokens', - label: "Do Not Attach 'default' Policy To Generated Tokens", - type: 'boolean', - }, - tokenNumUses: { - editType: 'number', - helpText: 'The maximum number of times a token may be used, a value of zero means unlimited', - fieldGroup: 'Tokens', - label: 'Maximum Uses of Generated Tokens', - type: 'number', - }, - tokenPeriod: { - editType: 'ttl', - helpText: - 'If set, tokens created via this role will have no max lifetime; instead, their renewal period will be fixed to this value. This takes an integer number of seconds, or a string duration (e.g. 
"24h").', - fieldGroup: 'Tokens', - label: "Generated Token's Period", - }, - tokenPolicies: { - editType: 'stringArray', - helpText: 'A list of policies that will apply to the generated token for this user.', - fieldGroup: 'Tokens', - label: "Generated Token's Policies", - }, - tokenTtl: { - editType: 'ttl', - helpText: 'The initial ttl of the token to generate', - fieldGroup: 'Tokens', - label: "Generated Token's Initial TTL", - }, - tokenType: { - editType: 'string', - helpText: 'The type of token to generate, service or batch', - fieldGroup: 'Tokens', - label: "Generated Token's Type", - type: 'string', - }, - unregisteredUserPolicies: { - editType: 'string', - helpText: - 'List of policies to grant upon successful RADIUS authentication of an unregistered user (default: empty)', - fieldGroup: 'default', - label: 'Policies for unregistered users', - type: 'string', - }, - }, - user: { - name: { - editType: 'string', - helpText: 'Name of the RADIUS user.', - fieldValue: 'mutableId', - fieldGroup: 'default', - readOnly: true, - label: 'Name', - type: 'string', - }, - policies: { - editType: 'stringArray', - helpText: 'A list of policies associated to the user.', - fieldGroup: 'default', - }, - }, -}; +## Functionality -export default { - azure, - userpass, - cert, - gcp, - github, - jwt, - kubernetes, - ldap, - okta, - radius, - // aws is the only method that doesn't leverage OpenApi in practice -}; +With static secret caching, Vault Proxy caches `GET` requests for KVv1 and KVv2 +endpoints. + +When a client sends a `GET` request for a new KV secret, Proxy forwards the +request to Vault but caches the response before forwarding it to the client. If +that client makes subsequent `GET` requests for the same secret, Vault Proxy +serves the cached response rather than forwarding the request to Vault. + +Similarly, when a token requests access to a KV secret, it must complete a +success `GET` request. 
If the request is successful, Proxy caches the fact that +the token was successful in addition to the result. Subsequent requests by the +same token can then access this secret from the cache instead of Vault. + +Vault Proxy uses the [event system](/vault/docs/concepts/events) to keep the +cache up to date. It monitors the KV event feed for events related to any secret +currently stored in the cache, including modification events like updates and +deletes. When Proxy detects a change in a cached secret, it will update or +evict the cache entry as appropriate. + +Vault Proxy also checks and refreshes the access permissions of known tokens +according to the window set with `static_secret_token_capability_refresh_interval`. +By default, the refresh interval is five minutes. + +Every interval, Proxy calls [`sys/capabilities-self`](/vault/api-docs/system/capabilities-self) on +behalf of every token in the cache to confirm the token still has permission to +access the cached secret. If the result from Vault indicates that permission (or +the token itself) was revoked, Proxy updates the cache entry so that the affected +token can no longer access the relevant paths from the cache. The refresh interval +is essentially the maximum period after which permission to read a KV secret is +fully revoked for the relevant token. + +For token refresh to work, any token that will access the cache also needs +`update` permission for [`sys/capabilities-self`](/vault/api-docs/system/capabilities-self). +Having `update` permission for the token lets Proxy test capabilities for the +token against multiple paths with a single request instead of testing for a `403` +response for each path explicitly. + + + + If Proxy's API proxy is configured to use auto-authentication for tokens, and **all** + requests that pass through Vault Proxy use the same token, Proxy only + makes a single request to Vault every refresh interval, no matter how many + secrets are currently cached.
+ + + +When static secret caching is enabled, Proxy returns `HIT` or `MISS` in the `X-Cache` +response header for requests so clients can tell if the response was served from +the cache or forwarded from Vault. In the event of a hit, Proxy also sets the +`Age` header to indicate, in seconds, how old the cache entry is. + + + + The fact that a cache entry is old does not necessarily mean that the + information is out of date. Vault Proxy continually monitors KV events for + updates. A large value for `Age` may simply mean that the secret has not been + rotated recently. + + + +## Configuration + +The top level `cache` block has the following configuration entries relating to static secret caching: + +- `cache_static_secrets` `(bool: false)` - Enables static secret caching when +set to `true`. When `cache_static_secrets` and `auto_auth` are both enabled, +Vault Proxy serves KV secrets directly from the cache to clients with +sufficient permission. + +- `static_secret_token_capability_refresh_interval` `(duration: "5m", optional)` - +Sets the interval as a [duration format string](/vault/docs/concepts/duration-format) +at which Vault Proxy rechecks the permissions of tokens used to access cached +secrets. The refresh interval is the maximum period after which permission to +read a cached KV secret is fully revoked. Ignored when `cache_static_secrets` +is `false`. + +### Example configuration + +The following example Vault Proxy configuration: +- Defines a TCP listener (`listener`) with TLS disabled. +- Forces clients using API proxy (`api_proxy`) to identify with an auto-auth token. +- Configures auto-authentication (`auto_auth`) for `approle`. +- Enables static secret caching with `cache_static_secrets`. +- Sets an explicit token capability refresh window of 1 hour with `static_secret_token_capability_refresh_interval`. + +```hcl +# Other Vault Proxy configuration blocks +# ...
+ +cache { + cache_static_secrets = true + static_secret_token_capability_refresh_interval = "1h" +} + +api_proxy { + use_auto_auth_token = "force" +} + +listener "tcp" { + address = "127.0.0.1:8100" + tls_disable = true +} + +auto_auth { + method { + type = "approle" + config = { + role_id_file_path = "roleid" + secret_id_file_path = "secretid" + remove_secret_id_file_after_reading = false + } + } +``` +[event-system]: /vault/docs/concepts/events diff --git a/ui/tests/helpers/replication.js b/ui/tests/helpers/replication.js index be3cc8ab0d32..9f7c7010f86d 100644 --- a/ui/tests/helpers/replication.js +++ b/ui/tests/helpers/replication.js @@ -1,36 +1,99 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: BUSL-1.1 - */ - -import { click, fillIn, findAll, currentURL, visit, settled, waitUntil } from '@ember/test-helpers'; - -export const disableReplication = async (type, assert) => { - // disable performance replication - await visit(`/vault/replication/${type}`); - - if (findAll('[data-test-replication-link="manage"]').length) { - await click('[data-test-replication-link="manage"]'); - - await click('[data-test-disable-replication] button'); - - const typeDisplay = type === 'dr' ? 'Disaster Recovery' : 'Performance'; - await fillIn('[data-test-confirmation-modal-input="Disable Replication?"]', typeDisplay); - await click('[data-test-confirm-button]'); - await settled(); // eslint-disable-line - - if (assert) { - // bypassing for now -- remove if tests pass reliably - // assert.strictEqual( - // flash.latestMessage, - // 'This cluster is having replication disabled. Vault will be unavailable for a brief period and will resume service shortly.', - // 'renders info flash when disabled' - // ); - assert.ok( - await waitUntil(() => currentURL() === '/vault/replication'), - 'redirects to the replication page' - ); - } - await settled(); - } -}; +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/cli" + "github.com/posener/complete" +) + +var ( + _ cli.Command = (*StatusCommand)(nil) + _ cli.CommandAutocomplete = (*StatusCommand)(nil) +) + +type StatusCommand struct { + *BaseCommand +} + +func (c *StatusCommand) Synopsis() string { + return "Print seal and HA status" +} + +func (c *StatusCommand) Help() string { + helpText := ` +Usage: vault status [options] + + Prints the current state of Vault including whether it is sealed and if HA + mode is enabled. This command prints regardless of whether the Vault is + sealed. + + The exit code reflects the seal status: + + - 0 - unsealed + - 1 - error + - 2 - sealed + +` + c.Flags().Help() + + return strings.TrimSpace(helpText) +} + +func (c *StatusCommand) Flags() *FlagSets { + return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) +} + +func (c *StatusCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *StatusCommand) AutocompleteFlags() complete.Flags { + return c.Flags().Completions() +} + +func (c *StatusCommand) Run(args []string) int { + f := c.Flags() + + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = f.Args() + if len(args) > 0 { + c.UI.Error(fmt.Sprintf("Too many arguments (expected 0, got %d)", len(args))) + return 1 + } + + client, err := c.Client() + if err != nil { + c.UI.Error(err.Error()) + // We return 2 everywhere else, but 2 is reserved for "sealed" here + return 1 + } + + // Always query in the root namespace. + // Although seal-status is present in other namespaces, it will not + // be available until Vault is unsealed. 
+ client.SetNamespace("") + + status, err := client.Sys().SealStatus() + if err != nil { + c.UI.Error(fmt.Sprintf("Error checking seal status: %s", err)) + return 1 + } + + // Do not return the int here yet, since we may want to return a custom error + // code depending on the seal status. + code := OutputSealStatus(c.UI, client, status) + + if status.Sealed { + return 2 + } + + return code +} diff --git a/ui/tests/integration/components/alert-inline-test.js b/ui/tests/integration/components/alert-inline-test.js index 50be71efbc95..47a2803d66cb 100644 --- a/ui/tests/integration/components/alert-inline-test.js +++ b/ui/tests/integration/components/alert-inline-test.js @@ -1,89 +1,118 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: BUSL-1.1 - */ - -import { module, test } from 'qunit'; -import { setupRenderingTest } from 'ember-qunit'; -import { render, settled, find, waitUntil } from '@ember/test-helpers'; -import hbs from 'htmlbars-inline-precompile'; - -module('Integration | Component | alert-inline', function (hooks) { - setupRenderingTest(hooks); - - hooks.beforeEach(function () { - this.set('message', 'some very important alert'); - this.set('type', 'warning'); - }); - - test('it renders alert message with correct class args', async function (assert) { - await render(hbs` - - `); - assert.dom('[data-test-inline-error-message]').hasText('some very important alert'); - assert - .dom('[data-test-inline-alert]') - .hasAttribute('class', 'is-flex-center has-top-padding-xs is-marginless size-small'); - }); - - test('it yields to block text', async function (assert) { - await render(hbs` - - A much more important alert - - `); - assert.dom('[data-test-inline-error-message]').hasText('A much more important alert'); - }); - - test('it renders correctly for type=danger', async function (assert) { - this.set('type', 'danger'); - await render(hbs` - - `); - assert - .dom('[data-test-inline-error-message]') - .hasAttribute('class', 'has-text-danger', 
'has danger text'); - assert.dom('[data-test-icon="x-square-fill"]').exists('danger icon exists'); - }); - - test('it renders correctly for type=warning', async function (assert) { - await render(hbs` - - `); - assert.dom('[data-test-inline-error-message]').doesNotHaveAttribute('class', 'does not have styled text'); - assert.dom('[data-test-icon="alert-triangle-fill"]').exists('warning icon exists'); - }); - - test('it mimics loading when message changes', async function (assert) { - await render(hbs` - - `); - assert - .dom('[data-test-inline-error-message]') - .hasText('some very important alert', 'it renders original message'); - - this.set('message', 'some changed alert!!!'); - await waitUntil(() => find('[data-test-icon="loading"]')); - assert.ok(find('[data-test-icon="loading"]'), 'it shows loading icon when message changes'); - await settled(); - assert - .dom('[data-test-inline-error-message]') - .hasText('some changed alert!!!', 'it shows updated message'); - }); -}); +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +package command + +import ( + "strings" + "testing" + + "github.com/hashicorp/cli" +) + +func testStatusCommand(tb testing.TB) (*cli.MockUi, *StatusCommand) { + tb.Helper() + + ui := cli.NewMockUi() + return ui, &StatusCommand{ + BaseCommand: &BaseCommand{ + UI: ui, + }, + } +} + +func TestStatusCommand_Run(t *testing.T) { + t.Parallel() + + cases := []struct { + name string + args []string + sealed bool + out string + code int + }{ + { + "unsealed", + nil, + false, + "Sealed false", + 0, + }, + { + "sealed", + nil, + true, + "Sealed true", + 2, + }, + { + "args", + []string{"foo"}, + false, + "Too many arguments", + 1, + }, + } + + t.Run("validations", func(t *testing.T) { + t.Parallel() + + for _, tc := range cases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if tc.sealed { + if err := client.Sys().Seal(); err != nil { + t.Fatal(err) + } + } + + ui, cmd := testStatusCommand(t) + cmd.client = client + + code := cmd.Run(tc.args) + if code != tc.code { + t.Errorf("expected %d to be %d", code, tc.code) + } + + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, tc.out) { + t.Errorf("expected %q to contain %q", combined, tc.out) + } + }) + } + }) + + t.Run("communication_failure", func(t *testing.T) { + t.Parallel() + + client, closer := testVaultServerBad(t) + defer closer() + + ui, cmd := testStatusCommand(t) + cmd.client = client + + code := cmd.Run([]string{}) + if exp := 1; code != exp { + t.Errorf("expected %d to be %d", code, exp) + } + + expected := "Error checking seal status: " + combined := ui.OutputWriter.String() + ui.ErrorWriter.String() + if !strings.Contains(combined, expected) { + t.Errorf("expected %q to contain %q", combined, expected) + } + }) + + t.Run("no_tabs", func(t *testing.T) { + t.Parallel() + + _, cmd := testStatusCommand(t) + assertNoTabs(t, cmd) + }) +} diff --git 
a/ui/tests/integration/components/app-footer-test.js b/ui/tests/integration/components/app-footer-test.js new file mode 100644 index 000000000000..b838f29efdf9 --- /dev/null +++ b/ui/tests/integration/components/app-footer-test.js @@ -0,0 +1,67 @@ +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + +
    + {{#if @label}} + + {{#if @subText}} +

    + {{@subText}} +

    + {{/if}} + {{/if}} + {{#each this.inputList as |data index|}} +
    +
    +