From cefe25d6591f319cea7724204d4086a126bd3c10 Mon Sep 17 00:00:00 2001
From: Christoph Hartmann
Date: Sun, 22 Oct 2023 19:51:08 +0200
Subject: [PATCH] =?UTF-8?q?=E2=AD=90=EF=B8=8F=20add=20integration=20tests?=
 =?UTF-8?q?=20to=20cnspec?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Makefile                                   |    7 +-
 test/bundles.go                            |   58 +
 test/go.mod                                |  397 +
 test/go.sum                                | 1539 ++++
 test/k8s_test.go                           |   61 +
 test/terraform_test.go                     |   80 +
 .../mondoo-kubernetes-security.mql.yaml    | 6454 +++++++++++++++++
 .../mondoo-terraform-aws-security.mql.yaml |  563 ++
 .../mondoo-terraform-gcp-security.mql.yaml | 2297 ++++++
 9 files changed, 11453 insertions(+), 3 deletions(-)
 create mode 100644 test/bundles.go
 create mode 100644 test/go.mod
 create mode 100644 test/go.sum
 create mode 100644 test/k8s_test.go
 create mode 100644 test/terraform_test.go
 create mode 100644 test/testdata/mondoo-kubernetes-security.mql.yaml
 create mode 100644 test/testdata/mondoo-terraform-aws-security.mql.yaml
 create mode 100644 test/testdata/mondoo-terraform-gcp-security.mql.yaml

diff --git a/Makefile b/Makefile
index 374664d29..0c4e2958d 100644
--- a/Makefile
+++ b/Makefile
@@ -115,11 +115,12 @@ test: test/go test/lint
 
 test/go: cnspec/generate test/go/plain
 test/go/plain:
-	# TODO /motor/docker/docker_engine cannot be executed inside of docker
-	go test -cover $(shell go list ./... | grep -v '/motor/discovery/docker_engine')
+	go test -cover $(shell go list ./...)
+	cd test && go test -cover $(shell go list test/...)
 
 test/go/plain-ci: prep/tools
-	gotestsum --junitfile report.xml --format pkgname -- -cover $(shell go list ./... | grep -v '/vendor/' | grep -v '/motor/discovery/docker_engine')
+	gotestsum --junitfile report.xml --format pkgname -- -cover $(shell go list ./... | grep -v '/vendor/')
+	cd test && gotestsum --junitfile report.xml --format pkgname -- -cover $(shell go list ./... | grep -v '/vendor/')
 
 .PHONY: test/lint/staticcheck
 test/lint/staticcheck:
diff --git a/test/bundles.go b/test/bundles.go
new file mode 100644
index 000000000..fb83acc40
--- /dev/null
+++ b/test/bundles.go
@@ -0,0 +1,58 @@
+// Copyright (c) Mondoo, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package test
+
+import (
+	"context"
+	"go.mondoo.com/cnquery/v9/logger"
+	"go.mondoo.com/cnquery/v9/providers"
+	"go.mondoo.com/cnquery/v9/providers-sdk/v1/inventory"
+	"go.mondoo.com/cnspec/v9/policy"
+	"go.mondoo.com/cnspec/v9/policy/scan"
+)
+
+func init() {
+	logger.Set("info")
+}
+
+func runBundle(policyBundlePath string, policyMrn string, asset *inventory.Asset) (*policy.Report, error) {
+	ctx := context.Background()
+	policyBundle, err := policy.BundleFromPaths(policyBundlePath)
+	if err != nil {
+		return nil, err
+	}
+
+	policyBundle.OwnerMrn = "//policy.api.mondoo.app"
+	var results *policy.Report
+
+	policyFilters := []string{}
+	if policyMrn != "" {
+		policyFilters = append(policyFilters, policyMrn)
+	}
+
+	scanner := scan.NewLocalScanner(scan.WithRecording(providers.NullRecording{})) // TODO: fix recording
+	result, err := scanner.RunIncognito(ctx, &scan.Job{
+		Inventory: &inventory.Inventory{
+			Spec: &inventory.InventorySpec{
+				Assets: []*inventory.Asset{asset},
+			},
+		},
+		Bundle:        policyBundle,
+		PolicyFilters: policyFilters,
+		ReportType:    scan.ReportType_FULL,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	reports := result.GetFull().Reports
+	if len(reports) > 0 {
+		for _, report := range reports {
+			results = report
+			break
+		}
+	}
+
+	return results, err
+}
diff --git a/test/go.mod b/test/go.mod
new file mode 100644
index 000000000..a49cea54a
--- /dev/null
+++ b/test/go.mod
@@ -0,0 +1,397 @@
+module go.mondoo.com/cnspec/v9/test
+
+replace go.mondoo.com/cnspec/v9 => ../
+
+replace go.mondoo.com/cnquery/v9/providers/k8s => ../cnquery/providers/k8s
+
+replace go.mondoo.com/cnquery/v9/providers/terraform => ../cnquery/providers/terraform
+
+go 1.21.3
+
+require (
+	github.com/stretchr/testify v1.8.4
+	go.mondoo.com/cnquery/v9 v9.2.4-0.20231020125424-f2f6ce5a5ad0
+	go.mondoo.com/cnquery/v9/providers/k8s v0.0.0-00010101000000-000000000000
+	go.mondoo.com/cnquery/v9/providers/terraform v0.0.0-00010101000000-000000000000
+	go.mondoo.com/cnspec/v9 v9.0.0-00010101000000-000000000000
+)
+
+require (
+	4d63.com/gocheckcompilerdirectives v1.2.1 // indirect
+	4d63.com/gochecknoglobals v0.2.1 // indirect
+	cloud.google.com/go v0.110.8 // indirect
+	cloud.google.com/go/compute v1.23.1 // indirect
+	cloud.google.com/go/compute/metadata v0.2.3 // indirect
+	cloud.google.com/go/iam v1.1.3 // indirect
+	cloud.google.com/go/kms v1.15.3 // indirect
+	cloud.google.com/go/secretmanager v1.11.2 // indirect
+	cloud.google.com/go/storage v1.33.0 // indirect
+	dario.cat/mergo v1.0.0 // indirect
+	github.com/4meepo/tagalign v1.3.2 // indirect
+	github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect
+	github.com/99designs/keyring v1.2.2 // indirect
+	github.com/Abirdcfly/dupword v0.0.13 // indirect
+	github.com/Antonboom/errname v0.1.12 // indirect
+	github.com/Antonboom/nilnil v0.1.7 // indirect
+	github.com/BurntSushi/toml v1.3.2 // indirect
+	github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
+	github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 // indirect
+	github.com/GoogleCloudPlatform/berglas v1.0.3 // indirect
+	github.com/Masterminds/semver v1.5.0 // indirect
+	github.com/Microsoft/go-winio v0.6.1 // indirect
+	github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect
+	github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect
+	github.com/StackExchange/wmi v1.2.1 // indirect
+	github.com/acomagu/bufpipe v1.0.4 // indirect
+	github.com/agext/levenshtein v1.2.3 // indirect
+
github.com/alecthomas/participle v0.3.0 // indirect + github.com/alecthomas/participle/v2 v2.1.0 // indirect + github.com/alexkohler/nakedret/v2 v2.0.2 // indirect + github.com/alexkohler/prealloc v1.0.0 // indirect + github.com/alingse/asasalint v0.0.11 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/ashanbrown/forbidigo v1.6.0 // indirect + github.com/ashanbrown/makezero v1.1.1 // indirect + github.com/atotto/clipboard v0.1.4 // indirect + github.com/aws/aws-sdk-go v1.45.26 // indirect + github.com/aws/aws-sdk-go-v2 v1.21.2 // indirect + github.com/aws/aws-sdk-go-v2/config v1.19.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.43 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 // indirect + github.com/aws/aws-sdk-go-v2/service/ec2 v1.125.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect v1.17.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 // indirect + github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.21.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssm v1.38.2 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 // indirect + github.com/aws/smithy-go v1.15.0 // indirect + github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231003182221-725682229e60 // indirect + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bkielbasa/cyclop v1.2.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/blizzy78/varnamelen v0.8.0 // indirect + github.com/bombsimon/wsl/v3 v3.4.0 // indirect + github.com/breml/bidichk v0.2.7 // indirect + github.com/breml/errchkjson v0.3.6 // indirect + github.com/butuzov/ireturn v0.2.0 // indirect + github.com/butuzov/mirror v1.1.0 // indirect + github.com/c-bata/go-prompt v0.2.6 // indirect + github.com/ccojocar/zxcvbn-go v1.0.1 // indirect + github.com/cenkalti/backoff/v3 v3.2.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/charithe/durationcheck v0.0.10 // indirect + github.com/charmbracelet/bubbles v0.16.1 // indirect + github.com/charmbracelet/bubbletea v0.24.2 // indirect + github.com/charmbracelet/harmonica v0.2.0 // indirect + github.com/charmbracelet/lipgloss v0.9.1 // indirect + github.com/chavacava/garif v0.1.0 // indirect + github.com/cloudflare/circl v1.3.3 // indirect + github.com/cockroachdb/errors v1.11.1 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect + github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect + github.com/curioswitch/go-reassign v0.2.0 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/daixiang0/gci v0.11.2 // indirect + github.com/danieljoos/wincred v1.2.0 // indirect + github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/denis-tingaikin/go-header v0.4.3 // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/docker/cli v24.0.6+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker v24.0.6+incompatible // indirect + github.com/docker/docker-credential-helpers v0.8.0 // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/dvsekhvalnov/jose2go v1.5.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/emirpasic/gods v1.18.1 // indirect + github.com/esimonov/ifshort v1.0.4 // indirect + github.com/ettle/strcase v0.1.1 // indirect + github.com/fatih/color v1.15.0 // indirect + github.com/fatih/structtag v1.2.0 // indirect + github.com/firefart/nonamedreturns v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fzipp/gocyclo v0.6.0 // indirect + github.com/getsentry/sentry-go v0.25.0 // indirect + github.com/glebarez/go-sqlite v1.21.2 // indirect + github.com/go-critic/go-critic v0.9.0 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.5.0 // indirect + github.com/go-git/go-git/v5 v5.9.0 // indirect + github.com/go-jose/go-jose/v3 v3.0.0 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.20.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-toolsmith/astcast v1.1.0 // indirect + github.com/go-toolsmith/astcopy v1.1.0 // indirect + github.com/go-toolsmith/astequal v1.1.0 // indirect + github.com/go-toolsmith/astfmt v1.1.0 // indirect + github.com/go-toolsmith/astp v1.1.0 // indirect + github.com/go-toolsmith/strparse v1.1.0 // indirect + github.com/go-toolsmith/typep v1.1.0 // indirect + github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/uuid v4.4.0+incompatible // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt v3.2.2+incompatible // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect + github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect + github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect + github.com/golangci/gofmt v0.0.0-20231004121655-933a45ce4cd8 // indirect + github.com/golangci/golangci-lint v1.54.2 // indirect + github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect + github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect + github.com/golangci/misspell v0.4.1 // indirect + github.com/golangci/revgrep v0.5.0 // indirect + github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-containerregistry v0.16.1 // indirect + 
github.com/google/gofuzz v1.2.0 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/google/uuid v1.3.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect + github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 // indirect + github.com/gostaticanalysis/analysisutil v0.7.1 // indirect + github.com/gostaticanalysis/comment v1.4.2 // indirect + github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect + github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-plugin v1.5.2 // indirect + github.com/hashicorp/go-retryablehttp v0.7.4 // indirect + github.com/hashicorp/go-rootcerts v1.0.2 // indirect + github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect + github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.5 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hashicorp/hcl/v2 v2.18.0 // indirect + github.com/hashicorp/vault/api v1.10.0 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/hnakamur/go-scp v1.0.2 // indirect + github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect + github.com/jgautheron/goconst v1.6.0 // indirect + github.com/jingyugao/rowserrcheck v1.1.1 // indirect + github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/joncrlsn/dque v0.0.0-20211108142734-c2ef48c5192a // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/julz/importas v0.1.0 // indirect + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/kisielk/errcheck v1.6.3 // indirect + github.com/kisielk/gotool v1.0.0 // indirect + github.com/kkHAIKE/contextcheck v1.1.4 // indirect + github.com/klauspost/compress v1.17.1 // indirect + github.com/knqyf263/go-rpmdb v0.0.0-20231008124120-ac49267ab4e1 // indirect + github.com/kofalt/go-memoize v0.0.0-20220914132407-0b5d6a304579 // indirect + github.com/kr/fs v0.1.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/kulti/thelper v0.6.3 // indirect + github.com/kunwardeep/paralleltest v1.0.8 // indirect + github.com/kyoh86/exportloopref v0.1.11 // indirect + github.com/ldez/gomoddirectives v0.2.3 // indirect + github.com/ldez/tagliatelle v0.5.0 // indirect + github.com/leonklingele/grouper v1.1.1 // indirect + github.com/lithammer/fuzzysearch v1.1.8 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect + github.com/lufeee/execinquery v1.2.1 // indirect + github.com/magiconair/properties v1.8.7 // indirect + 
github.com/mailru/easyjson v0.7.7 // indirect + github.com/maratori/testableexamples v1.0.0 // indirect + github.com/maratori/testpackage v1.1.1 // indirect + github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mattn/go-localereader v0.0.1 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mattn/go-tty v0.0.5 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mbilski/exhaustivestruct v1.2.0 // indirect + github.com/mgechev/revive v1.3.4 // indirect + github.com/miekg/dns v1.1.56 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/moricho/tparallel v0.3.1 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect + github.com/muesli/cancelreader v0.2.2 // indirect + github.com/muesli/reflow v0.3.0 // indirect + github.com/muesli/termenv v0.15.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/nakabonne/nestif v0.3.1 // indirect + github.com/nishanths/exhaustive v0.11.0 // indirect + github.com/nishanths/predeclared v0.2.2 // indirect + github.com/nunnatsa/ginkgolinter v0.14.0 // indirect + github.com/oklog/run v1.1.0 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + github.com/patrickmn/go-cache v2.1.0+incompatible // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pkg/sftp v1.13.6 // indirect + github.com/pkg/term v1.2.0-beta.2 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/polyfloyd/go-errorlint v1.4.5 // indirect + github.com/prometheus/client_golang v1.17.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/quasilyte/go-ruleguard v0.4.0 // indirect + github.com/quasilyte/gogrep v0.5.0 // indirect + github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect + github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + github.com/rivo/uniseg v0.4.4 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/rs/zerolog v1.31.0 // indirect + github.com/ryancurrah/gomodguard v1.3.0 // indirect + github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect + github.com/ryanuber/go-glob v1.0.0 // indirect + github.com/sagikazarmark/locafero v0.3.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sahilm/fuzzy v0.1.0 // indirect + github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect + github.com/sashamelentyev/interfacebloat v1.1.0 // indirect + 
github.com/sashamelentyev/usestdlibvars v1.24.0 // indirect + github.com/securego/gosec/v2 v2.18.1 // indirect + github.com/segmentio/fasthash v1.0.3 // indirect + github.com/segmentio/ksuid v1.0.4 // indirect + github.com/sergi/go-diff v1.3.1 // indirect + github.com/sethvargo/go-password v0.2.0 // indirect + github.com/sethvargo/go-retry v0.2.4 // indirect + github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sivchari/containedctx v1.0.3 // indirect + github.com/sivchari/nosnakecase v1.7.0 // indirect + github.com/sivchari/tenv v1.7.1 // indirect + github.com/skeema/knownhosts v1.2.1 // indirect + github.com/sonatard/noctx v0.0.2 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/sourcegraph/go-diff v0.7.0 // indirect + github.com/spf13/afero v1.10.0 // indirect + github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/cobra v1.7.0 // indirect + github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c // indirect + github.com/spf13/viper v1.17.0 // indirect + github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect + github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect + github.com/stretchr/objx v0.5.1 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect + github.com/tdakkota/asciicheck v0.2.0 // indirect + github.com/tetafro/godot v1.4.15 // indirect + github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect + github.com/timonwong/loggercheck v0.9.4 // indirect + github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect + github.com/ulikunitz/xz v0.5.11 // indirect + github.com/ultraware/funlen v0.1.0 // indirect + github.com/ultraware/whitespace v0.0.5 // indirect + github.com/uudashr/gocognit v1.1.1 // indirect + github.com/vbatts/tar-split v0.11.5 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect + github.com/xen0n/gosmopolitan v1.2.2 // indirect + github.com/yagipy/maintidx v1.0.0 // indirect + github.com/yeya24/promlinter v0.2.0 // indirect + github.com/ykadowak/zerologlint v0.1.3 // indirect + github.com/zclconf/go-cty v1.14.0 // indirect + gitlab.com/bosi/decorder v0.4.1 // indirect + go.mondoo.com/ranger-rpc v0.5.2 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.tmz.dev/musttag v0.7.2 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.26.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect + golang.org/x/exp/typeparams v0.0.0-20231006140011-7918f672742d // indirect + golang.org/x/mod v0.13.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.13.0 // indirect + golang.org/x/sync v0.4.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect + golang.org/x/text v0.13.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.14.0 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + google.golang.org/api v0.147.0 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect + 
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/grpc v1.58.3 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/square/go-jose.v2 v2.6.0 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + honnef.co/go/tools v0.4.6 // indirect + howett.net/plist v1.0.0 // indirect + k8s.io/api v0.28.2 // indirect + k8s.io/apiextensions-apiserver v0.28.2 // indirect + k8s.io/apimachinery v0.28.2 // indirect + k8s.io/client-go v0.28.2 // indirect + k8s.io/component-base v0.28.2 // indirect + k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-openapi v0.0.0-20230928205116-a78145627833 // indirect + k8s.io/kubelet v0.28.2 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + modernc.org/libc v1.24.1 // indirect + modernc.org/mathutil v1.6.0 // indirect + modernc.org/memory v1.7.2 // indirect + modernc.org/sqlite v1.26.0 // indirect + moul.io/http2curl v1.0.0 // indirect + mvdan.cc/gofumpt v0.5.0 // indirect + mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect + mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect + mvdan.cc/unparam v0.0.0-20230917202934-3ee2d22f45fb // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) diff --git a/test/go.sum b/test/go.sum new file mode 100644 index 000000000..bf5caad25 --- /dev/null +++ b/test/go.sum @@ -0,0 +1,1539 @@ +4d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA= +4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= +4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc= +4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go 
v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= +cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.23.1 h1:V97tBoDaZHb6leicZ1G6DLK2BAaZLJ/7+9BB/En3hR0= +cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v1.1.3 h1:18tKG7DzydKWUnLjonWcJO6wjSCAtzh4GcRKlH/Hrzc= +cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE= +cloud.google.com/go/kms v1.15.3 h1:RYsbxTRmk91ydKCzekI2YjryO4c5Y2M80Zwcs9/D/cI= +cloud.google.com/go/kms v1.15.3/go.mod h1:AJdXqHxS2GlPyduM99s9iGqi2nwbviBbhV/hdmt4iOQ= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/secretmanager v1.11.2 h1:52Z78hH8NBWIqbvIG0wi0EoTaAmSx99KIOAmDXIlX0M= +cloud.google.com/go/secretmanager v1.11.2/go.mod h1:MQm4t3deoSub7+WNwiC4/tRYgDBHJgJPvswqQVB1Vss= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.33.0 h1:PVrDOkIC8qQVa1P3SXGpQvfuJhN2LHOoyZvWs8D2X5M= +cloud.google.com/go/storage v1.33.0/go.mod h1:Hhh/dogNRGca7IWv1RC2YqEn0c0G77ctA/OxflYkiD8= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/4meepo/tagalign v1.3.2 h1:1idD3yxlRGV18VjqtDbqYvQ5pXqQS0wO2dn6M3XstvI= +github.com/4meepo/tagalign v1.3.2/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain 
v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= +github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= +github.com/Abirdcfly/dupword v0.0.13 h1:SMS17YXypwP000fA7Lr+kfyBQyW14tTT+nRv9ASwUUo= +github.com/Abirdcfly/dupword v0.0.13/go.mod h1:Ut6Ue2KgF/kCOawpW4LnExT+xZLQviJPE4klBPMK/5Y= +github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClDcQY= +github.com/Antonboom/errname v0.1.12/go.mod h1:bK7todrzvlaZoQagP1orKzWXv59X/x0W0Io2XT1Ssro= +github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTow= +github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 h1:3ZBs7LAezy8gh0uECsA6CGU43FF3zsx5f4eah5FxTMA= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0/go.mod h1:rZLTje5A9kFBe0pzhpe2TdhRniBF++PRHQuRpR8esVc= +github.com/GoogleCloudPlatform/berglas v1.0.3 h1:NjJYDz13vWct7+joxkBkIZhD6Cmwf5XP5t0jGTvHyJk= +github.com/GoogleCloudPlatform/berglas v1.0.3/go.mod h1:JBsGyi6Z5RwyHXMdEebok6MChukLE+dWXzPor2aeMtw= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OpenPeeDeeP/depguard/v2 v2.1.0 h1:aQl70G173h/GZYhWf36aE5H0KaujXfVMnn/f1kSDVYY= +github.com/OpenPeeDeeP/depguard/v2 v2.1.0/go.mod h1:PUBgk35fX4i7JDmwzlJwJ+GMe6NfO1723wmJMgPThNQ= +github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c h1:kMFnB0vCcX7IL/m9Y5LO+KQYv+t1CQOiFe6+SV2J7bE= +github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= +github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/alecthomas/assert/v2 v2.3.0 h1:mAsH2wmvjsuvyBvAmCtm7zFsBlb8mIHx5ySLVdDZXL0= 
+github.com/alecthomas/assert/v2 v2.3.0/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= +github.com/alecthomas/participle v0.3.0 h1:e8vhrYR1nDjzDxyDwpLO27TWOYWilaT+glkwbPadj50= +github.com/alecthomas/participle v0.3.0/go.mod h1:SW6HZGeZgSIpcUWX3fXpfZhuaWHnmoD5KCVaqSaNTkk= +github.com/alecthomas/participle/v2 v2.1.0 h1:z7dElHRrOEEq45F2TG5cbQihMtNTv8vwldytDj7Wrz4= +github.com/alecthomas/participle/v2 v2.1.0/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= +github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= +github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexkohler/nakedret/v2 v2.0.2 h1:qnXuZNvv3/AxkAb22q/sEsEpcA99YxLFACDtEw9TPxE= +github.com/alexkohler/nakedret/v2 v2.0.2/go.mod h1:2b8Gkk0GsOrqQv/gPWjNLDSKwG8I5moSXG1K4VIBcTQ= +github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= +github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= +github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= +github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= +github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= +github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= +github.com/aws/aws-sdk-go v1.45.26 h1:PJ2NJNY5N/yeobLYe1Y+xLdavBi67ZI8gvph6ftwVCg= +github.com/aws/aws-sdk-go v1.45.26/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA= +github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM= +github.com/aws/aws-sdk-go-v2/config v1.19.0 
h1:AdzDvwH6dWuVARCl3RTLGRc4Ogy+N7yLFxVxXe1ClQ0= +github.com/aws/aws-sdk-go-v2/config v1.19.0/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE= +github.com/aws/aws-sdk-go-v2/credentials v1.13.43 h1:LU8vo40zBlo3R7bAvBVy/ku4nxGEyZe9N8MqAeFTzF8= +github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 h1:PIktER+hwIG286DqXyvVENjgLTAwGgoeriLDD5C+YlQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43 h1:nFBQlGtkbPzp/NjZLuFxRqmT91rLJkgvsEQs68h962Y= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 h1:JRVhO25+r3ar2mKGP7E0LDl8K9/G36gjlqca5iQbaqc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.125.0 h1:XnUTMYuPGOrQjlTcf9XoRYBLrN7xjzakkK0+QyP8fO0= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.125.0/go.mod h1:raUdIDoNuDPn9dMG3cCmIm8RoWOmZUqQPzuw8xpmB8Y= +github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect v1.17.2 h1:bAONrTLzDpmF3udUQciVtDaeeE0aX+txzfjV37H8P4g= +github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect v1.17.2/go.mod h1:cBOWuMN9XoKfsroI4Om3t7Fh171LzZoWizZOSm0soU0= +github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 h1:y6LX9GUoEA3mO0qpFl1ZQHj1rFyPWVphlzebiSt2tKE= +github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2/go.mod h1:Q0LcmaN/Qr8+4aSBrdrXXePqoX0eOuYpJLbYpilmWnA= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 h1:PpbXaecV3sLAS6rjQiaKw4/jyq3Z8gNzmoJupHAoBp0= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2/go.mod h1:fUHpGXr4DrXkEDpGAjClPsviWf+Bszeb0daKE0blxv8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 h1:WWZA/I2K4ptBS1kg0kV1JbBtG/umed0vwHRrmcr9z7k= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.21.5 h1:BvRGAAdEHo+0tpyOlKV14Z49O/iyhqiddIntd0KQ3EA= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.21.5/go.mod h1:A108ijf0IFtqhYApU+Gia80aPSAUfi9dItm+h5fWGJE= +github.com/aws/aws-sdk-go-v2/service/ssm v1.38.2 h1:NMZiW2pbSW/PFCGT/J6R/8xaiFsF/SDdRN49q0NUhA8= +github.com/aws/aws-sdk-go-v2/service/ssm v1.38.2/go.mod h1:qpnJ98BgJ3YUEvHMgJ1OADwaOgqhgv0nxnqAjTKupeY= +github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 h1:JuPGc7IkOP4AaqcZSIcyqLpFSqBWK32rM9+a1g6u73k= +github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 h1:HFiiRkf1SdaAmV3/BHOFZ9DjFynPHj8G/UIO1lQS+fk= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg= +github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 h1:0BkLfgeDjfZnZ+MhB3ONb01u9pwFYTCZVhlsSSBvlbU= +github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ= +github.com/aws/smithy-go v1.15.0 h1:PS/durmlzvAFpQHDs4wi4sNNP9ExsqZh6IlfdHXgKK8= +github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= 
+github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231003182221-725682229e60 h1:ONd54l3oubhjMPcj7HpjPWvlFI6WXsu0/W7DsKCPI9w= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231003182221-725682229e60/go.mod h1:eSn65Noe23f/Z7A2ESqw3dbhAFSEyzZf38nXcKVNxtE= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= +github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU= +github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo= +github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= +github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= +github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= +github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= +github.com/butuzov/ireturn v0.2.0 h1:kCHi+YzC150GE98WFuZQu9yrTn6GEydO2AuPLbTgnO4= +github.com/butuzov/ireturn v0.2.0/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= +github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI= +github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/c-bata/go-prompt v0.2.6 h1:POP+nrHE+DfLYx370bedwNhsqmpCUynWPxuHi0C5vZI= +github.com/c-bata/go-prompt v0.2.6/go.mod h1:/LMAke8wD2FsNu9EXNdHxNLbd9MedkPnCdfpU9wwHfY= +github.com/ccojocar/zxcvbn-go v1.0.1 h1:+sxrANSCj6CdadkcMnvde/GWU1vZiiXRbqYSCalV4/4= +github.com/ccojocar/zxcvbn-go v1.0.1/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= 
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= +github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= +github.com/charmbracelet/bubbles v0.16.1 h1:6uzpAAaT9ZqKssntbvZMlksWHruQLNxg49H5WdeuYSY= +github.com/charmbracelet/bubbles v0.16.1/go.mod h1:2QCp9LFlEsBQMvIYERr7Ww2H2bA7xen1idUDIzm/+Xc= +github.com/charmbracelet/bubbletea v0.24.2 h1:uaQIKx9Ai6Gdh5zpTbGiWpytMU+CfsPp06RaW2cx/SY= +github.com/charmbracelet/bubbletea v0.24.2/go.mod h1:XdrNrV4J8GiyshTtx3DNuYkR1FDaJmO3l2nejekbsgg= +github.com/charmbracelet/harmonica v0.2.0 h1:8NxJWRWg/bzKqqEaaeFNipOu77YR5t8aSwG4pgaUBiQ= +github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao= +github.com/charmbracelet/lipgloss v0.9.1 h1:PNyd3jvaJbg4jRHKWXnCj1akQm4rh8dbEzN1p/u1KWg= +github.com/charmbracelet/lipgloss v0.9.1/go.mod h1:1mPmG4cxScwUQALAAnacHaigiiHB9Pmr+v1VEawJl6I= +github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= +github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= +github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 h1:q2hJAaP1k2wIvVRd/hEHD7lacgqrCPS+k8g1MndzfWY= +github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= +github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= +github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= +github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= 
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= +github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/daixiang0/gci v0.11.2 h1:Oji+oPsp3bQ6bNNgX30NBAVT18P4uBH4sRZnlOlTj7Y= +github.com/daixiang0/gci v0.11.2/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= +github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE= +github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= +github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v24.0.6+incompatible h1:fF+XCQCgJjjQNIMjzaSmiKJSCcfcXb3TWTcc7GAneOY= +github.com/docker/cli v24.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE= +github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= +github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= +github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a 
h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= +github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= +github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= +github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= +github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= +github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= +github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/getsentry/sentry-go v0.25.0 h1:q6Eo+hS+yoJlTO3uu/azhQadsD8V+jQn2D8VvX1eOyI= +github.com/getsentry/sentry-go v0.25.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/glebarez/go-sqlite v1.21.2 h1:3a6LFC4sKahUunAmynQKLZceZCOzUthkRkEAl9gAXWo= +github.com/glebarez/go-sqlite v1.21.2/go.mod h1:sfxdZyhQjTM2Wry3gVYWaW072Ri1WMdWJi0k6+3382k= +github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= +github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= +github.com/go-critic/go-critic v0.9.0 
h1:Pmys9qvU3pSML/3GEQ2Xd9RZ/ip+aXHKILuxczKGV/U= +github.com/go-critic/go-critic v0.9.0/go.mod h1:5P8tdXL7m/6qnyG6oRAlYLORvoXH0WDypYgAEmagT40= +github.com/go-errors/errors v1.5.0 h1:/EuijeGOu7ckFxzhkj4CXJ8JaenxK7bKUxpPYqeLHqQ= +github.com/go-errors/errors v1.5.0/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo= +github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY= +github.com/go-git/go-git/v5 v5.9.0/go.mod h1:RKIqga24sWdMGZF+1Ekv9kylsDz6LzdTSI2s/OsZWE0= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= +github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= 
+github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= +github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= +github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= +github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= +github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= +github.com/go-toolsmith/astequal v1.1.0 h1:kHKm1AWqClYn15R0K1KKE4RG614D46n+nqUQ06E1dTw= +github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= +github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= +github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= +github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= +github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= +github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk= +github.com/go-toolsmith/pkgload v1.2.2/go.mod h1:R2hxLNRKuAsiXCo2i5J6ZQPhnPMOVtU+f0arbFPWCus= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= +github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= +github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= +github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= +github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= +github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.2 
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golangci/check 
v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo= +github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= +github.com/golangci/gofmt v0.0.0-20231004121655-933a45ce4cd8 h1:Ul3cKQpxtg3mOShYi9tLQmY3c9z4i+yzAWP3Q5SPLss= +github.com/golangci/gofmt v0.0.0-20231004121655-933a45ce4cd8/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM= +github.com/golangci/golangci-lint v1.54.2 h1:oR9zxfWYxt7hFqk6+fw6Enr+E7F0SN2nqHhJYyIb0yo= +github.com/golangci/golangci-lint v1.54.2/go.mod h1:vnsaCTPKCI2wreL9tv7RkHDwUrz3htLjed6+6UsvcwU= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g= +github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI= +github.com/golangci/revgrep v0.5.0 h1:GGBqHFtFOeHiSUQtFVZXPJtVZYOGB4iVlAjaoFRBQvY= +github.com/golangci/revgrep v0.5.0/go.mod h1:bjAMA+Sh/QUfTDcHzxfyHxr4xKvllVr/0sCv2e7jJHA= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ= +github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= +github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ= 
+github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 h1:mrEEilTAUmaAORhssPPkxj84TsHrPMLBGW2Z4SoTxm8= +github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= +github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70= +github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= +github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.0.0/go.mod 
h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= +github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= +github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= +github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs= +github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= +github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-sockaddr v1.0.5 h1:dvk7TIXCZpmfOlM+9mlcrWmWjw/wlKT+VDq2wMvfPJU= +github.com/hashicorp/go-sockaddr v1.0.5/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl/v2 v2.18.0 h1:wYnG7Lt31t2zYkcquwgKo6MWXzRUDIeIVU5naZwHLl8= +github.com/hashicorp/hcl/v2 v2.18.0/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= +github.com/hashicorp/vault/api v1.10.0 h1:/US7sIjWN6Imp4o/Rj1Ce2Nr5bki/AXi9vAW3p2tOJQ= +github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/hnakamur/go-scp v1.0.2 h1:i2I0O0pjAaX4BXJFrp1blsIdjOBekc5QOaB0AbdO1d0= +github.com/hnakamur/go-scp v1.0.2/go.mod h1:Dh9GtPFBkiDI1KY1nmf+W7eVCWWmRjJitkCYgvWv+Zc= +github.com/hnakamur/go-sshd v0.0.0-20170228152141-dccc3399d26a h1:p8dbHRhXhPSwVZqk76FguLzyeCZuvCqFlaYSqXOzbyI= +github.com/hnakamur/go-sshd v0.0.0-20170228152141-dccc3399d26a/go.mod h1:R+6I3EdoV6ofbNqJsArhT9+Pnu57DxtmDJAQfxkCbGo= +github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f h1:7LYC+Yfkj3CTRcShK0KOL/w6iTiKyqqBA9a41Wnggw8= +github.com/hokaccha/go-prettyjson 
v0.0.0-20211117102719-0474bc63780f/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jgautheron/goconst v1.6.0 h1:gbMLWKRMkzAc6kYsQL6/TxaoBUg3Jm9LSF/Ih1ADWGA= +github.com/jgautheron/goconst v1.6.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/joncrlsn/dque v0.0.0-20211108142734-c2ef48c5192a h1:sfe532Ipn7GX0V6mHdynBk393rDmqgI0QmjLK7ct7TU= +github.com/joncrlsn/dque v0.0.0-20211108142734-c2ef48c5192a/go.mod h1:dNKs71rs2VJGBAmttu7fouEsRQlRjxy0p1Sx+T5wbpY= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod 
h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= +github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8= +github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8= +github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= +github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g= +github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/knqyf263/go-rpmdb v0.0.0-20231008124120-ac49267ab4e1 h1:lrciwn7tj0j7HS5DfpAFnFZEqxzPGIkVOVS89dLOkf0= +github.com/knqyf263/go-rpmdb v0.0.0-20231008124120-ac49267ab4e1/go.mod h1:9LQcoMCMQ9vrF7HcDtXfvqGO4+ddxFQ8+YF/0CVGDww= +github.com/kofalt/go-memoize v0.0.0-20220914132407-0b5d6a304579 h1:RbY+urZu3ri7Medi8pY3ovt1+XQxxv7zSkgmEZ5E0CU= +github.com/kofalt/go-memoize v0.0.0-20220914132407-0b5d6a304579/go.mod h1:PifxINf6wYU0USPBk0z1Z8Pka1AqeyCJAp9ecCcNL5Q= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper v0.6.3 
h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= +github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= +github.com/kunwardeep/paralleltest v1.0.8 h1:Ul2KsqtzFxTlSU7IP0JusWlLiNqQaloB9vguyjbE558= +github.com/kunwardeep/paralleltest v1.0.8/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= +github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= +github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= +github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= +github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= +github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= +github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU= +github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= +github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8LFgLN4= +github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= +github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= +github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04= +github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2srm/LN17lpybq15AryXIRcWYLE= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= 
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= +github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= +github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-tty v0.0.3/go.mod h1:ihxohKRERHTVzN+aSVRwACLCeqIoZAWpoICkkvrWyR0= +github.com/mattn/go-tty v0.0.5 h1:s09uXI7yDbXzzTTfw3zonKFzwGkyYlgU3OMjqA0ddz4= +github.com/mattn/go-tty v0.0.5/go.mod h1:u5GGXBtZU6RQoKV8gY5W6UhMudbR5vXnUe7j3pxse28= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= +github.com/mgechev/revive v1.3.4 h1:k/tO3XTaWY4DEHal9tWBkkUMJYO/dLDVyMmAQxmIMDc= +github.com/mgechev/revive v1.3.4/go.mod h1:W+pZCMu9qj8Uhfs1iJMQsEFLRozUfvwFwqVvRbSNLVw= +github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= +github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure 
v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA= +github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI= +github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo= +github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= +github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= +github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= +github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nishanths/exhaustive v0.11.0 h1:T3I8nUGhl/Cwu5Z2hfc92l0e04D2GEW6e0l8pzda2l0= +github.com/nishanths/exhaustive v0.11.0/go.mod h1:RqwDsZ1xY0dNdqHho2z6X+bgzizwbLYOWnZbbl2wLB4= +github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= +github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= +github.com/nunnatsa/ginkgolinter v0.14.0 
h1:XQPNmw+kZz5cC/HbFK3mQutpjzAQv1dHregRA+4CGGg= +github.com/nunnatsa/ginkgolinter v0.14.0/go.mod h1:cm2xaqCUCRd7qcP4DqbVvpcyEMkuLM9CF0wY6VASohk= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo/v2 v2.12.1 h1:uHNEO1RP2SpuZApSkel9nEh1/Mu+hmQe7Q+Pepg5OYA= +github.com/onsi/ginkgo/v2 v2.12.1/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= +github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= +github.com/pkg/term v1.2.0-beta.2 h1:L3y/h2jkuBVFdWiJvNfYfKmzcCnILw7mJWm2JQuMppw= +github.com/pkg/term v1.2.0-beta.2/go.mod 
h1:E25nymQcrSllhX42Ok8MRm1+hyBdHY0dCeiKZ9jpNGw= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v1.4.5 h1:70YWmMy4FgRHehGNOUask3HtSFSOLKgmDn7ryNe7LqI= +github.com/polyfloyd/go-errorlint v1.4.5/go.mod h1:sIZEbFoDOCnTYYZoVkjc4hTnM459tuWA9H/EkdXwsKk= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/quasilyte/go-ruleguard v0.4.0 h1:DyM6r+TKL+xbKB4Nm7Afd1IQh9kEUKQs2pboWGKtvQo= +github.com/quasilyte/go-ruleguard v0.4.0/go.mod h1:Eu76Z/R8IXtViWUIHkE3p8gdH3/PKk1eh3YGfaEof10= +github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= +github.com/quasilyte/gogrep v0.5.0/go.mod 
h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= +github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.31.0 h1:FcTR3NnLWW+NnTwwhFWiJSZr4ECLpqCm6QsEnyvbV4A= +github.com/rs/zerolog v1.31.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw= +github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50= +github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ= +github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sahilm/fuzzy v0.1.0 h1:FzWGaw2Opqyu+794ZQ9SYifWv2EIXpwP4q8dY1kDAwI= +github.com/sahilm/fuzzy v0.1.0/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= +github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= +github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= +github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.24.0 h1:MKNzmXtGh5N0y74Z/CIaJh4GlB364l0K1RUT08WSWAc= +github.com/sashamelentyev/usestdlibvars 
v1.24.0/go.mod h1:9cYkq+gYJ+a5W2RPdhfaSCnTVUC1OQP/bSiiBhq3OZE= +github.com/securego/gosec/v2 v2.18.1 h1:xnnehWg7dIW8qrRPGm8ykY21zp2MueKyC99Vlcuj96I= +github.com/securego/gosec/v2 v2.18.1/go.mod h1:ZUTcKD9gAFip1lLGHWCjkoBQJyaEzePTNzjwlL2HHoE= +github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= +github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= +github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= +github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI= +github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE= +github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= +github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= +github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= +github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8= +github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= +github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= +github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= +github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/smarty/assertions v1.15.1 h1:812oFiXI+G55vxsFf+8bIZ1ux30qtkdqzKbEFwyX3Tk= +github.com/smarty/assertions v1.15.1/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= +github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= +github.com/smartystreets/gunit v1.4.2 h1:tyWYZffdPhQPfK5VsMQXfauwnJkqg7Tv5DLuQVYxq3Q= +github.com/smartystreets/gunit v1.4.2/go.mod 
h1:ZjM1ozSIMJlAz/ay4SG8PeKF00ckUp+zMHZXV9/bvak= +github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= +github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= +github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c h1:zqmyTlQyufRC65JnImJ6H1Sf7BDj8bG31EV919NVEQc= +github.com/spf13/pflag v1.0.6-0.20201009195203-85dd5c8bc61c/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI= +github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI= +github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= +github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.1 h1:4VhoImhV/Bm0ToFkXFi8hXNXwpDRZ/ynw3amt82mzq0= +github.com/stretchr/objx v0.5.1/go.mod h1:/iHQpkQwBD6DLUmQ4pE+s1TXdob1mORJ4/UFdrifcy0= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= +github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= +github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= +github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= +github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.4.15 h1:QzdIs+XB8q+U1WmQEWKHQbKmCw06QuQM7gLx/dky2RM= +github.com/tetafro/godot v1.4.15/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= +github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M= +github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= +github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= +github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= +github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQpIXDJRhQ= +github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE= +github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= +github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= +github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI= +github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= +github.com/uudashr/gocognit v1.1.1 h1:qIj6KhmcGQGBiWtaKH6ZlIyDGa6br2febZNZ6MDzqMw= +github.com/uudashr/gocognit v1.1.1/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= +github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= +github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= +github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= +github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= +github.com/yeya24/promlinter v0.2.0 
h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= +github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= +github.com/ykadowak/zerologlint v0.1.3 h1:TLy1dTW3Nuc+YE3bYRPToG1Q9Ej78b5UUN6bjbGdxPE= +github.com/ykadowak/zerologlint v0.1.3/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zclconf/go-cty v1.14.0 h1:/Xrd39K7DXbHzlisFP9c4pHao4yyf+/Ug9LEz+Y/yhc= +github.com/zclconf/go-cty v1.14.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +gitlab.com/bosi/decorder v0.4.1 h1:VdsdfxhstabyhZovHafFw+9eJ6eU0d2CkFNJcZz/NU4= +gitlab.com/bosi/decorder v0.4.1/go.mod h1:jecSqWUew6Yle1pCr2eLWTensJMmsxHsBwt+PVbkAqA= +go-simpler.org/assert v0.6.0 h1:QxSrXa4oRuo/1eHMXSBFHKvJIpWABayzKldqZyugG7E= +go-simpler.org/assert v0.6.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= +go.mondoo.com/cnquery/v9 v9.2.4-0.20231020125424-f2f6ce5a5ad0 h1:dQTbrsfIl6sVeY4PKPE7w41iX63TvvWh3dvUIMoqYeY= +go.mondoo.com/cnquery/v9 v9.2.4-0.20231020125424-f2f6ce5a5ad0/go.mod h1:lBzCOIonmKRewB+o2trn7R6r4exONd9V6UJM600tHds= +go.mondoo.com/ranger-rpc v0.5.2 h1:UrcVtMIinzfWsuSzZKibbMqcGZSARInKJi0Xs2AxXeU= +go.mondoo.com/ranger-rpc v0.5.2/go.mod h1:y5qqdFhOnSLKnAo8lXC0FKZoKLUYgLENvneww+q+7ws= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= +go.tmz.dev/musttag v0.7.2 h1:1J6S9ipDbalBSODNT5jCep8dhZyMr4ttnjQagmGYR5s= +go.tmz.dev/musttag v0.7.2/go.mod h1:m6q5NiiSKMnQYokefa2xGoyoXnrswCbJ0AWYzf4Zs28= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 
h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20231006140011-7918f672742d h1:NRn/Afz91uVUyEsxMp4lGGxpr5y1qz+Iko60dbkfvLQ= 
+golang.org/x/exp/typeparams v0.0.0-20231006140011-7918f672742d/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net 
v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828081204-131dc92a58d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10/go.mod 
h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= +golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.147.0 h1:Can3FaQo9LlVqxJCodNmeZW/ib3/qKAY3rFeXiHo5gc= +google.golang.org/api v0.147.0/go.mod h1:pQ/9j83DcmPd/5C9e2nFOdjjNkDZ1G+zkbK2uvdkJMs= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= 
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto 
v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= +gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8= +honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= +howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= +howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= +k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= +k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= +k8s.io/apiextensions-apiserver v0.28.2 h1:J6/QRWIKV2/HwBhHRVITMLYoypCoPY1ftigDM0Kn+QU= +k8s.io/apiextensions-apiserver v0.28.2/go.mod h1:5tnkxLGa9nefefYzWuAlWZ7RZYuN/765Au8cWLA6SRg= +k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= +k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= +k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= +k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY= +k8s.io/component-base v0.28.2 h1:Yc1yU+6AQSlpJZyvehm/NkJBII72rzlEsd6MkBQ+G0E= +k8s.io/component-base v0.28.2/go.mod h1:4IuQPQviQCg3du4si8GpMrhAIegxpsgPngPRR/zWpzc= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230928205116-a78145627833 h1:iFFEmmB7szQhJP42AvRD2+gzdVP7EuIKY1rJgxf0JZY= +k8s.io/kube-openapi v0.0.0-20230928205116-a78145627833/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubelet v0.28.2 h1:wqe5zKtVhNWwtdABU0mpcWVe8hc6VdVvs2kqQridZRw= +k8s.io/kubelet v0.28.2/go.mod h1:rvd0e7T5TjPcfZvy62P90XhFzp0IhPIOy+Pqy3Rtipo= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +modernc.org/libc v1.24.1 h1:uvJSeCKL/AgzBo2yYIPPTy82v21KgGnizcGYfBHaNuM= +modernc.org/libc v1.24.1/go.mod h1:FmfO1RLrU3MHJfyi9eYYmZBfi/R+tqZ6+hQ3yQQUkak= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E= +modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E= +modernc.org/sqlite v1.26.0 h1:SocQdLRSYlA8W99V8YH0NES75thx19d9sB/aFc4R8Lw= +modernc.org/sqlite v1.26.0/go.mod 
h1:FL3pVXie73rg3Rii6V/u5BoHlSoyeZeIgKZEgHARyCU= +moul.io/http2curl v1.0.0 h1:6XwpyZOYsgZJrU8exnG87ncVkU1FVCcTRpwzOkTDUi8= +moul.io/http2curl v1.0.0/go.mod h1:f6cULg+e4Md/oW1cYmwW4IWQOVl2lGbmCNGOHvzX2kE= +mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E= +mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20230917202934-3ee2d22f45fb h1:xiF91GJnDSbyPdiZB5d52N2VpZfGhjM4Ji75cjzuooQ= +mvdan.cc/unparam v0.0.0-20230917202934-3ee2d22f45fb/go.mod h1:ZzZjEpJDOmx8TdVU6umamY3Xy0UAQUI2DHbf05USVbI= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= +sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/test/k8s_test.go b/test/k8s_test.go new file mode 100644 index 000000000..661f1472b --- /dev/null +++ b/test/k8s_test.go @@ -0,0 +1,61 @@ +// Copyright (c) Mondoo, Inc. 
+// SPDX-License-Identifier: BUSL-1.1
+
+package test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.mondoo.com/cnquery/v9/providers"
+	"go.mondoo.com/cnquery/v9/providers-sdk/v1/inventory"
+	"go.mondoo.com/cnquery/v9/providers/k8s/connection/shared"
+)
+
+func TestKubernetesBundles(t *testing.T) {
+	providers.EnsureProvider(providers.ProviderLookup{ID: "go.mondoo.com/cnquery/v9/providers/k8s"}, true, nil)
+
+	type TestCase struct {
+		bundleFile string
+		testDir    string
+		policyMrn  string
+		score      uint32
+	}
+
+	tests := []TestCase{
+		{
+			bundleFile: "./testdata/mondoo-kubernetes-security.mql.yaml",
+			testDir:    "./testdata/k8s/pass/pod.yaml",
+			policyMrn:  "//policy.api.mondoo.app/policies/mondoo-kubernetes-security",
+			score:      100,
+		},
+		{
+			bundleFile: "./testdata/mondoo-kubernetes-security.mql.yaml",
+			testDir:    "./testdata/k8s/fail/pod-nonroot.yaml",
+			policyMrn:  "//policy.api.mondoo.app/policies/mondoo-kubernetes-security",
+			score:      0x0,
+		},
+	}
+
+	for i := range tests {
+		test := tests[i]
+		t.Run(test.testDir, func(t *testing.T) {
+			report, err := runBundle(test.bundleFile, test.policyMrn, &inventory.Asset{
+				Connections: []*inventory.Config{{
+					Type: "k8s",
+					Options: map[string]string{
+						shared.OPTION_MANIFEST: test.testDir,
+					},
+					Discover: &inventory.Discovery{
+						Targets: []string{"pods"}, // ignore the manifest asset, which does not return anything
+					},
+				}},
+			})
+			require.NoError(t, err)
+
+			score := report.Scores[test.policyMrn]
+			assert.Equal(t, test.score, score.Value)
+		})
+	}
+}
diff --git a/test/terraform_test.go b/test/terraform_test.go
new file mode 100644
index 000000000..c557f4689
--- /dev/null
+++ b/test/terraform_test.go
@@ -0,0 +1,80 @@
+// Copyright (c) Mondoo, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"go.mondoo.com/cnquery/v9/providers"
+	"go.mondoo.com/cnquery/v9/providers-sdk/v1/inventory"
+)
+
+func TestTerraformBundles(t *testing.T) {
+	providers.EnsureProvider(providers.ProviderLookup{ID: "go.mondoo.com/cnquery/v9/providers/terraform"}, true, nil)
+	type TestCase struct {
+		bundleFile string
+		testDir    string
+		policyMrn  string
+		score      uint32
+	}
+
+	tests := []TestCase{
+		{
+			bundleFile: "./testdata/mondoo-terraform-aws-security.mql.yaml",
+			testDir:    "./testdata/terraform/aws-3.xx/pass",
+			policyMrn:  "//policy.api.mondoo.app/policies/mondoo-terraform-aws-security",
+			score:      100,
+		}, {
+			bundleFile: "./testdata/mondoo-terraform-aws-security.mql.yaml",
+			testDir:    "./testdata/terraform/aws-3.xx/fail",
+			policyMrn:  "//policy.api.mondoo.app/policies/mondoo-terraform-aws-security",
+			// NOTE: terraform-aws-security-s3-bucket-level-public-access-prohibited is not correctly implemented; we still need to pay the piper here.
+			// 3/28/2022 - Tests are passing now but not for the right reasons. We still need to revisit this query since it involves testing
+			// whether configuration was applied to a specific bucket.
+ score: 0, + }, { + bundleFile: "./testdata/mondoo-terraform-aws-security.mql.yaml", + testDir: "./testdata/terraform/aws-4.xx/pass", + policyMrn: "//policy.api.mondoo.app/policies/mondoo-terraform-aws-security", + score: 100, + }, { + bundleFile: "./testdata/mondoo-terraform-aws-security.mql.yaml", + testDir: "./testdata/terraform/aws-4.xx/fail", + policyMrn: "//policy.api.mondoo.app/policies/mondoo-terraform-aws-security", + score: 0, + }, { + bundleFile: "./testdata/mondoo-terraform-gcp-security.mql.yaml", + testDir: "./testdata/terraform/gcp/pass", + policyMrn: "//policy.api.mondoo.app/policies/mondoo-terraform-gcp-security", + score: 100, + }, { + bundleFile: "./testdata/mondoo-terraform-gcp-security.mql.yaml", + testDir: "./testdata/terraform/gcp/fail", + policyMrn: "//policy.api.mondoo.app/policies/mondoo-terraform-gcp-security", + score: 0, + }, + } + + for i := range tests { + test := tests[i] + t.Run(test.testDir, func(t *testing.T) { + report, err := runBundle(test.bundleFile, test.policyMrn, &inventory.Asset{ + Connections: []*inventory.Config{ + { + Type: "terraform-hcl", + Options: map[string]string{ + "path": test.testDir, + }, + }, + }, + }) + require.NoError(t, err) + + score := report.Scores[test.policyMrn] + assert.Equal(t, test.score, score.Value) + }) + } +} diff --git a/test/testdata/mondoo-kubernetes-security.mql.yaml b/test/testdata/mondoo-kubernetes-security.mql.yaml new file mode 100644 index 000000000..cea4a7b1c --- /dev/null +++ b/test/testdata/mondoo-kubernetes-security.mql.yaml @@ -0,0 +1,6454 @@ +# Copyright (c) Mondoo, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +policies: + - uid: mondoo-kubernetes-security + name: Kubernetes Cluster and Workload Security + version: 1.1.0 + license: BUSL-1.1 + tags: + mondoo.com/category: security + mondoo.com/platform: linux,kubernetes,k8s + authors: + - name: Mondoo, Inc + email: hello@mondoo.com + docs: + desc: |- + # Overview + + The Kubernetes Cluster and Workload Security by Mondoo provides guidance for establishing secure Kubernetes cluster configurations and workload deployments. + + If you have questions, comments, or have identified ways to improve this policy, please write us at hello@mondoo.com, or reach out in [GitHub Discussions](https://github.com/orgs/mondoohq/discussions). + + ## Remote scan + + Remote scans use native transports in cnspec to provide on demand scan results without the need to install any agents, or integration. + + For a complete list of native transports run: + + ```bash + cnspec scan --help + ``` + + ### Prerequisites + + Remote scans of Kubernetes clusters requires a `KUBECONFIG` with access to the cluster you want to scan. + + ### Scan a Kubernetes cluster + + Open a terminal and configure an environment variable with the path to your `KUBECONFIG`: + + ```bash + export KUBECONFIG=/path/to/kubeconfig + ``` + + Run a scan of the Kubernetes cluster: + + ```bash + cnspec scan k8s + ``` + + ## Join the community! + + Our goal is to build policies that are simple to deploy, accurate, and actionable. + + If you have any suggestions for how to improve this policy, or if you need support, [join the community](https://github.com/orgs/mondoohq/discussions) in GitHub Discussions. 
+ groups: + - title: Kubernetes API Server + filters: | + asset.family.contains(_ == 'linux') + processes.where( executable == /kube-apiserver/ ).list != [] + checks: + - uid: mondoo-kubernetes-security-api-server-no-anonymous-auth + - uid: mondoo-kubernetes-security-https-api-server + - uid: mondoo-kubernetes-security-secure-admin-conf + - uid: mondoo-kubernetes-security-secure-controller-manager_conf + - uid: mondoo-kubernetes-security-secure-etcd-data-dir + - uid: mondoo-kubernetes-security-secure-kube-apiserver-yml + - uid: mondoo-kubernetes-security-secure-pki-directory + - uid: mondoo-kubernetes-security-secure-scheduler_conf + - title: Kubernetes kubelet + filters: | + asset.family.contains(_ == 'linux') + processes.where( executable == /kubelet/ ).list != [] + checks: + - uid: mondoo-kubernetes-security-kubelet-anonymous-authentication + - uid: mondoo-kubernetes-security-kubelet-authorization-mode + - uid: mondoo-kubernetes-security-kubelet-event-record-qps + - uid: mondoo-kubernetes-security-kubelet-iptables-util-chains + - uid: mondoo-kubernetes-security-kubelet-protect-kernel-defaults + - uid: mondoo-kubernetes-security-kubelet-read-only-port + - uid: mondoo-kubernetes-security-kubelet-rotate-certificates + - uid: mondoo-kubernetes-security-kubelet-strong-ciphers + - uid: mondoo-kubernetes-security-kubelet-tls-certificate + - uid: mondoo-kubernetes-security-secure-kubelet-cert-authorities + - uid: mondoo-kubernetes-security-secure-kubelet-config + - title: Kubernetes CronJobs Security + filters: asset.platform == "k8s-cronjob" + checks: + - uid: mondoo-kubernetes-security-cronjob-allowprivilegeescalation + - uid: mondoo-kubernetes-security-cronjob-capability-net-raw + - uid: mondoo-kubernetes-security-cronjob-capability-sys-admin + - uid: mondoo-kubernetes-security-cronjob-containerd-socket + - uid: mondoo-kubernetes-security-cronjob-crio-socket + - uid: mondoo-kubernetes-security-cronjob-docker-socket + - uid: mondoo-kubernetes-security-cronjob-hostipc + - uid: mondoo-kubernetes-security-cronjob-hostnetwork + - uid: mondoo-kubernetes-security-cronjob-hostpath-readonly + - uid: mondoo-kubernetes-security-cronjob-hostpid + - uid: mondoo-kubernetes-security-cronjob-imagepull + - uid: mondoo-kubernetes-security-cronjob-limitcpu + - uid: mondoo-kubernetes-security-cronjob-limitmemory + - uid: mondoo-kubernetes-security-cronjob-ports-hostport + - uid: mondoo-kubernetes-security-cronjob-privilegedcontainer + - uid: mondoo-kubernetes-security-cronjob-readonlyrootfilesystem + - uid: mondoo-kubernetes-security-cronjob-runasnonroot + - uid: mondoo-kubernetes-security-cronjob-serviceaccount + - title: Kubernetes StatefulSets Security + filters: asset.platform == "k8s-statefulset" + checks: + - uid: mondoo-kubernetes-security-statefulset-allowprivilegeescalation + - uid: mondoo-kubernetes-security-statefulset-capability-net-raw + - uid: mondoo-kubernetes-security-statefulset-capability-sys-admin + - uid: mondoo-kubernetes-security-statefulset-containerd-socket + - uid: mondoo-kubernetes-security-statefulset-crio-socket + - uid: mondoo-kubernetes-security-statefulset-docker-socket + - uid: mondoo-kubernetes-security-statefulset-hostipc + - uid: mondoo-kubernetes-security-statefulset-hostnetwork + - uid: mondoo-kubernetes-security-statefulset-hostpath-readonly + - uid: mondoo-kubernetes-security-statefulset-hostpid + - uid: mondoo-kubernetes-security-statefulset-imagepull + - uid: mondoo-kubernetes-security-statefulset-limitcpu + - uid: mondoo-kubernetes-security-statefulset-limitmemory + - 
uid: mondoo-kubernetes-security-statefulset-ports-hostport + - uid: mondoo-kubernetes-security-statefulset-privilegedcontainer + - uid: mondoo-kubernetes-security-statefulset-readonlyrootfilesystem + - uid: mondoo-kubernetes-security-statefulset-runasnonroot + - uid: mondoo-kubernetes-security-statefulset-serviceaccount + - title: Kubernetes Deployments Security + filters: asset.platform == "k8s-deployment" + checks: + - uid: mondoo-kubernetes-security-deployment-allowprivilegeescalation + - uid: mondoo-kubernetes-security-deployment-capability-net-raw + - uid: mondoo-kubernetes-security-deployment-capability-sys-admin + - uid: mondoo-kubernetes-security-deployment-containerd-socket + - uid: mondoo-kubernetes-security-deployment-crio-socket + - uid: mondoo-kubernetes-security-deployment-docker-socket + - uid: mondoo-kubernetes-security-deployment-hostipc + - uid: mondoo-kubernetes-security-deployment-hostnetwork + - uid: mondoo-kubernetes-security-deployment-hostpath-readonly + - uid: mondoo-kubernetes-security-deployment-hostpid + - uid: mondoo-kubernetes-security-deployment-imagepull + - uid: mondoo-kubernetes-security-deployment-k8s-dashboard + - uid: mondoo-kubernetes-security-deployment-limitcpu + - uid: mondoo-kubernetes-security-deployment-limitmemory + - uid: mondoo-kubernetes-security-deployment-ports-hostport + - uid: mondoo-kubernetes-security-deployment-privilegedcontainer + - uid: mondoo-kubernetes-security-deployment-readonlyrootfilesystem + - uid: mondoo-kubernetes-security-deployment-runasnonroot + - uid: mondoo-kubernetes-security-deployment-serviceaccount + - uid: mondoo-kubernetes-security-deployment-tiller + - title: Kubernetes Jobs Security + filters: asset.platform == "k8s-job" + checks: + - uid: mondoo-kubernetes-security-job-allowprivilegeescalation + - uid: mondoo-kubernetes-security-job-capability-net-raw + - uid: mondoo-kubernetes-security-job-capability-sys-admin + - uid: mondoo-kubernetes-security-job-containerd-socket + - uid: mondoo-kubernetes-security-job-crio-socket + - uid: mondoo-kubernetes-security-job-docker-socket + - uid: mondoo-kubernetes-security-job-hostipc + - uid: mondoo-kubernetes-security-job-hostnetwork + - uid: mondoo-kubernetes-security-job-hostpath-readonly + - uid: mondoo-kubernetes-security-job-hostpid + - uid: mondoo-kubernetes-security-job-imagepull + - uid: mondoo-kubernetes-security-job-limitcpu + - uid: mondoo-kubernetes-security-job-limitmemory + - uid: mondoo-kubernetes-security-job-ports-hostport + - uid: mondoo-kubernetes-security-job-privilegedcontainer + - uid: mondoo-kubernetes-security-job-readonlyrootfilesystem + - uid: mondoo-kubernetes-security-job-runasnonroot + - uid: mondoo-kubernetes-security-job-serviceaccount + - title: Kubernetes ReplicaSets Security + filters: asset.platform == "k8s-replicaset" + checks: + - uid: mondoo-kubernetes-security-replicaset-allowprivilegeescalation + - uid: mondoo-kubernetes-security-replicaset-capability-net-raw + - uid: mondoo-kubernetes-security-replicaset-capability-sys-admin + - uid: mondoo-kubernetes-security-replicaset-containerd-socket + - uid: mondoo-kubernetes-security-replicaset-crio-socket + - uid: mondoo-kubernetes-security-replicaset-docker-socket + - uid: mondoo-kubernetes-security-replicaset-hostipc + - uid: mondoo-kubernetes-security-replicaset-hostnetwork + - uid: mondoo-kubernetes-security-replicaset-hostpath-readonly + - uid: mondoo-kubernetes-security-replicaset-hostpid + - uid: mondoo-kubernetes-security-replicaset-imagepull + - uid: 
mondoo-kubernetes-security-replicaset-limitcpu + - uid: mondoo-kubernetes-security-replicaset-limitmemory + - uid: mondoo-kubernetes-security-replicaset-ports-hostport + - uid: mondoo-kubernetes-security-replicaset-privilegedcontainer + - uid: mondoo-kubernetes-security-replicaset-readonlyrootfilesystem + - uid: mondoo-kubernetes-security-replicaset-runasnonroot + - uid: mondoo-kubernetes-security-replicaset-serviceaccount + - title: Kubernetes DaemonSets Security + filters: asset.platform == "k8s-daemonset" + checks: + - uid: mondoo-kubernetes-security-daemonset-allowprivilegeescalation + - uid: mondoo-kubernetes-security-daemonset-capability-net-raw + - uid: mondoo-kubernetes-security-daemonset-capability-sys-admin + - uid: mondoo-kubernetes-security-daemonset-containerd-socket + - uid: mondoo-kubernetes-security-daemonset-crio-socket + - uid: mondoo-kubernetes-security-daemonset-docker-socket + - uid: mondoo-kubernetes-security-daemonset-hostipc + - uid: mondoo-kubernetes-security-daemonset-hostnetwork + - uid: mondoo-kubernetes-security-daemonset-hostpath-readonly + - uid: mondoo-kubernetes-security-daemonset-hostpid + - uid: mondoo-kubernetes-security-daemonset-imagepull + - uid: mondoo-kubernetes-security-daemonset-limitcpu + - uid: mondoo-kubernetes-security-daemonset-limitmemory + - uid: mondoo-kubernetes-security-daemonset-ports-hostport + - uid: mondoo-kubernetes-security-daemonset-privilegedcontainer + - uid: mondoo-kubernetes-security-daemonset-readonlyrootfilesystem + - uid: mondoo-kubernetes-security-daemonset-runasnonroot + - uid: mondoo-kubernetes-security-daemonset-serviceaccount + - title: Kubernetes Pods Security + filters: asset.platform == "k8s-pod" + checks: + - uid: mondoo-kubernetes-security-pod-allowprivilegeescalation + - uid: mondoo-kubernetes-security-pod-capability-net-raw + - uid: mondoo-kubernetes-security-pod-capability-sys-admin + - uid: mondoo-kubernetes-security-pod-containerd-socket + - uid: mondoo-kubernetes-security-pod-crio-socket + - uid: mondoo-kubernetes-security-pod-docker-socket + - uid: mondoo-kubernetes-security-pod-hostipc + - uid: mondoo-kubernetes-security-pod-hostnetwork + - uid: mondoo-kubernetes-security-pod-hostpath-readonly + - uid: mondoo-kubernetes-security-pod-hostpid + - uid: mondoo-kubernetes-security-pod-imagepull + - uid: mondoo-kubernetes-security-pod-k8s-dashboard + - uid: mondoo-kubernetes-security-pod-limitcpu + - uid: mondoo-kubernetes-security-pod-limitmemory + - uid: mondoo-kubernetes-security-pod-ports-hostport + - uid: mondoo-kubernetes-security-pod-privilegedcontainer + - uid: mondoo-kubernetes-security-pod-readonlyrootfilesystem + - uid: mondoo-kubernetes-security-pod-runasnonroot + - uid: mondoo-kubernetes-security-pod-serviceaccount + - uid: mondoo-kubernetes-security-pod-tiller + scoring_system: 2 +props: + - uid: allowedCiphers + title: Define the hardened SSL/ TLS ciphers + mql: | + return ["TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA", 
"TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_256_GCM_SHA384"] +queries: + - uid: mondoo-kubernetes-security-kubelet-anonymous-authentication + title: Disable anonymous authentication for kubelet + impact: 100 + mql: | + k8s.kubelet.configuration['authentication']['anonymous']['enabled'] == false + docs: + desc: | + Ensure that the kubelet is configured to disable anonymous requests to the kubelet server. + Otherwise the kubelet will allow unauthenticated access to its HTTPS endpoint. Request will have the privileges of the role `system:public-info-viewer`. This might expose data to an attacker. + audit: | + If running the kubelet with the CLI parameter '--anonymous-auth', or running with 'authentication.anonymous.enabled' defined in the kubelet configuration file, ensure that the value is set to 'false'. + remediation: | + Set the '--anonymous-auth' CLI parameter and/or the 'authentication.anonymous.enabled' field in the kubelet configuration file to 'false'. + refs: + - url: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication + title: Kubelet authentication + - uid: mondoo-kubernetes-security-kubelet-event-record-qps + title: Configure kubelet to capture all event creation + impact: 30 + mql: | + k8s.kubelet.configuration['eventRecordQPS'] == 0 + docs: + desc: | + Ensure that the kubelet is configured to capture all event creation so as to avoid potentially not logging important events. + Be aware that this might expose your Cluster to a DoS risk. + audit: | + If running the kubelet with the CLI parameter '--event-qps', or running with 'eventRecordQPS' defined in the kubelet configuration file, ensure that the value is set to '0'. + remediation: | + Set the '--event-qps' CLI parameter and/or the 'eventRecordQPS' field in the kubelet configuration file to '0'. + refs: + - url: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration + title: Kubelet configuration + - uid: mondoo-kubernetes-security-kubelet-iptables-util-chains + title: Configure kubelet to ensure IPTables rules are set on host + impact: 30 + mql: | + k8s.kubelet.configuration['makeIPTablesUtilChains'] == true + docs: + desc: | + Ensure that the kubelet is set up to create IPTable utility rules for various kubernetes components. + audit: | + If running the kubelet with the CLI parameter '--make-iptables-util-chains', or running with 'makeIPTablesUtilChains' defined in the kubelet configuration file, ensure that the value is set to 'true'. + remediation: | + Set the '--make-iptables-util-chains' CLI parameter and/or the 'makeIPTablesUtilChains' field in the kubelet configuration file to 'true'. + refs: + - url: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration + title: Kubelet configuration + - uid: mondoo-kubernetes-security-kubelet-protect-kernel-defaults + title: Configure kubelet to protect kernel defaults + impact: 60 + mql: | + k8s.kubelet.configuration["protectKernelDefaults"] == "true" + docs: + desc: | + Ensure that the kubelet is set up to error if the underlying kernel tunables are different than the kubelet defaults. By default the kubelet will attempt to modify the kernel as the kubelet starts up. 
+ audit: | + If running the kubelet with the CLI parameter '--protect-kernel-defaults', or running with 'protectKernelDefaults' defined in the kubelet configuration file, ensure that the value is set to 'true'. + remediation: | + Set the '--protect-kernel-defaults' CLI parameter and/or the 'protectKernelDefaults' field in the kubelet configuration file to 'true'. + refs: + - url: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration + title: Kubelet configuration + - uid: mondoo-kubernetes-security-kubelet-read-only-port + title: Do not allow unauthenticated read-only port on kubelet + impact: 60 + mql: | + k8s.kubelet.configuration['readOnlyPort'] == 0 || k8s.kubelet.configuration['readOnlyPort'] == null + docs: + desc: | + Ensure the kubelet is not configured to serve unauthenticated read-only access. + This would expose data to unauthenticated users. + audit: | + If running the kubelet with the CLI parameter '--read-only-port', or running with 'readOnlyPort' defined in the kubelet configuration file, ensure that the value is either '0' or simply not set ('0' is the default). + remediation: | + Set the '--read-only-port' CLI parameter or the 'readOnlyPort' field in the kubelet configuration file to '0'. + refs: + - url: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration + title: Kubelet configuration + - uid: mondoo-kubernetes-security-kubelet-authorization-mode + title: Ensure the kubelet is not configured with the AlwaysAllow authorization mode + impact: 100 + mql: | + k8s.kubelet.configuration['authorization']['mode'] != "AlwaysAllow" + docs: + desc: | + Ensure the kubelet is not configured with the AlwaysAllow authorization mode. + This mode allows all requests without any authorization check. + audit: | + If running the kubelet with the CLI parameter '--authorization-mode', or running with 'authorization.mode' defined in the kubelet configuration file, ensure that the value is not set to 'AlwaysAllow'. + remediation: | + If the kubelet is configured with the CLI parameter '--authorization-mode', set it to something that isn't 'AlwaysAllow' (e.g., 'Webhook'). + + If the kubelet is configured via the kubelet config file with the 'authorization.mode' parameter, set it to something that isn't 'AlwaysAllow' (e.g., 'Webhook').
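+
+        A minimal sketch of the corresponding kubelet configuration file section (assuming webhook authorization is the desired mode):
+
+        ```yaml
+        authorization:
+          mode: Webhook
+        ```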
+ refs: + - url: https://kubernetes.io/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authorization + title: Kubelet authorization + - uid: mondoo-kubernetes-security-kubelet-strong-ciphers + title: Configure kubelet to use only strong cryptography + impact: 100 + props: + - uid: allowedCiphers + title: Define the hardened SSL/ TLS ciphers + mql: | + return ["TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_256_GCM_SHA384"] + mql: | + k8s.kubelet.configuration['tlsCipherSuites'] != null + if (k8s.kubelet.configuration['tlsCipherSuites'] != null) { + k8s.kubelet.configuration['tlsCipherSuites'].map( _.trim ).containsOnly(props.allowedCiphers) + } + docs: + desc: | + Ensure the kubelet runs with only strong cryptography support. Weak or old ciphers might expose your data. + audit: | + If running the kubelet with the CLI parameter '--tls-cipher-suites', or running with 'tlsCipherSuites' defined in the kubelet configuration file, ensure that the list of allowed ciphers is not empty and that all configured ciphers are included in the following list: + + "TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", "TLS_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256", "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_256_GCM_SHA384" + remediation: | + Define the list of allowed TLS ciphers to include only items from the strong list of ciphers. + + If the kubelet is configured with the CLI parameter '--tls-cipher-suites', update the list (or define the parameter) to only include strong ciphers. + + If the kubelet is configured via the kubelet config file with the 'tlsCipherSuites' parameter, update the list (or create an entry for 'tlsCipherSuites') to only include strong ciphers. + refs: + - url: https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration + title: Kubelet configuration + - uid: mondoo-kubernetes-security-kubelet-tls-certificate + title: Run kubelet with a user-provided certificate/key + impact: 100 + mql: | + k8s.kubelet.configuration["tlsCertFile"] != null + k8s.kubelet.configuration["tlsPrivateKeyFile"] != null + docs: + desc: | + Ensure that the kubelet is not running with self-signed certificates generated by the kubelet itself.
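+
+        A minimal sketch of the relevant kubelet configuration fields (the certificate and key paths are placeholders and depend on where the user-provided pair is stored):
+
+        ```yaml
+        tlsCertFile: /var/lib/kubelet/pki/kubelet.crt
+        tlsPrivateKeyFile: /var/lib/kubelet/pki/kubelet.key
+        ```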
+ audit: | + The kubelet CLI parameters override values in the kubelet configuration file. + + Check the kubelet CLI parameters to see whether '--tls-cert-file' and '--tls-private-key-file' are set to a non-empty path/string. + + Check the kubelet configuration file to see whether 'tlsCertFile' and 'tlsPrivateKeyFile' are set to a non-empty path/string. + remediation: | + Configure the kubelet to use a user-provided certificate/key pair for serving HTTPS. + + After acquiring the TLS certificate/key pair, update the 'tlsCertFile' and 'tlsPrivateKeyFile' fields in the kubelet configuration file. + + Or, if using the deprecated kubelet CLI parameters, update the '--tls-cert-file' and '--tls-private-key-file' parameters to use the new certificate/key. + - uid: mondoo-kubernetes-security-kubelet-rotate-certificates + title: Run kubelet with automatic certificate rotation + impact: 80 + mql: | + k8s.kubelet.configuration["rotateCertificates"] != "false" + docs: + desc: | + Ensure the kubelet is running with automatic certificate rotation so that the kubelet will automatically renew certificates with the API server as certificates near expiration. + Otherwise the communication between the kubelet and the API server will be interrupted once the certificates expire. + audit: | + Check the kubelet CLI parameters to ensure '--rotate-certificates' is not set to false, and that the kubelet config file has not set 'rotateCertificates' to false. + remediation: | + Depending on where the configuration behavior is defined (CLI parameters override config file values), update the kubelet CLI parameters to set '--rotate-certificates' to true, and/or update the kubelet configuration to set 'rotateCertificates' to true. + refs: + - url: https://kubernetes.io/docs/tasks/tls/certificate-rotation/ + title: Configure Certificate Rotation for the Kubelet + - uid: mondoo-kubernetes-security-secure-kubelet-config + title: Ownership and permissions of kubelet configuration should be restricted + impact: 80 + mql: | + if (k8s.kubelet.configFile != null) { + if (k8s.kubelet.configFile.exists) { + k8s.kubelet.configFile { + user.name == "root" + group.name == "root" + } + k8s.kubelet.configFile.permissions { + user_readable == true + user_executable == false + group_readable == false + group_writeable == false + group_executable == false + other_readable == false + other_writeable == false + other_executable == false + } + } + } + docs: + desc: | + Ensure proper file ownership and read-write-execute permissions for the kubelet configuration file. + Otherwise unprivileged users might get access to sensitive information.
+ audit: | + View the kubelet configuration file details: + + ``` + $ ls -l /etc/kubernetes/kubelet.conf + -rw-r--r-- 1 root root 1155 Sep 21 15:03 /etc/kubernetes/kubelet.conf + ``` + remediation: | + Update the ownership and permissions: + + ``` + chown root:root /etc/kubernetes/kubelet.conf + chmod 600 /etc/kubernetes/kubelet.conf + ``` + - uid: mondoo-kubernetes-security-secure-kubelet-cert-authorities + title: Specify a kubelet certificate authorities file and ensure proper ownership and permissions + impact: 100 + mql: | + k8s.kubelet.configuration['authentication']['x509']['clientCAFile'] != null + if (k8s.kubelet.configuration['authentication']['x509']['clientCAFile'] != null) { + cafile = k8s.kubelet.configuration["authentication"]["x509"]["clientCAFile"] + file(cafile) { + user.name == "root" + group.name == "root" + } + file(cafile).permissions { + user_readable == true + user_executable == false + group_readable == false + group_writeable == false + group_executable == false + other_readable == false + other_writeable == false + other_executable == false + } + } + docs: + desc: | + Ensure appropriate ownership and permissions for the kubelet's certificate authorities configuration file. + audit: | + View the ownership and permissions: + + ``` + $ ls -l /etc/srv/kubernetes/pki/ca-certificates.crt + -rw------- 1 root root 1159 Sep 13 04:14 /etc/srv/kubernetes/pki/ca-certificates.crt + ``` + remediation: | + Update the ownership and permissions: + + ``` + chown root:root /etc/srv/kubernetes/pki/ca-certificates.crt + chmod 600 /etc/srv/kubernetes/pki/ca-certificates.crt + ``` + - uid: mondoo-kubernetes-security-secure-kube-apiserver-yml + title: Set secure file permissions on the API server pod specification file + impact: 60 + mql: | + if (file("/etc/kubernetes/manifests/kube-apiserver.yaml").exists) { + file("/etc/kubernetes/manifests/kube-apiserver.yaml") { + permissions.user_writeable == true + permissions.group_writeable == false + permissions.other_writeable == false + permissions.user_readable == true + permissions.group_readable == false + permissions.other_readable == false + permissions.user_executable == false + permissions.group_executable == false + permissions.other_executable == false + user.name == "root" + group.name == "root" + } + } + docs: + desc: | + Ensure that the API server pod specification file has permissions of `600` and is owned by `root:root`. + Otherwise unprivileged users might change it. + remediation: |- + Run this command on the Control Plane node: + + ``` + chmod 600 /etc/kubernetes/manifests/kube-apiserver.yaml + chown root:root /etc/kubernetes/manifests/kube-apiserver.yaml + ``` + - uid: mondoo-kubernetes-security-secure-etcd-data-dir + title: | + Set secure directory permissions on the etcd data directory. + Otherwise unprivileged users might get access to sensitive data stored in etcd, i.e., Kubernetes Secrets. 
+ impact: 60 + mql: | + if (file("/var/lib/etcd").exists) { + file("/var/lib/etcd") { + permissions.user_writeable == true + permissions.group_writeable == false + permissions.other_writeable == false + permissions.user_readable == true + permissions.group_readable == false + permissions.other_readable == false + permissions.user_executable == true + permissions.group_executable == false + permissions.other_executable == false + user.name == "etcd" + group.name == "etcd" + } + } else { + dir = processes.where( executable == /etcd/ ).list[0].flags["data-dir"] + file(dir) { + permissions.user_writeable == true + permissions.group_writeable == false + permissions.other_writeable == false + permissions.user_readable == true + permissions.group_readable == false + permissions.other_readable == false + permissions.user_executable == true + permissions.group_executable == false + permissions.other_executable == false + user.name == "etcd" + group.name == "etcd" + } + } + docs: + desc: Ensure that the etcd data directory has permissions of `700` and is owned by `etcd:etcd`. + remediation: |- + On the etcd server node, get the etcd data directory, passed as an argument `--data-dir`, from the below command: + + ``` + ps -ef | grep etcd + ``` + + Run the below command: + + ``` + chmod 700 /var/lib/etcd + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/secret/ + title: Kubernetes Secrets + - uid: mondoo-kubernetes-security-secure-admin-conf + title: Set secure file permissions on the admin.conf file + impact: 60 + mql: | + if (file("/etc/kubernetes/admin.conf").exists) { + file("/etc/kubernetes/admin.conf") { + permissions.user_writeable == true + permissions.group_writeable == false + permissions.other_writeable == false + permissions.user_readable == true + permissions.group_readable == false + permissions.other_readable == false + permissions.user_executable == false + permissions.group_executable == false + permissions.other_executable == false + user.name == "root" + group.name == "root" + } + } + docs: + desc: | + Ensure that the `admin.conf` file has permissions of `600` and is owned by root:root. + Otherwise unprivileged users might get admin access to the Kubernetes API server. + remediation: |- + Run this command on the Control Plane node: + + ``` + chmod 600 /etc/kubernetes/admin.conf + chown root:root /etc/kubernetes/admin.conf + ``` + refs: + - url: https://kubernetes.io/docs/setup/ + title: Kubernetes Setup + - uid: mondoo-kubernetes-security-secure-scheduler_conf + title: Set secure file permissions on the scheduler.conf file + impact: 60 + mql: | + if (file("/etc/kubernetes/scheduler.conf").exists) { + file("/etc/kubernetes/scheduler.conf") { + permissions.user_writeable == true + permissions.group_writeable == false + permissions.other_writeable == false + permissions.user_readable == true + permissions.group_readable == false + permissions.other_readable == false + permissions.user_executable == false + permissions.group_executable == false + permissions.other_executable == false + user.name == "root" + group.name == "root" + } + } + docs: + desc: Ensure that the `scheduler.conf` file has permissions of `600` and is owned by `root:root`. 
+ remediation: |- + Run this command on the Control Plane node: + + ``` + chmod 600 /etc/kubernetes/scheduler.conf + chown root:root /etc/kubernetes/scheduler.conf + ``` + - uid: mondoo-kubernetes-security-secure-controller-manager_conf + title: Set secure file permissions on the controller-manager.conf file + impact: 60 + mql: | + if (file("/etc/kubernetes/controller-manager.conf").exists) { + file("/etc/kubernetes/controller-manager.conf") { + permissions.user_writeable == true + permissions.group_writeable == false + permissions.other_writeable == false + permissions.user_readable == true + permissions.group_readable == false + permissions.other_readable == false + permissions.user_executable == false + permissions.group_executable == false + permissions.other_executable == false + user.name == "root" + group.name == "root" + } + } + docs: + desc: Ensure that the `controller-manager.conf` file has permissions of `600` and is owned by `root:root`. + remediation: |- + Run this command on the Control Plane node: + + ``` + chmod 600 /etc/kubernetes/controller-manager.conf + chown root:root /etc/kubernetes/controller-manager.conf + ``` + - uid: mondoo-kubernetes-security-secure-pki-directory + title: Ensure that the Kubernetes PKI/SSL directory is owned by root:root + impact: 65 + mql: | + if (processes.where(executable == /kube-apiserver/).list[0].flags["etcd-certfile"] != null) { + clientCAFile = processes.where(executable == /kube-apiserver/).list[0].flags["etcd-certfile"] + ssldir = file(clientCAFile).dirname + file(ssldir) { + user.name == "root" + group.name == "root" + } + } else { + file("/etc/kubernetes/pki") { + user.name == "root" + group.name == "root" + } + } + docs: + desc: | + Ensure that the Kubernetes PKI/SSL directory is owned by `root:root`. + Otherwise unprivileged users could change the PKI/SSL certificates the whole encryption of the cluster relies on. + remediation: |- + Run one of these commands on the Control Plane node depending on the location of your PKI/SSL directory: + + ``` + chown -R root:root /etc/kubernetes/pki/ + ``` + + or + + ``` + chown -R root:root /etc/kubernetes/ssl/ + ```` + refs: + - url: https://kubernetes.io/docs/setup/best-practices/certificates/ + title: PKI certificates and requirements + - uid: mondoo-kubernetes-security-https-api-server + title: Ensure the kube-apiserver is not listening on an insecure HTTP port + impact: 70 + mql: | + processes.where(executable == /kube-apiserver/).list { + flags["insecure-port"] == 0 + } + docs: + desc: | + Ensure the kube-apiserver is not listening on an insecure HTTP port. + Otherwise unencrypted traffic could be intercepted and sensitive data could be leaked. + remediation: |- + Find the kube-apiserver process and check the `insecure-port` argument. If the argument is set to `0`, then the kube-apiserver is not listening on an insecure HTTP port: + ``` + ps aux | grep kube-apiserver + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/controlling-access/#transport-security + title: Controlling Access to the Kubernetes API - Transport security + - uid: mondoo-kubernetes-security-api-server-no-anonymous-auth + title: | + Ensure the kube-apiserver does not allow anonymous authentication. + When allowed, request will have the privileges of the role `system:public-info-viewer`. This might expose data to an attacker. 
+ impact: 100 + mql: | + processes.where(executable == /kube-apiserver/).list { + flags["anonymous-auth"] == "false" + } + docs: + desc: Ensure the kube-apiserver does not allow anonymous authentication. + remediation: |- + Find the kube-apiserver process and check the `--anonymous-auth` argument. If the argument is set to `false`, then the kube-apiserver does not allow anonymous authentication: + ``` + ps aux | grep kube-apiserver + ``` + refs: + - url: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#anonymous-requests + title: Anonymous requests + - uid: mondoo-kubernetes-security-pod-docker-socket + title: Container should not mount the Docker socket + impact: 100 + mql: | + k8s.pod { + podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') + } + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/docker.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-cronjob-docker-socket + title: Container should not mount the Docker socket + impact: 100 + mql: | + k8s.cronjob { + podSpec['volumes'] == null || podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') + } + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/docker.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-statefulset-docker-socket + title: Container should not mount the Docker socket + impact: 100 + mql: k8s.statefulset.podSpec['volumes'] == null || k8s.statefulset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/docker.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-deployment-docker-socket + title: Container should not mount the Docker socket + impact: 100 + mql: k8s.deployment.podSpec['volumes'] == null || k8s.deployment.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/docker.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-job-docker-socket + title: Container should not mount the Docker socket + impact: 100 + mql: k8s.job.podSpec['volumes'] == null || k8s.job.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/docker.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-replicaset-docker-socket + title: Container should not mount the Docker socket + impact: 100 + mql: k8s.replicaset.podSpec['volumes'] == null || k8s.replicaset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/docker.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-daemonset-docker-socket + title: Container should not mount the Docker socket + impact: 100 + mql: k8s.daemonset.podSpec['volumes'] == null || k8s.daemonset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/docker.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/docker.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/docker.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/docker.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-pod-containerd-socket + title: Container should not mount the containerd socket + impact: 100 + mql: k8s.pod.podSpec['volumes'] == null || k8s.pod.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-cronjob-containerd-socket + title: Container should not mount the containerd socket + impact: 100 + mql: k8s.cronjob.podSpec['volumes'] == null || k8s.cronjob.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-statefulset-containerd-socket + title: Container should not mount the containerd socket + impact: 100 + mql: k8s.statefulset.podSpec['volumes'] == null || k8s.statefulset.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-deployment-containerd-socket + title: Container should not mount the containerd socket + impact: 100 + mql: k8s.deployment.podSpec['volumes'] == null || k8s.deployment.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-job-containerd-socket + title: Container should not mount the containerd socket + impact: 100 + mql: k8s.job.podSpec['volumes'] == null || k8s.job.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-replicaset-containerd-socket + title: Container should not mount the containerd socket + impact: 100 + mql: k8s.replicaset.podSpec['volumes'] == null || k8s.replicaset.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-daemonset-containerd-socket + title: Container should not mount the containerd socket + impact: 100 + mql: k8s.daemonset.podSpec['volumes'] == null || k8s.daemonset.podSpec['volumes'].all(_['hostPath']['path'] != '/run/containerd/containerd.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /run/containerd/containerd.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /run/containerd/containerd.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /run/containerd/containerd.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-pod-crio-socket + title: Container should not mount the CRI-O socket + impact: 100 + mql: k8s.pod.podSpec['volumes'] == null || k8s.pod.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/crio/crio.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-cronjob-crio-socket + title: Container should not mount the CRI-O socket + impact: 100 + mql: k8s.cronjob.podSpec['volumes'] == null || k8s.cronjob.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/crio/crio.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-statefulset-crio-socket + title: Container should not mount the CRI-O socket + impact: 100 + mql: k8s.statefulset.podSpec['volumes'] == null || k8s.statefulset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/crio/crio.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-deployment-crio-socket + title: Container should not mount the CRI-O socket + impact: 100 + mql: k8s.deployment.podSpec['volumes'] == null || k8s.deployment.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
+ audit: | + Check for the existence of `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/crio/crio.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-job-crio-socket + title: Container should not mount the CRI-O socket + impact: 100 + mql: k8s.job.podSpec['volumes'] == null || k8s.job.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/crio/crio.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-replicaset-crio-socket + title: Container should not mount the CRI-O socket + impact: 100 + mql: k8s.replicaset.podSpec['volumes'] == null || k8s.replicaset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. 
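+
+ To see which ReplicaSets actually mount the CRI-O socket, a listing query can complement the pass/fail check. This is a sketch that assumes the plural `k8s.replicasets` resource and its `name`/`namespace` fields are available in the cnquery Kubernetes provider:
+
+ ```mql
+ k8s.replicasets.where(
+ podSpec['volumes'] != null &&
+ podSpec['volumes'].any(_['hostPath']['path'] == '/var/run/crio/crio.sock')
+ ) { name namespace }
+ ```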
+ audit: | + Check for the existence of `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/crio/crio.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-daemonset-crio-socket + title: Container should not mount the CRI-O socket + impact: 100 + mql: k8s.daemonset.podSpec['volumes'] == null || k8s.daemonset.podSpec['volumes'].all(_['hostPath']['path'] != '/var/run/crio/crio.sock') + docs: + desc: | + Do not mount the container runtime socket into any container. + This would allow direct access to the container runtime without any authentication. + This would allow to create privileged containers and to access the host file system. + Or create containers which would not show up in the Kubernetes API. + audit: | + Check for the existence of `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + volumeMounts: + - mountPath: /var/run/crio/crio.sock + name: vol + ``` + remediation: | + Ensure workloads do not have `hostPath.path: /var/run/crio/crio.sock` setting in the `volumes`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + volumes: + - name: vol + hostPath: + - path: /var/run/crio/crio.sock # <--- this shouldn't be there + ``` + refs: + - url: https://cheatsheetseries.owasp.org/cheatsheets/Docker_Security_Cheat_Sheet.html#rule-1-do-not-expose-the-docker-daemon-socket-even-to-the-containers + title: Docker security + - uid: mondoo-kubernetes-security-pod-allowprivilegeescalation + title: Container should not allow privilege escalation + impact: 100 + mql: | + k8s.pod.ephemeralContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.pod.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.pod.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + docs: + desc: | + Do not allow privilege escalation in containers. + Even, when the container is not running as root, it could still escalate privileges. 
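+
+ The pod-level rule above can also be applied across every pod in a cluster at once. Treat the following as a sketch; it assumes the plural `k8s.pods` resource and reuses the same `securityContext` field access as the check:
+
+ ```mql
+ k8s.pods.all( containers.all( securityContext['allowPrivilegeEscalation'] != true ) )
+ k8s.pods.all( initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) )
+ ```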
+ audit: | + Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: true + ``` + remediation: | + Ensure `allowPrivilegeEscalation` is set to `false` or not present in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-cronjob-allowprivilegeescalation + title: Container should not allow privilege escalation + impact: 100 + mql: | + k8s.cronjob.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.cronjob.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + docs: + desc: | + Do not allow privilege escalation in containers. + Even, when the container is not running as root, it could still escalate privileges. + audit: | + Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: true + ``` + remediation: | + Ensure `allowPrivilegeEscalation` is set to `false` or not present in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-statefulset-allowprivilegeescalation + title: Container should not allow privilege escalation + impact: 100 + mql: | + k8s.statefulset.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.statefulset.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + docs: + desc: | + Do not allow privilege escalation in containers. + Even, when the container is not running as root, it could still escalate privileges. 
+ audit: | + Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: true + ``` + remediation: | + Ensure `allowPrivilegeEscalation` is set to `false` or not present in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-deployment-allowprivilegeescalation + title: Container should not allow privilege escalation + impact: 100 + mql: | + k8s.deployment.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.deployment.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + docs: + desc: | + AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows. + audit: | + Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: true + ``` + remediation: | + Ensure `allowPrivilegeEscalation` is set to `false` or not present in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-job-allowprivilegeescalation + title: Container should not allow privilege escalation + impact: 100 + mql: | + k8s.job.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.job.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + docs: + desc: | + Do not allow privilege escalation in containers. + Even, when the container is not running as root, it could still escalate privileges. 
+ audit: | + Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: true + ``` + remediation: | + Ensure `allowPrivilegeEscalation` is set to `false` or not present in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-replicaset-allowprivilegeescalation + title: Container should not allow privilege escalation + impact: 100 + mql: | + k8s.replicaset.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.replicaset.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + docs: + desc: | + Do not allow privilege escalation in containers. + Even, when the container is not running as root, it could still escalate privileges. + audit: | + Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: true + ``` + remediation: | + Ensure `allowPrivilegeEscalation` is set to `false` or not present in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-daemonset-allowprivilegeescalation + title: Container should not allow privilege escalation + impact: 100 + mql: | + k8s.daemonset.initContainers.all( securityContext['allowPrivilegeEscalation'] != true ) + k8s.daemonset.containers.all( securityContext['allowPrivilegeEscalation'] != true ) + docs: + desc: | + Do not allow privilege escalation in containers. + Even, when the container is not running as root, it could still escalate privileges. 
+ audit: | + Check for the existence of `allowPrivilegeEscalation: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: true + ``` + remediation: | + Ensure `allowPrivilegeEscalation` is set to `false` or not present in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + securityContext: + allowPrivilegeEscalation: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-pod-privilegedcontainer + title: Container should not run as a privileged container + impact: 100 + mql: | + k8s.pod.ephemeralContainers.all( securityContext['privileged'] != true ) + k8s.pod.initContainers.all( securityContext['privileged'] != true ) + k8s.pod.containers.all( securityContext['privileged'] != true ) + docs: + desc: | + Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. + audit: | + Check for the existence of `privileged: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: true + ``` + remediation: | + Remove the `privileged` setting from the container spec: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + + Or explicitly set `privileged` to `false`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-cronjob-privilegedcontainer + title: Container should not run as a privileged container + impact: 100 + mql: | + k8s.cronjob.initContainers.all( securityContext['privileged'] != true ) + k8s.cronjob.containers.all( securityContext['privileged'] != true ) + docs: + desc: | + Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. 
+ audit: | + Check for the existence of `privileged: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: true + ``` + remediation: | + Remove the `privileged` setting from the container spec: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + + Or explicitly set `privileged` to `false`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-statefulset-privilegedcontainer + title: Container should not run as a privileged container + impact: 100 + mql: | + k8s.statefulset.initContainers.all( securityContext['privileged'] != true ) + k8s.statefulset.containers.all( securityContext['privileged'] != true ) + docs: + desc: | + Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. + audit: | + Check for the existence of `privileged: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: true + ``` + remediation: | + Remove the `privileged` setting from the container spec: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + + Or explicitly set `privileged` to `false`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-deployment-privilegedcontainer + title: Container should not run as a privileged container + impact: 100 + mql: | + k8s.deployment.containers.all( securityContext['privileged'] != true ) + k8s.deployment.initContainers.all( securityContext['privileged'] != true ) + docs: + desc: | + Running a privileged container means that the container has the host's capabilities including access to all devices and the host's network. 
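+
+ For a quick overview of which Deployments request privileged mode, a listing query along these lines can be run interactively (a sketch; `k8s.deployments` and the nested container assertion are assumed to be available in the cnquery Kubernetes provider):
+
+ ```mql
+ k8s.deployments.where(
+ containers.any( securityContext['privileged'] == true )
+ ) { name namespace }
+ ```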
+ audit: | + Check for the existence of `privileged: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: true + ``` + remediation: | + Remove the `privileged` setting from the container spec: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + + Or explicitly set `privileged` to `false`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-job-privilegedcontainer + title: Container should not run as a privileged container + impact: 100 + mql: | + k8s.job.initContainers.all( securityContext['privileged'] != true ) + k8s.job.containers.all( securityContext['privileged'] != true ) + docs: + desc: | + Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. + audit: | + Check for the existence of `privileged: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: true + ``` + remediation: | + Remove the `privileged` setting from the container spec: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + + Or explicitly set `privileged` to `false`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + privileged: false + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-replicaset-privilegedcontainer + title: Container should not run as a privileged container + impact: 100 + mql: | + k8s.replicaset.initContainers.all( securityContext['privileged'] != true ) + k8s.replicaset.containers.all( securityContext['privileged'] != true ) + docs: + desc: | + Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network. 
+ audit: |
+ Check for the existence of `privileged: true` setting in the `securityContext`:
+
+ ```yaml
+ ---
+ apiVersion: v1
+ kind: Pod
+ spec:
+ containers:
+ - name: container-name
+ image: index.docker.io/yournamespace/repository
+ securityContext:
+ privileged: true
+ ```
+ remediation: |
+ Remove the `privileged` setting from the container spec:
+
+ ```yaml
+ ---
+ apiVersion: v1
+ kind: Pod
+ spec:
+ containers:
+ - name: container-name
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Or explicitly set `privileged` to `false`:
+
+ ```yaml
+ ---
+ apiVersion: v1
+ kind: Pod
+ spec:
+ containers:
+ - name: container-name
+ image: index.docker.io/yournamespace/repository
+ securityContext:
+ privileged: false
+ ```
+ refs:
+ - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ title: Configure a Security Context for a Pod or Container
+ - uid: mondoo-kubernetes-security-daemonset-privilegedcontainer
+ title: Container should not run as a privileged container
+ impact: 100
+ mql: |
+ k8s.daemonset.initContainers.all( securityContext['privileged'] != true )
+ k8s.daemonset.containers.all( securityContext['privileged'] != true )
+ docs:
+ desc: |
+ Running a privileged container means the container has the host's capabilities, including access to all devices and the host's network.
+ audit: |
+ Check for the existence of `privileged: true` setting in the `securityContext`:
+
+ ```yaml
+ ---
+ apiVersion: v1
+ kind: Pod
+ spec:
+ containers:
+ - name: container-name
+ image: index.docker.io/yournamespace/repository
+ securityContext:
+ privileged: true
+ ```
+ remediation: |
+ Remove the `privileged` setting from the container spec:
+
+ ```yaml
+ ---
+ apiVersion: v1
+ kind: Pod
+ spec:
+ containers:
+ - name: container-name
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Or explicitly set `privileged` to `false`:
+
+ ```yaml
+ ---
+ apiVersion: v1
+ kind: Pod
+ spec:
+ containers:
+ - name: container-name
+ image: index.docker.io/yournamespace/repository
+ securityContext:
+ privileged: false
+ ```
+ refs:
+ - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ title: Configure a Security Context for a Pod or Container
+ - uid: mondoo-kubernetes-security-pod-readonlyrootfilesystem
+ title: Container should use an immutable root filesystem
+ impact: 80
+ mql: |
+ k8s.pod.ephemeralContainers.all( securityContext['readOnlyRootFilesystem'] == true )
+ k8s.pod.initContainers.all( securityContext['readOnlyRootFilesystem'] == true )
+ k8s.pod.containers.all( securityContext['readOnlyRootFilesystem'] == true )
+ docs:
+ desc: |
+ Running a container with an immutable (read-only) file system prevents the modification of running containers.
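+
+ Because this check only passes when `readOnlyRootFilesystem` is set explicitly, it can help to list the pods that leave it unset or disabled. A rough sketch, assuming the plural `k8s.pods` resource from the cnquery Kubernetes provider:
+
+ ```mql
+ k8s.pods.where(
+ containers.any( securityContext['readOnlyRootFilesystem'] != true )
+ ) { name namespace }
+ ```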
+ audit: | + Check for the existence of `readOnlyRootFilesystem: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + remediation: | + Ensure `readOnlyRootFilesystem` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-cronjob-readonlyrootfilesystem + title: Container should use an immutable root filesystem + impact: 80 + mql: | + k8s.cronjob.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.cronjob.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + docs: + desc: | + Running a container with an immutable (read-only) file system prevents the modification of running containers. + audit: | + Check for the existence of `readOnlyRootFilesystem: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + remediation: | + Ensure `readOnlyRootFilesystem` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-statefulset-readonlyrootfilesystem + title: Container should use an immutable root filesystem + impact: 80 + mql: | + k8s.statefulset.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.statefulset.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + docs: + desc: | + Running a container with an immutable (read-only) file system prevents the modification of running containers. 
+ audit: | + Check for the existence of `readOnlyRootFilesystem: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + remediation: | + Ensure `readOnlyRootFilesystem` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-deployment-readonlyrootfilesystem + title: Container should use an immutable root filesystem + impact: 80 + mql: | + k8s.deployment.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.deployment.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + docs: + desc: | + Running a container with an immutable (read-only) file system prevents the modification of running containers. + audit: | + Check for the existence of `readOnlyRootFilesystem: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + remediation: | + Ensure `readOnlyRootFilesystem` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-job-readonlyrootfilesystem + title: Container should use an immutable root filesystem + impact: 80 + mql: | + k8s.job.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.job.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + docs: + desc: | + Running a container with an immutable (read-only) file system prevents the modification of running containers. 
+ audit: | + Check for the existence of `readOnlyRootFilesystem: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + remediation: | + Ensure `readOnlyRootFilesystem` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-replicaset-readonlyrootfilesystem + title: Container should use an immutable root filesystem + impact: 80 + mql: | + k8s.replicaset.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.replicaset.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + docs: + desc: | + Running a container with an immutable (read-only) file system prevents the modification of running containers. + audit: | + Check for the existence of `readOnlyRootFilesystem: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + remediation: | + Ensure `readOnlyRootFilesystem` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-daemonset-readonlyrootfilesystem + title: Container should use an immutable root filesystem + impact: 80 + mql: | + k8s.daemonset.initContainers.all( securityContext['readOnlyRootFilesystem'] == true ) + k8s.daemonset.containers.all( securityContext['readOnlyRootFilesystem'] == true ) + docs: + desc: | + Running a container with an immutable (read-only) file system prevents the modification of running containers. 
+ audit: | + Check for the existence of `readOnlyRootFilesystem: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + remediation: | + Ensure `readOnlyRootFilesystem` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + readOnlyRootFilesystem: true + ``` + refs: + - url: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-pod-runasnonroot + title: Container should not run as root + impact: 100 + mql: | + if (k8s.pod.annotations['policies.k8s.mondoo.com/mondoo-kubernetes-security-pod-runasnonroot'] != 'ignore') { + k8s.pod { + podSecurityContext=podSpec['securityContext'] + ephemeralContainers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + initContainers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + containers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + } + } + docs: + desc: | + Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. + When containers run as the `root` user, they have the same privileges as `root` on the host system. 
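+
+ The check above accepts `runAsNonRoot: true` either on the individual container or, as a fallback, on the pod-level `securityContext`. To list pods where neither level guarantees a non-root user for every container, a query of roughly this shape can be used (a sketch assuming the plural `k8s.pods` resource; the field names mirror the check):
+
+ ```mql
+ k8s.pods.where(
+ podSpec['securityContext']['runAsNonRoot'] != true &&
+ containers.any( securityContext['runAsNonRoot'] != true )
+ ) { name namespace }
+ ```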
+ audit: | + Check for the existence of `runAsNonRoot: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `runAsNonRoot` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-cronjob-runasnonroot + title: Container should not run as root + impact: 100 + mql: | + if (k8s.cronjob.annotations['policies.k8s.mondoo.com/mondoo-kubernetes-security-cronjob-runasnonroot'] != 'ignore') { + k8s.cronjob { + podSecurityContext=podSpec['securityContext'] + initContainers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + containers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + } + } + docs: + desc: | + Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. + When containers run as the `root` user, they have the same privileges as `root` on the host system. 
+ audit: | + Check for the existence of `runAsNonRoot: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `runAsNonRoot` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-statefulset-runasnonroot + title: Container should not run as root + impact: 100 + mql: | + k8s.statefulset { + podSecurityContext=podSpec['securityContext'] + initContainers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + containers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + } + docs: + desc: | + Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. + When containers run as the `root` user, they have the same privileges as `root` on the host system. 
+ audit: | + Check for the existence of `runAsNonRoot: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `runAsNonRoot` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-deployment-runasnonroot + title: Container should not run as root + impact: 100 + mql: | + k8s.deployment.containers.all( securityContext['runAsNonRoot'] == true ) + k8s.deployment.initContainers.all( securityContext['runAsNonRoot'] == true ) + docs: + desc: | + Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. + When containers run as the `root` user, they have the same privileges as `root` on the host system. + audit: | + Check for the existence of `runAsNonRoot: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `runAsNonRoot` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-job-runasnonroot + title: Container should not run as root + impact: 100 + mql: | + if (k8s.job.annotations['policies.k8s.mondoo.com/mondoo-kubernetes-security-job-runasnonroot'] != 'ignore') { + k8s.job { + podSecurityContext=podSpec['securityContext'] + initContainers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + containers { + a = podSecurityContext['runAsNonRoot'] == true && 
securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + } + } + docs: + desc: | + Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. + When containers run as the `root` user, they have the same privileges as `root` on the host system. + audit: | + Check for the existence of `runAsNonRoot: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `runAsNonRoot` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-replicaset-runasnonroot + title: Container should not run as root + impact: 100 + mql: | + k8s.replicaset { + podSecurityContext=podSpec['securityContext'] + initContainers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + containers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + } + docs: + desc: | + Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. + When containers run as the `root` user, they have the same privileges as `root` on the host system. 
+ audit: | + Check for the existence of `runAsNonRoot: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `runAsNonRoot` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-daemonset-runasnonroot + title: Container should not run as root + impact: 100 + mql: | + k8s.daemonset { + podSecurityContext=podSpec['securityContext'] + initContainers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + containers { + a = podSecurityContext['runAsNonRoot'] == true && securityContext['runAsNonRoot'] == null + res = securityContext['runAsNonRoot'] == true || a + res == true + } + } + docs: + desc: | + Set the `runAsNonRoot: true` `securityContext` to ensure containers do not run as the root user. + When containers run as the `root` user, they have the same privileges as `root` on the host system. 
+ audit: | + Check for the existence of `runAsNonRoot: true` setting in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `runAsNonRoot` is set to `true` in the `securityContext`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + securityContext: + runAsNonRoot: true + ``` + + It is also possible to set it for all containers at the Pod level: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + securityContext: + runAsNonRoot: true + containers: + - name: container-name + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#securitycontext-v1-core + title: Configure a Security Context for a Pod or Container + - uid: mondoo-kubernetes-security-pod-hostnetwork + title: Pod should not run with hostNetwork + impact: 80 + mql: k8s.pod.podSpec['hostNetwork'] != true + docs: + desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network including loopback devices. This capability can be used to intercept network traffic including the traffic of other pods. + audit: | + Check for the existence of `hostNetwork: true` setting in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostNetwork` is set to `false` or not present in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-cronjob-hostnetwork + title: Pod should not run with hostNetwork + impact: 80 + mql: k8s.cronjob.podSpec['hostNetwork'] != true + docs: + desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. 
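+
+ A quick cluster-wide view of CronJobs that opt into the host network namespace can be obtained with an interactive query. This is a sketch that assumes the plural `k8s.cronjobs` resource; the same pattern applies to the other workload types:
+
+ ```mql
+ k8s.cronjobs.where( podSpec['hostNetwork'] == true ) { name namespace }
+ ```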
+ audit: | + Check for the existence of `hostNetwork: true` setting in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostNetwork` is set to `false` or not present in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-statefulset-hostnetwork + title: Pod should not run with hostNetwork + impact: 80 + mql: k8s.statefulset.podSpec['hostNetwork'] != true + docs: + desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. + audit: | + Check for the existence of `hostNetwork: true` setting in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostNetwork` is set to `false` or not present in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-deployment-hostnetwork + title: Pod should not run with hostNetwork + impact: 80 + mql: k8s.deployment.podSpec['hostNetwork'] != true + docs: + desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. + audit: | + Check for the existence of `hostNetwork: true` setting in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostNetwork` is set to `false` or not present in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-job-hostnetwork + title: Pod should not run with hostNetwork + impact: 80 + mql: k8s.job.podSpec['hostNetwork'] != true + docs: + desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. 
+ audit: | + Check for the existence of `hostNetwork: true` setting in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostNetwork` is set to `false` or not present in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-replicaset-hostnetwork + title: Pod should not run with hostNetwork + impact: 80 + mql: | + k8s.replicaset.podSpec['hostNetwork'] != true + docs: + desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. + audit: | + Check for the existence of `hostNetwork: true` setting in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostNetwork` is set to `false` or not present in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-daemonset-hostnetwork + title: Pod should not run with hostNetwork + impact: 80 + mql: | + k8s.daemonset.podSpec['hostNetwork'] != true + docs: + desc: Running pods with the `hostNetwork` namespace gives containers access to the host's network, including loopback devices. This capability can be used to intercept network traffic, including the traffic of other pods. + audit: | + Check for the existence of `hostNetwork: true` setting in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostNetwork` is set to `false` or not present in `spec`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + hostNetwork: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-pod-hostpid + title: Pod should not run with hostPID + impact: 80 + mql: k8s.pod.podSpec['hostPID'] != true + docs: + desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. 
+ audit: | + Check for the existence of `hostPID: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostPID` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-cronjob-hostpid + title: Pod should not run with hostPID + impact: 80 + mql: k8s.cronjob.podSpec['hostPID'] != true + docs: + desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. + audit: | + Check for the existence of `hostPID: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostPID` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-statefulset-hostpid + title: Pod should not run with hostPID + impact: 80 + mql: k8s.statefulset.podSpec['hostPID'] != true + docs: + desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. + audit: | + Check for the existence of `hostPID: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostPID` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-deployment-hostpid + title: Pod should not run with hostPID + impact: 80 + mql: k8s.deployment.podSpec['hostPID'] != true + docs: + desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. 
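+
+ An equivalent cluster-wide assertion for Deployments might look like the sketch below, assuming the plural `k8s.deployments` resource is available:
+
+ ```mql
+ k8s.deployments.all( podSpec['hostPID'] != true )
+ ```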
+ audit: | + Check for the existence of `hostPID: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostPID` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-job-hostpid + title: Pod should not run with hostPID + impact: 80 + mql: k8s.job.podSpec['hostPID'] != true + docs: + desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. + audit: | + Check for the existence of `hostPID: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostPID` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-replicaset-hostpid + title: Pod should not run with hostPID + impact: 80 + mql: k8s.replicaset.podSpec['hostPID'] != true + docs: + desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. + audit: | + Check for the existence of `hostPID: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostPID` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-daemonset-hostpid + title: Pod should not run with hostPID + impact: 80 + mql: | + k8s.daemonset.podSpec['hostPID'] != true + docs: + desc: Running pods with the `hostPID` namespace gives containers access to the host's process ID namespace and can be used to escalate privileges outside a container. 
+ audit: | + Check for the existence of `hostPID: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostPID` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostPID: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-pod-hostipc + title: Pod should not run with hostIPC + impact: 80 + mql: | + k8s.pod.podSpec['hostIPC'] != true + docs: + desc: | + Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. + audit: | + Check for the existence of `hostIPC: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostIPC` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-cronjob-hostipc + title: Pod should not run with hostIPC + impact: 80 + mql: | + k8s.cronjob.podSpec['hostIPC'] != true + docs: + desc: | + Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. + audit: | + Check for the existence of `hostIPC: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostIPC` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-statefulset-hostipc + title: Pod should not run with hostIPC + impact: 80 + mql: | + k8s.statefulset.podSpec['hostIPC'] != true + docs: + desc: | + Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. + audit: | + Check for the existence of `hostIPC: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostIPC` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-deployment-hostipc + title: Pod should not run with hostIPC + impact: 80 + mql: k8s.deployment.podSpec['hostIPC'] != true + docs: + desc: | + Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. 
+ audit: | + Check for the existence of `hostIPC: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostIPC` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-job-hostipc + title: Pod should not run with hostIPC + impact: 80 + mql: | + k8s.job.podSpec['hostIPC'] != true + docs: + desc: | + Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. + audit: | + Check for the existence of `hostIPC: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostIPC` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-replicaset-hostipc + title: Pod should not run with hostIPC + impact: 80 + mql: | + k8s.replicaset.podSpec['hostIPC'] != true + docs: + desc: | + Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. + audit: | + Check for the existence of `hostIPC: true` setting in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: true + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + remediation: | + Ensure `hostIPC` is set to `false` or not present in `spec`: + + ```yaml + apiVersion: v1 + kind: Pod + spec: + hostIPC: false + containers: + - name: example-app + image: index.docker.io/yournamespace/repository + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: Host namespaces + - uid: mondoo-kubernetes-security-daemonset-hostipc + title: Pod should not run with hostIPC + impact: 80 + mql: | + k8s.daemonset.podSpec['hostIPC'] != true + docs: + desc: | + Enabling `hostIPC` gives containers access to the host's IPC namespace and breaks container isolation. 
+ audit: |
+ Check for the existence of `hostIPC: true` setting in `spec`:
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ hostIPC: true
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+ remediation: |
+ Ensure `hostIPC` is set to `false` or not present in `spec`:
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ hostIPC: false
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+ refs:
+ - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline
+ title: Host namespaces
+ - uid: mondoo-kubernetes-security-pod-serviceaccount
+ title: Pod should not run with the default service account
+ impact: 30
+ mql: |
+ k8s.pod.podSpec['serviceAccount'] == null || k8s.pod.podSpec['serviceAccount'] == k8s.pod.podSpec['serviceAccountName']
+ k8s.pod.podSpec['serviceAccountName'] != '' || k8s.pod.podSpec['automountServiceAccountToken'] == false
+ k8s.pod.podSpec['serviceAccountName'] != 'default' || k8s.pod.podSpec['automountServiceAccountToken'] == false
+ docs:
+ desc: |
+ Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts.
+ These ServiceAccounts should only have the permissions necessary.
+ The Pods should not use the default ServiceAccount (named 'default') that is included in every Namespace.
+ The only valid use for the default ServiceAccount is for Pods that set '.spec.automountServiceAccountToken' to 'false'.
+ In this case, the Pod explicitly asks for no ServiceAccount to be mounted into the Pod's filesystem, and the Pod is therefore a ServiceAccount-less Pod.
+ When every Pod uses the default ServiceAccount and the ServiceAccount's privileges get extended, all Pods get these permissions.
+ When a Pod is compromised, the attacker has access to the API using the default ServiceAccount.
+ audit: |
+ Check that Pods do not set the legacy '.spec.serviceAccount':
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ serviceAccount: some-account
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Check that Pods do not set the '.spec.serviceAccountName' to the empty string (which is interpreted as 'default'), or to 'default'.
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ serviceAccountName: ""
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Even when the deprecated field '.spec.serviceAccount' is not specified, it will get populated by Kubernetes inside the cluster when a manifest is applied.
+ Because of that, we also need to check for the field.
+ remediation: |
+ Create a ServiceAccount specifically for the Pod with only the permissions it needs when interacting with the Kubernetes API. Update the Pod's '.spec.serviceAccountName' to the name of the ServiceAccount created for the Pod.
+
+ Or if the Pod doesn't interact with the Kubernetes API, set the Pod's `.spec.automountServiceAccountToken` field to false so that no ServiceAccount is available to the Pod.
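+
+ For example, a minimal sketch of the first option (the ServiceAccount name `example-app-sa` is only a placeholder):
+
+ ```yaml
+ ---
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+   name: example-app-sa
+ ---
+ apiVersion: v1
+ kind: Pod
+ spec:
+   serviceAccountName: example-app-sa
+   containers:
+   - name: example-app
+     image: index.docker.io/yournamespace/repository
+ ```
+
+ Grant this ServiceAccount only the permissions it needs, for example through a Role and RoleBinding.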
+ - uid: mondoo-kubernetes-security-cronjob-serviceaccount
+ title: Pod should not run with the default service account
+ impact: 30
+ mql: |
+ k8s.cronjob.podSpec['serviceAccount'] == null || k8s.cronjob.podSpec['serviceAccount'] == k8s.cronjob.podSpec['serviceAccountName']
+ k8s.cronjob.podSpec['serviceAccountName'] != '' || k8s.cronjob.podSpec['automountServiceAccountToken'] == false
+ k8s.cronjob.podSpec['serviceAccountName'] != 'default' || k8s.cronjob.podSpec['automountServiceAccountToken'] == false
+ docs:
+ desc: |
+ Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts.
+ These ServiceAccounts should only have the permissions necessary.
+ The Pods should not use the default ServiceAccount (named 'default') that is included in every Namespace.
+ The only valid use for the default ServiceAccount is for Pods that set '.spec.automountServiceAccountToken' to 'false'.
+ In this case, the Pod explicitly asks for no ServiceAccount to be mounted into the Pod's filesystem, and the Pod is therefore a ServiceAccount-less Pod.
+ When every Pod uses the default ServiceAccount and the ServiceAccount's privileges get extended, all Pods get these permissions.
+ When a Pod is compromised, the attacker has access to the API using the default ServiceAccount.
+ audit: |
+ Check that Pods do not set the legacy '.spec.serviceAccount':
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ serviceAccount: some-account
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Check that Pods do not set the '.spec.serviceAccountName' to the empty string (which is interpreted as 'default'), or to 'default'.
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ serviceAccountName: ""
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Even when the deprecated field '.spec.serviceAccount' is not specified, it will get populated by Kubernetes inside the cluster when a manifest is applied.
+ Because of that, we also need to check for the field.
+ remediation: |
+ Create a ServiceAccount specifically for the Pod with only the permissions it needs when interacting with the Kubernetes API. Update the Pod's '.spec.serviceAccountName' to the name of the ServiceAccount created for the Pod.
+
+ Or if the Pod doesn't interact with the Kubernetes API, set the Pod's `.spec.automountServiceAccountToken` field to false so that no ServiceAccount is available to the Pod.
+ - uid: mondoo-kubernetes-security-statefulset-serviceaccount
+ title: Pod should not run with the default service account
+ impact: 30
+ mql: |
+ k8s.statefulset.podSpec['serviceAccount'] == null || k8s.statefulset.podSpec['serviceAccount'] == k8s.statefulset.podSpec['serviceAccountName']
+ k8s.statefulset.podSpec['serviceAccountName'] != '' || k8s.statefulset.podSpec['automountServiceAccountToken'] == false
+ k8s.statefulset.podSpec['serviceAccountName'] != 'default' || k8s.statefulset.podSpec['automountServiceAccountToken'] == false
+ docs:
+ desc: |
+ Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts.
+ These ServiceAccounts should only have the permissions necessary.
+ The Pods should not use the default ServiceAccount (named 'default') that is included in every Namespace.
+ The only valid use for the default ServiceAccount is for Pods that set '.spec.automountServiceAccountToken' to 'false'.
+ In this case, the Pod explicitly asks for no ServiceAccount to be mounted into the Pod's filesystem, and the Pod is therefore a ServiceAccount-less Pod.
+ When every Pod uses the default ServiceAccount and the ServiceAccount's privileges get extended, all Pods get these permissions.
+ When a Pod is compromised, the attacker has access to the API using the default ServiceAccount.
+ audit: |
+ Check that Pods do not set the legacy '.spec.serviceAccount':
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ serviceAccount: some-account
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Check that Pods do not set the '.spec.serviceAccountName' to the empty string (which is interpreted as 'default'), or to 'default'.
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ serviceAccountName: ""
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Even when the deprecated field '.spec.serviceAccount' is not specified, it will get populated by Kubernetes inside the cluster when a manifest is applied.
+ Because of that, we also need to check for the field.
+ remediation: |
+ Create a ServiceAccount specifically for the Pod with only the permissions it needs when interacting with the Kubernetes API. Update the Pod's '.spec.serviceAccountName' to the name of the ServiceAccount created for the Pod.
+
+ Or if the Pod doesn't interact with the Kubernetes API, set the Pod's `.spec.automountServiceAccountToken` field to false so that no ServiceAccount is available to the Pod.
+ - uid: mondoo-kubernetes-security-deployment-serviceaccount
+ title: Pod should not run with the default service account
+ impact: 30
+ mql: |
+ k8s.deployment.podSpec['serviceAccount'] == null || k8s.deployment.podSpec['serviceAccount'] == k8s.deployment.podSpec['serviceAccountName']
+ k8s.deployment.podSpec['serviceAccountName'] != '' || k8s.deployment.podSpec['automountServiceAccountToken'] == false
+ k8s.deployment.podSpec['serviceAccountName'] != 'default' || k8s.deployment.podSpec['automountServiceAccountToken'] == false
+ docs:
+ desc: |
+ Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts.
+ These ServiceAccounts should only have the permissions necessary.
+ The Pods should not use the default ServiceAccount (named 'default') that is included in every Namespace.
+ The only valid use for the default ServiceAccount is for Pods that set '.spec.automountServiceAccountToken' to 'false'.
+ In this case, the Pod explicitly asks for no ServiceAccount to be mounted into the Pod's filesystem, and the Pod is therefore a ServiceAccount-less Pod.
+ When every Pod uses the default ServiceAccount and the ServiceAccount's privileges get extended, all Pods get these permissions.
+ When a Pod is compromised, the attacker has access to the API using the default ServiceAccount.
+ audit: |
+ Check that Pods do not set the legacy '.spec.serviceAccount':
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ serviceAccount: some-account
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Check that Pods do not set the '.spec.serviceAccountName' to the empty string (which is interpreted as 'default'), or to 'default'.
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ serviceAccountName: ""
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Even when the deprecated field '.spec.serviceAccount' is not specified, it will get populated by Kubernetes inside the cluster when a manifest is applied.
+ Because of that, we also need to check for the field.
+ remediation: |
+ Create a ServiceAccount specifically for the Pod with only the permissions it needs when interacting with the Kubernetes API. Update the Pod's '.spec.serviceAccountName' to the name of the ServiceAccount created for the Pod.
+
+ Or if the Pod doesn't interact with the Kubernetes API, set the Pod's `.spec.automountServiceAccountToken` field to false so that no ServiceAccount is available to the Pod.
+ - uid: mondoo-kubernetes-security-job-serviceaccount
+ title: Pod should not run with the default service account
+ impact: 30
+ mql: |
+ k8s.job.podSpec['serviceAccount'] == null || k8s.job.podSpec['serviceAccount'] == k8s.job.podSpec['serviceAccountName']
+ k8s.job.podSpec['serviceAccountName'] != '' || k8s.job.podSpec['automountServiceAccountToken'] == false
+ k8s.job.podSpec['serviceAccountName'] != 'default' || k8s.job.podSpec['automountServiceAccountToken'] == false
+ docs:
+ desc: |
+ Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts.
+ These ServiceAccounts should only have the permissions necessary.
+ The Pods should not use the default ServiceAccount (named 'default') that is included in every Namespace.
+ The only valid use for the default ServiceAccount is for Pods that set '.spec.automountServiceAccountToken' to 'false'.
+ In this case, the Pod explicitly asks for no ServiceAccount to be mounted into the Pod's filesystem, and the Pod is therefore a ServiceAccount-less Pod.
+ When every Pod uses the default ServiceAccount and the ServiceAccount's privileges get extended, all Pods get these permissions.
+ When a Pod is compromised, the attacker has access to the API using the default ServiceAccount.
+ audit: |
+ Check that Pods do not set the legacy '.spec.serviceAccount':
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ serviceAccount: some-account
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Check that Pods do not set the '.spec.serviceAccountName' to the empty string (which is interpreted as 'default'), or to 'default'.
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ serviceAccountName: ""
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Even when the deprecated field '.spec.serviceAccount' is not specified, it will get populated by Kubernetes inside the cluster when a manifest is applied.
+ Because of that, we also need to check for the field.
+ remediation: |
+ Create a ServiceAccount specifically for the Pod with only the permissions it needs when interacting with the Kubernetes API. Update the Pod's '.spec.serviceAccountName' to the name of the ServiceAccount created for the Pod.
+
+ Or if the Pod doesn't interact with the Kubernetes API, set the Pod's `.spec.automountServiceAccountToken` field to false so that no ServiceAccount is available to the Pod.
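+
+ For example, a minimal sketch of the second option, for a Pod that never talks to the Kubernetes API:
+
+ ```yaml
+ ---
+ apiVersion: v1
+ kind: Pod
+ spec:
+   automountServiceAccountToken: false
+   containers:
+   - name: example-app
+     image: index.docker.io/yournamespace/repository
+ ```
+
+ With `automountServiceAccountToken: false`, no ServiceAccount token is mounted into the Pod, so the default ServiceAccount cannot be abused even if the Pod is compromised.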
+ - uid: mondoo-kubernetes-security-replicaset-serviceaccount
+ title: Pod should not run with the default service account
+ impact: 30
+ mql: |
+ k8s.replicaset.podSpec['serviceAccount'] == null || k8s.replicaset.podSpec['serviceAccount'] == k8s.replicaset.podSpec['serviceAccountName']
+ k8s.replicaset.podSpec['serviceAccountName'] != '' || k8s.replicaset.podSpec['automountServiceAccountToken'] == false
+ k8s.replicaset.podSpec['serviceAccountName'] != 'default' || k8s.replicaset.podSpec['automountServiceAccountToken'] == false
+ docs:
+ desc: |
+ Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts.
+ These ServiceAccounts should only have the permissions necessary.
+ The Pods should not use the default ServiceAccount (named 'default') that is included in every Namespace.
+ The only valid use for the default ServiceAccount is for Pods that set '.spec.automountServiceAccountToken' to 'false'.
+ In this case, the Pod explicitly asks for no ServiceAccount to be mounted into the Pod's filesystem, and the Pod is therefore a ServiceAccount-less Pod.
+ When every Pod uses the default ServiceAccount and the ServiceAccount's privileges get extended, all Pods get these permissions.
+ When a Pod is compromised, the attacker has access to the API using the default ServiceAccount.
+ audit: |
+ Check that Pods do not set the legacy '.spec.serviceAccount':
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ serviceAccount: some-account
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Check that Pods do not set the '.spec.serviceAccountName' to the empty string (which is interpreted as 'default'), or to 'default'.
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ serviceAccountName: ""
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Even when the deprecated field '.spec.serviceAccount' is not specified, it will get populated by Kubernetes inside the cluster when a manifest is applied.
+ Because of that, we also need to check for the field.
+ remediation: |
+ Create a ServiceAccount specifically for the Pod with only the permissions it needs when interacting with the Kubernetes API. Update the Pod's '.spec.serviceAccountName' to the name of the ServiceAccount created for the Pod.
+
+ Or if the Pod doesn't interact with the Kubernetes API, set the Pod's `.spec.automountServiceAccountToken` field to false so that no ServiceAccount is available to the Pod.
+ - uid: mondoo-kubernetes-security-daemonset-serviceaccount
+ title: Pod should not run with the default service account
+ impact: 30
+ mql: |
+ k8s.daemonset.podSpec['serviceAccount'] == null || k8s.daemonset.podSpec['serviceAccount'] == k8s.daemonset.podSpec['serviceAccountName']
+ k8s.daemonset.podSpec['serviceAccountName'] != '' || k8s.daemonset.podSpec['automountServiceAccountToken'] == false
+ k8s.daemonset.podSpec['serviceAccountName'] != 'default' || k8s.daemonset.podSpec['automountServiceAccountToken'] == false
+ docs:
+ desc: |
+ Pods that interact with the Kubernetes API using a ServiceAccount should use specific ServiceAccounts.
+ These ServiceAccounts should only have the permissions necessary.
+ The Pods should not use the default ServiceAccount (named 'default') that is included in every Namespace.
+ The only valid use for the default ServiceAccount is for Pods that set '.spec.automountServiceAccountToken' to 'false'.
+ In this case, the Pod explicitly asks for no ServiceAccount to be mounted into the Pod's filesystem, and the Pod is therefore a ServiceAccount-less Pod.
+ When every Pod uses the default ServiceAccount and the ServiceAccount's privileges get extended, all Pods get these permissions.
+ When a Pod is compromised, the attacker has access to the API using the default ServiceAccount.
+ audit: |
+ Check that Pods do not set the legacy '.spec.serviceAccount':
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ serviceAccount: some-account
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Check that Pods do not set the '.spec.serviceAccountName' to the empty string (which is interpreted as 'default'), or to 'default'.
+
+ ```yaml
+ apiVersion: v1
+ kind: Pod
+ spec:
+ serviceAccountName: ""
+ containers:
+ - name: example-app
+ image: index.docker.io/yournamespace/repository
+ ```
+
+ Even when the deprecated field '.spec.serviceAccount' is not specified, it will get populated by Kubernetes inside the cluster when a manifest is applied.
+ Because of that, we also need to check for the field.
+ remediation: |
+ Create a ServiceAccount specifically for the Pod with only the permissions it needs when interacting with the Kubernetes API. Update the Pod's '.spec.serviceAccountName' to the name of the ServiceAccount created for the Pod.
+
+ Or if the Pod doesn't interact with the Kubernetes API, set the Pod's `.spec.automountServiceAccountToken` field to false so that no ServiceAccount is available to the Pod.
+ - uid: mondoo-kubernetes-security-pod-imagepull
+ title: Container image pull should be consistent
+ impact: 60
+ mql: |
+ k8s.pod.ephemeralContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true )
+ k8s.pod.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true )
+ k8s.pod.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true )
+ docs:
+ desc: |
+ It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA).
+ Avoid using rolling tags like `latest` or `master` as they can change over time.
+ audit: | + Check for the existence of `imagePullPolicy: Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + remediation: | + Ensure `imagePullPolicy` is set to `Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + refs: + - url: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + title: Image pull policy + - uid: mondoo-kubernetes-security-cronjob-imagepull + title: Container image pull should be consistent + impact: 60 + mql: | + k8s.cronjob.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.cronjob.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + docs: + desc: | + It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). + Avoid using rolling tags like `latest` or `master` as they can change over time. + audit: | + Check for the existence of `imagePullPolicy: Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + remediation: | + Ensure `imagePullPolicy` is set to `Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + refs: + - url: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + title: Image pull policy + - uid: mondoo-kubernetes-security-statefulset-imagepull + title: Container image pull should be consistent + impact: 60 + mql: | + k8s.statefulset.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.statefulset.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + docs: + desc: | + It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). + Avoid using rolling tags like `latest` or `master` as they can change over time. 
+ audit: |
+ Check for the existence of `imagePullPolicy: Always` and ensure `image` uses either a tag or a digest (SHA):
+
+ ```yaml
+ ---
+ apiVersion: v1
+ kind: Pod
+ spec:
+ containers:
+ - name: app
+ image: images.my-company.example/app:v1.2.3
+ imagePullPolicy: Always
+ ```
+ remediation: |
+ Ensure `imagePullPolicy` is set to `Always` and ensure `image` uses either a tag or a digest (SHA):
+
+ ```yaml
+ ---
+ apiVersion: v1
+ kind: Pod
+ spec:
+ containers:
+ - name: app
+ image: images.my-company.example/app:v1.2.3
+ imagePullPolicy: Always
+ ```
+ refs:
+ - url: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy
+ title: Image pull policy
+ - uid: mondoo-kubernetes-security-deployment-imagepull
+ title: Container image pull should be consistent
+ impact: 60
+ mql: |
+ k8s.deployment.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true )
+ k8s.deployment.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true )
+ docs:
+ desc: |
+ It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA).
+ Avoid using rolling tags like `latest` or `master` as they can change over time.
+ audit: |
+ Check for the existence of `imagePullPolicy: Always` and ensure `image` uses either a tag or a digest (SHA):
+
+ ```yaml
+ ---
+ apiVersion: v1
+ kind: Pod
+ spec:
+ containers:
+ - name: app
+ image: images.my-company.example/app:v1.2.3
+ imagePullPolicy: Always
+ ```
+ remediation: |
+ Ensure `imagePullPolicy` is set to `Always` and ensure `image` uses either a tag or a digest (SHA):
+
+ ```yaml
+ ---
+ apiVersion: v1
+ kind: Pod
+ spec:
+ containers:
+ - name: app
+ image: images.my-company.example/app:v1.2.3
+ imagePullPolicy: Always
+ ```
+ refs:
+ - url: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy
+ title: Image pull policy
+ - uid: mondoo-kubernetes-security-job-imagepull
+ title: Container image pull should be consistent
+ impact: 60
+ mql: |
+ k8s.job.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true )
+ k8s.job.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true )
+ docs:
+ desc: |
+ It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA).
+ Avoid using rolling tags like `latest` or `master` as they can change over time.
+ audit: | + Check for the existence of `imagePullPolicy: Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + remediation: | + Ensure `imagePullPolicy` is set to `Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + refs: + - url: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + title: Image pull policy + - uid: mondoo-kubernetes-security-replicaset-imagepull + title: Container image pull should be consistent + impact: 60 + mql: | + k8s.replicaset.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.replicaset.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + docs: + desc: | + It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). + Avoid using rolling tags like `latest` or `master` as they can change over time. + audit: | + Check for the existence of `imagePullPolicy: Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + remediation: | + Ensure `imagePullPolicy` is set to `Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + refs: + - url: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + title: Image pull policy + - uid: mondoo-kubernetes-security-daemonset-imagepull + title: Container image pull should be consistent + impact: 60 + mql: | + k8s.daemonset.containers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + k8s.daemonset.initContainers.all( imagePullPolicy == 'Always' && image != /:latest/ && image.contains(':') == true ) + docs: + desc: | + It's important that each time a pod is started the same container is pulled, so that services across pods behave the same. To ensure the same container is always used, manifests should set `imagePullPolicy: Always` and the `image` configuration should pull either a tag or a digest (SHA). + Avoid using rolling tags like `latest` or `master` as they can change over time. 
+ audit: | + Check for the existence of `imagePullPolicy: Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + remediation: | + Ensure `imagePullPolicy` is set to `Always` and ensure `image` uses either a tag or a digest (SHA): + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + imagePullPolicy: Always + ``` + refs: + - url: https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy + title: Image pull policy + - uid: mondoo-kubernetes-security-pod-limitcpu + title: Container should have a CPU limit + impact: 20 + mql: | + k8s.pod.initContainers.all( resources['limits']['cpu'] != null ) + k8s.pod.containers.all( resources['limits']['cpu'] != null ) + docs: + desc: | + Kubernetes Pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of CPU resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + remediation: | + Define the required resources for CPU `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + title: Resource Management for Pods and Containers + - uid: mondoo-kubernetes-security-cronjob-limitcpu + title: Container should have a CPU limit + impact: 20 + mql: | + k8s.cronjob.initContainers.all( resources['limits']['cpu'] != null ) + k8s.cronjob.containers.all( resources['limits']['cpu'] != null ) + docs: + desc: | + Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of CPU resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + remediation: | + Define the required resources for CPU `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + title: Resource Management for Pods and Containers + - uid: mondoo-kubernetes-security-statefulset-limitcpu + title: Container should have a CPU limit + impact: 20 + mql: | + k8s.statefulset.initContainers.all( resources['limits']['cpu'] != null ) + k8s.statefulset.containers.all( resources['limits']['cpu'] != null ) + docs: + desc: | + Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
+ audit: | + Check for the existence of CPU resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + remediation: | + Define the required resources for CPU `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + title: Resource Management for Pods and Containers + - uid: mondoo-kubernetes-security-deployment-limitcpu + title: Container should have a CPU limit + impact: 20 + mql: | + k8s.deployment.initContainers.all( resources['limits']['cpu'] != null ) + k8s.deployment.containers.all( resources['limits']['cpu'] != null ) + docs: + desc: | + Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of CPU resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + remediation: | + Define the required resources for CPU `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + title: Resource Management for Pods and Containers + - uid: mondoo-kubernetes-security-job-limitcpu + title: Container should have a CPU limit + impact: 20 + mql: | + k8s.job.initContainers.all( resources['limits']['cpu'] != null ) + k8s.job.containers.all( resources['limits']['cpu'] != null ) + docs: + desc: | + Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of CPU resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + remediation: | + Define the required resources for CPU `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + title: Resource Management for Pods and Containers + - uid: mondoo-kubernetes-security-replicaset-limitcpu + title: Container should have a CPU limit + impact: 20 + mql: | + k8s.replicaset.initContainers.all( resources['limits']['cpu'] != null ) + k8s.replicaset.containers.all( resources['limits']['cpu'] != null ) + docs: + desc: | + Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
+ audit: | + Check for the existence of CPU resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + remediation: | + Define the required resources for CPU `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + title: Resource Management for Pods and Containers + - uid: mondoo-kubernetes-security-daemonset-limitcpu + title: Container should have a CPU limit + impact: 20 + mql: | + k8s.daemonset.initContainers.all( resources['limits']['cpu'] != null ) + k8s.daemonset.containers.all( resources['limits']['cpu'] != null ) + docs: + desc: | + Kubernetes pod configurations should set CPU limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of CPU resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + remediation: | + Define the required resources for CPU `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + cpu: "500m" + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + title: Resource Management for Pods and Containers + - uid: mondoo-kubernetes-security-pod-limitmemory + title: Container should have a memory limit + impact: 20 + mql: | + k8s.pod.initContainers.all( resources['limits']['memory'] != null ) + k8s.pod.containers.all( resources['limits']['memory'] != null ) + docs: + desc: | + Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of memory resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + remediation: | + Define the required resources for memory `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + - uid: mondoo-kubernetes-security-cronjob-limitmemory + title: Container should have a memory limit + impact: 20 + mql: | + k8s.cronjob.initContainers.all( resources['limits']['memory'] != null ) + k8s.cronjob.containers.all( resources['limits']['memory'] != null ) + docs: + desc: | + Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
+ audit: | + Check for the existence of memory resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + remediation: | + Define the required resources for memory `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + - uid: mondoo-kubernetes-security-statefulset-limitmemory + title: Container should have a memory limit + impact: 20 + mql: | + k8s.statefulset.initContainers.all( resources['limits']['memory'] != null ) + k8s.statefulset.containers.all( resources['limits']['memory'] != null ) + docs: + desc: | + Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of memory resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + remediation: | + Define the required resources for memory `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + - uid: mondoo-kubernetes-security-deployment-limitmemory + title: Container should have a memory limit + impact: 20 + mql: | + k8s.deployment.initContainers.all( resources['limits']['memory'] != null ) + k8s.deployment.containers.all( resources['limits']['memory'] != null ) + docs: + desc: | + Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of memory resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + remediation: | + Define the required resources for memory `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + - uid: mondoo-kubernetes-security-job-limitmemory + title: Container should have a memory limit + impact: 20 + mql: | + k8s.job.initContainers.all( resources['limits']['memory'] != null ) + k8s.job.containers.all( resources['limits']['memory'] != null ) + docs: + desc: | + Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
+ audit: | + Check for the existence of memory resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + remediation: | + Define the required resources for memory `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + - uid: mondoo-kubernetes-security-replicaset-limitmemory + title: Container should have a memory limit + impact: 20 + mql: | + k8s.replicaset.initContainers.all( resources['limits']['memory'] != null ) + k8s.replicaset.containers.all( resources['limits']['memory'] != null ) + docs: + desc: | + Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. + audit: | + Check for the existence of memory resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + remediation: | + Define the required resources for memory `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + - uid: mondoo-kubernetes-security-daemonset-limitmemory + title: Container should have a memory limit + impact: 20 + mql: | + k8s.daemonset.initContainers.all( resources['limits']['memory'] != null ) + k8s.daemonset.containers.all( resources['limits']['memory'] != null ) + docs: + desc: | + Kubernetes pod configurations should set memory limits for containers defined in the manifest. This prevents the pod from exhausting the host's resources in case of an application malfunction or an attack. 
+ audit: | + Check for the existence of memory resources in `limits`: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + remediation: | + Define the required resources for memory `limits` in the manifest: + + ```yaml + --- + apiVersion: v1 + kind: Pod + spec: + containers: + - name: app + image: images.my-company.example/app:v1.2.3 + resources: + limits: + memory: "1Gi" + ``` + - uid: mondoo-kubernetes-security-pod-capability-net-raw + title: Pods should not run with NET_RAW capability + impact: 80 + mql: | + k8s.pod.podSpec['ephemeralContainers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['ephemeralContainers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['initContainers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['initContainers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.pod.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + docs: + desc: | + Pods should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. + audit: | + Check to ensure no Pods have explicitly asked for the NET_RAW capability (or asked for ALL capabilities which includes NET_RAW): + + ```kubectl get pods -A -o json | jq -r '.items[] | select(.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|NET_RAW")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + + + Additionally, a Pod that doesn't define a list of capabilities to drop at all, or that has a non-empty drop list that doesn't drop NET_RAW (or the ALL capability which includes NET_RAW) will implicitly run with NET_RAW. List these Pods with: + + ```kubectl get pods -A -o json | jq -r '.items[] | select( .spec.containers[].securityContext.capabilities.drop | . 
== null or ( any(.[] ; ascii_upcase | test("ALL|NET_RAW")) | not) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Pods that explicitly add the NET_RAW or ALL capability, update the Pods (or the Deployments/DaemonSets/CronJobs/etc that produced the Pods) to ensure they do not ask for the NET_RAW or ALL capability: + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: examplePod + namespace: example-namespace + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "NET_RAW" or "ALL" in the list of capabilities added + ``` + + For any Pods that do not define a list of capabilities to drop or that define a list but do not drop NET_RAW, update the Pods (or the Deployments/DaemonSets/CronJobs/etc that produced the Pods) to ensure they drop ALL or NET_RAW: + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: example + namespace: example-namespace + spec: + containers: + - securityContext: + capabilities: + drop: ["NET_RAW"] # <-- or ensure "ALL" in the list of capabilities to drop + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + title: 'Kubernetes Security Standards: Capabilities' + - uid: mondoo-kubernetes-security-daemonset-capability-net-raw + title: DaemonSets should not run with NET_RAW capability + impact: 80 + mql: | + k8s.daemonset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.daemonset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + docs: + desc: | + DaemonSets should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. + audit: | + Check to ensure no DaemonSets have explicitly asked for the NET_RAW capability (or asked for ALL capabilities which includes NET_RAW): + + ```kubectl get daemonsets -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|NET_RAW")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + + + Additionally, a DaemonSet that doesn't define a list of capabilities to drop at all, or that has a non-empty drop list that doesn't drop NET_RAW (or the ALL capability which includes NET_RAW) will implicitly run with NET_RAW. List these DaemonSets with: + + ```kubectl get daemonsets -A -o json | jq -r '.items[] | select( .spec.template.spec.containers[].securityContext.capabilities.drop | . 
== null or ( any(.[] ; ascii_upcase | test("ALL|NET_RAW")) | not) ) | .metadata.namespace + "/" + .metadata.name' | uniq```
+ remediation: |
+ For any DaemonSets that explicitly add the NET_RAW or ALL capability, update them to ensure they do not ask for the NET_RAW or ALL capability:
+
+ ```yaml
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: example
+ namespace: example-namespace
+ spec:
+ template:
+ spec:
+ containers:
+ - securityContext:
+ capabilities:
+ add: [] # <-- ensure no "NET_RAW" or "ALL" in the list of capabilities added
+ ```
+
+ For any DaemonSets that do not define a list of capabilities to drop or that define a list but do not drop NET_RAW, update them to ensure they drop ALL or NET_RAW:
+
+ ```yaml
+ apiVersion: apps/v1
+ kind: DaemonSet
+ metadata:
+ name: example
+ namespace: example-namespace
+ spec:
+ template:
+ spec:
+ containers:
+ - securityContext:
+ capabilities:
+ drop: ["NET_RAW"] # <-- or ensure "ALL" in the list of capabilities to drop
+ ```
+ refs:
+ - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
+ title: 'Kubernetes Security Standards: Capabilities'
+ - uid: mondoo-kubernetes-security-replicaset-capability-net-raw
+ title: ReplicaSets should not run with NET_RAW capability
+ impact: 80
+ mql: |
+ k8s.replicaset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } )
+ k8s.replicaset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } )
+ docs:
+ desc: |
+ ReplicaSets should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses.
+ audit: |
+ Check to ensure no ReplicaSets have explicitly asked for the NET_RAW capability (or asked for ALL capabilities which includes NET_RAW):
+
+ ```kubectl get replicasets -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|NET_RAW")) ) | .metadata.namespace + "/" + .metadata.name' | uniq```
+
+
+ Additionally, a ReplicaSet that doesn't define a list of capabilities to drop at all, or that has a non-empty drop list that doesn't drop NET_RAW (or the ALL capability which includes NET_RAW) will implicitly run with NET_RAW. List these ReplicaSets with:
+
+ ```kubectl get replicasets -A -o json | jq -r '.items[] | select( .spec.template.spec.containers[].securityContext.capabilities.drop | . 
== null or ( any(.[] ; ascii_upcase | test("ALL|NET_RAW")) | not) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any ReplicaSets that explicitly add the NET_RAW or ALL capability, update them to ensure they do not ask for the NET_RAW or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: ReplicaSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "NET_RAW" or "ALL" in the list of capabilities added + ``` + + For any ReplicaSets that do not define a list of capabilities to drop or that define a list but do not drop NET_RAW, update them to ensure they drop ALL or NET_RAW: + + ```yaml + apiVersion: apps/v1 + kind: ReplicaSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + drop: ["NET_RAW"] # <-- or ensure "ALL" in the list of capabilities to drop + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + title: 'Kubernetes Security Standards: Capabilities' + - uid: mondoo-kubernetes-security-job-capability-net-raw + title: Jobs should not run with NET_RAW capability + impact: 80 + mql: | + k8s.job.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.job.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + docs: + desc: | + Jobs should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. + audit: | + Check to ensure no Jobs have explicitly asked for the NET_RAW capability (or asked for ALL capabilities which includes NET_RAW): + + ```kubectl get jobs -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|NET_RAW")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + + + Additionally, a Job that doesn't define a list of capabilities to drop at all, or that has a non-empty drop list that doesn't drop NET_RAW (or the ALL capability which includes NET_RAW) will implicitly run with NET_RAW. List these Jobs with: + + ```kubectl get jobs -A -o json | jq -r '.items[] | select( .spec.template.spec.containers[].securityContext.capabilities.drop | . 
== null or ( any(.[] ; ascii_upcase | test("ALL|NET_RAW")) | not) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Jobs that explicitly add the NET_RAW or ALL capability, update them to ensure they do not ask for the NET_RAW or ALL capability: + + ```yaml + apiVersion: batch/v1 + kind: Job + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "NET_RAW" or "ALL" in the list of capabilities added + ``` + + For any Jobs that do not define a list of capabilities to drop or that define a list but do not drop NET_RAW, update them to ensure they drop ALL or NET_RAW: + + ```yaml + apiVersion: batch/v1 + kind: Job + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + drop: ["NET_RAW"] # <-- or ensure "ALL" in the list of capabilities to drop + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + title: 'Kubernetes Security Standards: Capabilities' + - uid: mondoo-kubernetes-security-deployment-capability-net-raw + title: Deployments should not run with NET_RAW capability + impact: 80 + mql: | + k8s.deployment.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.deployment.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + docs: + desc: | + Deployments should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. + audit: | + Check to ensure no Deployments have explicitly asked for the NET_RAW capability (or asked for ALL capabilities which includes NET_RAW): + + ```kubectl get deployments -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|NET_RAW")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + + + Additionally, a Deployment that doesn't define a list of capabilities to drop at all, or that has a non-empty drop list that doesn't drop NET_RAW (or the ALL capability which includes NET_RAW) will implicitly run with NET_RAW. List these Deployments with: + + ```kubectl get deployments -A -o json | jq -r '.items[] | select( .spec.template.spec.containers[].securityContext.capabilities.drop | . 
== null or ( any(.[] ; ascii_upcase | test("ALL|NET_RAW")) | not) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Deployments that explicitly add the NET_RAW or ALL capability, update them to ensure they do not ask for the NET_RAW or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "NET_RAW" or "ALL" in the list of capabilities added + ``` + + For any Deployments that do not define a list of capabilities to drop or that define a list but do not drop NET_RAW, update them to ensure they drop ALL or NET_RAW: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + drop: ["NET_RAW"] # <-- or ensure "ALL" in the list of capabilities to drop + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + title: 'Kubernetes Security Standards: Capabilities' + - uid: mondoo-kubernetes-security-statefulset-capability-net-raw + title: StatefulSets should not run with NET_RAW capability + impact: 80 + mql: | + k8s.statefulset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.statefulset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + docs: + desc: | + StatefulSets should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. + audit: | + Check to ensure no StatefulSets have explicitly asked for the NET_RAW capability (or asked for ALL capabilities which includes NET_RAW): + + ```kubectl get statefulsets -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|NET_RAW")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + + + Additionally, a StatefulSet that doesn't define a list of capabilities to drop at all, or that has a non-empty drop list that doesn't drop NET_RAW (or the ALL capability which includes NET_RAW) will implicitly run with NET_RAW. List these StatefulSets with: + + ```kubectl get statefulsets -A -o json | jq -r '.items[] | select( .spec.template.spec.containers[].securityContext.capabilities.drop | . 
== null or ( any(.[] ; ascii_upcase | test("ALL|NET_RAW")) | not) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any StatefulSets that explicitly add the NET_RAW or ALL capability, update them to ensure they do not ask for the NET_RAW or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "NET_RAW" or "ALL" in the list of capabilities added + ``` + + For any StatefulSets that do not define a list of capabilities to drop or that define a list but do not drop NET_RAW, update them to ensure they drop ALL or NET_RAW: + + ```yaml + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + drop: ["NET_RAW"] # <-- or ensure "ALL" in the list of capabilities to drop + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + title: 'Kubernetes Security Standards: Capabilities' + - uid: mondoo-kubernetes-security-cronjob-capability-net-raw + title: CronJobs should not run with NET_RAW capability + impact: 80 + mql: | + k8s.cronjob.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + k8s.cronjob.podSpec['containers'].all( _['securityContext']['capabilities'] { _['drop'].any( _.upcase == /^NET_RAW$|^ALL$/ ) } ) + docs: + desc: | + CronJobs should not run with NET_RAW capability. This allows a process to write raw packets to the network interface which can allow crafting packets like malicious ARP and/or DNS responses. + audit: | + Check to ensure no CronJobs have explicitly asked for the NET_RAW capability (or asked for ALL capabilities which includes NET_RAW): + + ```kubectl get cronjobs -A -o json | jq -r '.items[] | select(.spec.jobTemplate.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|NET_RAW")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + + + Additionally, a CronJob that doesn't define a list of capabilities to drop at all, or that has a non-empty drop list that doesn't drop NET_RAW (or the ALL capability which includes NET_RAW) will implicitly run with NET_RAW. List these CronJobs with: + + ```kubectl get cronjobs -A -o json | jq -r '.items[] | select( .spec.jobTemplate.spec.template.spec.containers[].securityContext.capabilities.drop | . 
== null or ( any(.[] ; ascii_upcase | test("ALL|NET_RAW")) | not) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any CronJobs that explicitly add the NET_RAW or ALL capability, update them to ensure they do not ask for the NET_RAW or ALL capability: + + ```yaml + apiVersion: batch/v1 + kind: CronJob + metadata: + name: example + namespace: example-namespace + spec: + jobTemplate: + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "NET_RAW" or "ALL" in the list of capabilities added + ``` + + For any CronJobs that do not define a list of capabilities to drop or that define a list but do not drop NET_RAW, update them to ensure they drop ALL or NET_RAW: + + ```yaml + apiVersion: batch/v1 + kind: CronJob + metadata: + name: example + namespace: example-namespace + spec: + jobTemplate: + spec: + template: + spec: + containers: + - securityContext: + capabilities: + drop: ["NET_RAW"] # <-- or ensure "ALL" in the list of capabilities to drop + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + title: 'Kubernetes Security Standards: Capabilities' + - uid: mondoo-kubernetes-security-pod-capability-sys-admin + title: Pods should not run with SYS_ADMIN capability + impact: 80 + mql: | + k8s.pod.podSpec['initContainers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + k8s.pod.podSpec['ephemeralContainers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + k8s.pod.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + docs: + desc: | + Pods should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. + It even allows containers not running as root to run certain tasks as if the user was root. + audit: | + Check to ensure no Pods have explicitly asked for the SYS_ADMIN capability (or asked for ALL capabilities which includes SYS_ADMIN): + + ```kubectl get pods -A -o json | jq -r '.items[] | select(.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|SYS_ADMIN")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Pods that explicitly add the SYS_ADMIN or ALL capability, update the Pods (or the Deployments/DaemonSets/CronJobs/etc that produced the Pods) to ensure they do not ask for the SYS_ADMIN or ALL capability: + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: examplePod + namespace: example-namespace + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "SYS_ADMIN" or "ALL" in the list of capabilities added + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Capabilities' + - url: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities + title: Docker default capabilities + - uid: mondoo-kubernetes-security-daemonset-capability-sys-admin + title: DaemonSets should not run with SYS_ADMIN capability + impact: 80 + mql: | + k8s.daemonset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + docs: + desc: | + DaemonSets should not run with SYS_ADMIN capability. 
The SYS_ADMIN capability enables a wide range of elevated system calls. + It even allows containers not running as root to run certain tasks as if the user was root. + audit: | + Check to ensure no DaemonSets have explicitly asked for the SYS_ADMIN capability (or asked for ALL capabilities which includes SYS_ADMIN): + + ```kubectl get daemonsets -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|SYS_ADMIN")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any DaemonSets that explicitly add the SYS_ADMIN or ALL capability, update them to ensure they do not ask for the SYS_ADMIN or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "SYS_ADMIN" or "ALL" in the list of capabilities added + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Capabilities' + - url: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities + title: Docker default capabilities + - uid: mondoo-kubernetes-security-replicaset-capability-sys-admin + title: ReplicaSets should not run with SYS_ADMIN capability + impact: 80 + mql: | + k8s.replicaset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + docs: + desc: | + ReplicaSets should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. + It even allows containers not running as root to run certain tasks as if the user was root. + audit: | + Check to ensure no ReplicaSets have explicitly asked for the SYS_ADMIN capability (or asked for ALL capabilities which includes SYS_ADMIN): + + ```kubectl get replicasets -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|SYS_ADMIN")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any ReplicaSets that explicitly add the SYS_ADMIN or ALL capability, update them to ensure they do not ask for the SYS_ADMIN or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: ReplicaSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "SYS_ADMIN" or "ALL" in the list of capabilities added + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Capabilities' + - url: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities + title: Docker default capabilities + - uid: mondoo-kubernetes-security-job-capability-sys-admin + title: Jobs should not run with SYS_ADMIN capability + impact: 80 + mql: | + k8s.job.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + docs: + desc: | + Jobs should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. + It even allows containers not running as root to run certain tasks as if the user was root. 
+ audit: | + Check to ensure no Jobs have explicitly asked for the SYS_ADMIN capability (or asked for ALL capabilities which includes SYS_ADMIN): + + ```kubectl get jobs -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|SYS_ADMIN")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Jobs that explicitly add the SYS_ADMIN or ALL capability, update them to ensure they do not ask for the SYS_ADMIN or ALL capability: + + ```yaml + apiVersion: batch/v1 + kind: Job + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "SYS_ADMIN" or "ALL" in the list of capabilities added + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Capabilities' + - url: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities + title: Docker default capabilities + - uid: mondoo-kubernetes-security-deployment-capability-sys-admin + title: Deployments should not run with SYS_ADMIN capability + impact: 80 + mql: k8s.deployment.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + docs: + desc: | + Deployments should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. + It even allows containers not running as root to run certain tasks as if the user was root. + audit: | + Check to ensure no Deployments have explicitly asked for the SYS_ADMIN capability (or asked for ALL capabilities which includes SYS_ADMIN): + + ```kubectl get deployments -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|SYS_ADMIN")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Deployments that explicitly add the SYS_ADMIN or ALL capability, update them to ensure they do not ask for the SYS_ADMIN or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "SYS_ADMIN" or "ALL" in the list of capabilities added + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Capabilities' + - url: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities + title: Docker default capabilities + - uid: mondoo-kubernetes-security-statefulset-capability-sys-admin + title: StatefulSets should not run with SYS_ADMIN capability + impact: 80 + mql: | + k8s.statefulset.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + docs: + desc: | + StatefulSets should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. + It even allows containers not running as root to run certain tasks as if the user was root. 
+ audit: | + Check to ensure no StatefulSets have explicitly asked for the SYS_ADMIN capability (or asked for ALL capabilities which includes SYS_ADMIN): + + ```kubectl get statefulsets -A -o json | jq -r '.items[] | select(.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|SYS_ADMIN")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any StatefulSets that explicitly add the SYS_ADMIN or ALL capability, update them to ensure they do not ask for the SYS_ADMIN or ALL capability: + + ```yaml + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "SYS_ADMIN" or "ALL" in the list of capabilities added + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Capabilities' + - url: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities + title: Docker default capabilities + - uid: mondoo-kubernetes-security-cronjob-capability-sys-admin + title: CronJobs should not run with SYS_ADMIN capability + impact: 80 + mql: | + k8s.cronjob.podSpec['containers'].all( _['securityContext']['capabilities'] { _['add'] == null || _['add'].none( _.upcase == /^SYS_ADMIN$|^ALL$/ ) } ) + docs: + desc: | + CronJobs should not run with SYS_ADMIN capability. The SYS_ADMIN capability enables a wide range of elevated system calls. + It even allows containers not running as root to run certain tasks as if the user was root. + audit: | + Check to ensure no CronJobs have explicitly asked for the SYS_ADMIN capability (or asked for ALL capabilities which includes SYS_ADMIN): + + ```kubectl get cronjobs -A -o json | jq -r '.items[] | select(.spec.jobTemplate.spec.template.spec.containers[].securityContext.capabilities.add | . != null and any(.[] ; ascii_upcase | test("ALL|SYS_ADMIN")) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any CronJobs that explicitly add the SYS_ADMIN or ALL capability, update them to ensure they do not ask for the SYS_ADMIN or ALL capability: + + ```yaml + apiVersion: batch/v1 + kind: CronJob + metadata: + name: example + namespace: example-namespace + spec: + jobTemplate: + spec: + template: + spec: + containers: + - securityContext: + capabilities: + add: [] # <-- ensure no "SYS_ADMIN" or "ALL" in the list of capabilities added + ``` + refs: + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Capabilities' + - url: https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities + title: Docker default capabilities + - uid: mondoo-kubernetes-security-pod-ports-hostport + title: Pods should not bind to a host port + impact: 80 + mql: | + k8s.pod.podSpec['containers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + k8s.pod.podSpec['initContainers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + docs: + desc: | + Pods should not bind to the underlying host port. This allows bypassing certain network access control systems. + Host ports also expose the Container outside the Kubernetes cluster, which might not be intended. 
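+ + If a workload genuinely needs to be reachable from outside the cluster, exposing it through a Service (for example of type NodePort or LoadBalancer) is usually a better option than a hostPort. A minimal sketch, using placeholder names that are not part of this policy: + + ```yaml + apiVersion: v1 + kind: Service + metadata: + name: example + namespace: example-namespace + spec: + type: NodePort + selector: + app: example + ports: + - port: 80 + targetPort: 8080 + ``` 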
+ audit: | + Check to ensure no Pods are binding any of their containers to a host port: + + ```kubectl get pods -A -o json | jq -r '.items[] | select( (.spec.containers[].ports | . != null and any(.[].hostPort; . != null) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Pods that bind to a host port, update the Pods (or the Deployments/DaemonSets/CronJobs/etc that produced the Pods) to ensure they do not bind to a host port: + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: example + namespace: example-namespace + spec: + containers: + - ports: + - containerPort: 80 # <-- ensure no 'hostPort' is defined in any entries of the port configurations + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/overview/#services + title: 'Kubernetes Configuration Best Practices: hostPort' + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Host Ports' + - uid: mondoo-kubernetes-security-daemonset-ports-hostport + title: DaemonSets should not bind to a host port + impact: 80 + mql: | + k8s.daemonset.podSpec['containers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + docs: + desc: | + DaemonSets should not bind to the underlying host port. This allows bypassing certain network access control systems. + Host ports also expose the Container outside the Kubernetes cluster, which might not be intended. + audit: | + Check to ensure no DaemonSets are binding any of their containers to a host port: + + ```kubectl get daemonsets -A -o json | jq -r '.items[] | select( (.spec.template.spec.containers[].ports | . != null and any(.[].hostPort; . != null) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any DaemonSets that bind to a host port, update the DaemonSets to ensure they do not bind to a host port: + + ```yaml + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - ports: + - containerPort: 80 # <-- ensure no 'hostPort' is defined in any entries of the port configurations + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/overview/#services + title: 'Kubernetes Configuration Best Practices: hostPort' + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Host Ports' + - uid: mondoo-kubernetes-security-replicaset-ports-hostport + title: ReplicaSets should not bind to a host port + impact: 80 + mql: | + k8s.replicaset.podSpec['containers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + docs: + desc: | + ReplicaSets should not bind to the underlying host port. This allows bypassing certain network access control systems. + Host ports also expose the Container outside the Kubernetes cluster, which might not be intended. + audit: | + Check to ensure no ReplicaSets are binding any of their containers to a host port: + + ```kubectl get replicasets -A -o json | jq -r '.items[] | select( (.spec.template.spec.containers[].ports | . != null and any(.[].hostPort; . 
!= null) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any ReplicaSets that bind to a host port, update the ReplicaSets to ensure they do not bind to a host port: + + ```yaml + apiVersion: apps/v1 + kind: ReplicaSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - ports: + - containerPort: 80 # <-- ensure no 'hostPort' is defined in any entries of the port configurations + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/overview/#services + title: 'Kubernetes Configuration Best Practices: hostPort' + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Host Ports' + - uid: mondoo-kubernetes-security-job-ports-hostport + title: Jobs should not bind to a host port + impact: 80 + mql: | + k8s.job.podSpec['containers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + docs: + desc: | + Jobs should not bind to the underlying host port. This allows bypassing certain network access control systems. + Host ports also expose the Container outside the Kubernetes cluster, which might not be intended. + audit: | + Check to ensure no Jobs are binding any of their containers to a host port: + + ```kubectl get jobs -A -o json | jq -r '.items[] | select( (.spec.template.spec.containers[].ports | . != null and any(.[].hostPort; . != null) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Jobs that bind to a host port, update the Jobs to ensure they do not bind to a host port: + + ```yaml + apiVersion: batch/v1 + kind: Job + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - ports: + - containerPort: 80 # <-- ensure no 'hostPort' is defined in any entries of the port configurations + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/overview/#services + title: 'Kubernetes Configuration Best Practices: hostPort' + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Host Ports' + - uid: mondoo-kubernetes-security-deployment-ports-hostport + title: Deployments should not bind to a host port + impact: 80 + mql: | + k8s.deployment.podSpec['containers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + docs: + desc: | + Deployments should not bind to the underlying host port. This allows bypassing certain network access control systems. + Host ports also expose the Container outside the Kubernetes cluster, which might not be intended. + audit: | + Check to ensure no Deployments are binding any of their containers to a host port: + + ```kubectl get deployments -A -o json | jq -r '.items[] | select( (.spec.template.spec.containers[].ports | . != null and any(.[].hostPort; . 
!= null) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Deployments that bind to a host port, update the Deployments to ensure they do not bind to a host port: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - ports: + - containerPort: 80 # <-- ensure no 'hostPort' is defined in any entries of the port configurations + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/overview/#services + title: 'Kubernetes Configuration Best Practices: hostPort' + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Host Ports' + - uid: mondoo-kubernetes-security-statefulset-ports-hostport + title: StatefulSets should not bind to a host port + impact: 80 + mql: | + k8s.statefulset.podSpec['containers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + docs: + desc: | + StatefulSets should not bind to the underlying host port. This allows bypassing certain network access control systems. + Host ports also expose the Container outside the Kubernetes cluster, which might not be intended. + audit: | + Check to ensure no StatefulSets are binding any of their containers to a host port: + + ```kubectl get statefulsets -A -o json | jq -r '.items[] | select( (.spec.template.spec.containers[].ports | . != null and any(.[].hostPort; . != null) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any StatefulSets that bind to a host port, update the StatefulSets to ensure they do not bind to a host port: + + ```yaml + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - ports: + - containerPort: 80 # <-- ensure no 'hostPort' is defined in any entries of the port configurations + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/overview/#services + title: 'Kubernetes Configuration Best Practices: hostPort' + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Host Ports' + - uid: mondoo-kubernetes-security-cronjob-ports-hostport + title: CronJobs should not bind to a host port + impact: 80 + mql: | + k8s.cronjob.podSpec['containers'].all( _['ports'] == null || _['ports'].none( _['hostPort'])) + docs: + desc: | + CronJobs should not bind to the underlying host port. This allows bypassing certain network access control systems. + Host ports also expose the Container outside the Kubernetes cluster, which might not be intended. + audit: | + Check to ensure no CronJobs are binding any of their containers to a host port: + + ```kubectl get cronjobs -A -o json | jq -r '.items[] | select( (.spec.jobTemplate.spec.template.spec.containers[].ports | . != null and any(.[].hostPort; . 
!= null) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any CronJobs that bind to a host port, update the CronJobs to ensure they do not bind to a host port: + + ```yaml + apiVersion: batch/v1 + kind: CronJob + metadata: + name: example + namespace: example-namespace + spec: + jobTemplate: + spec: + template: + spec: + containers: + - ports: + - containerPort: 80 # <-- ensure no 'hostPort' is defined in any entries of the port configurations + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + ``` + refs: + - url: https://kubernetes.io/docs/concepts/configuration/overview/#services + title: 'Kubernetes Configuration Best Practices: hostPort' + - url: https://kubernetes.io/docs/concepts/security/pod-security-standards/#baseline + title: 'Kubernetes Security Standards: Host Ports' + - uid: mondoo-kubernetes-security-pod-hostpath-readonly + title: Pods should mount any host path volumes as read-only + impact: 80 + mql: | + k8s.pod.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['containers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + k8s.pod.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['initContainers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + k8s.pod.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['ephemeralContainers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + docs: + desc: | + Pods should not mount volumes of type hostPath as read-write. Containers should not be granted the ability to mutate the underlying host they are running on. + This can even lead to container escapes. + audit: | + Check to ensure no containers in a Pod are mounting hostPath volumes as read-write: + + ```kubectl get pods -A -o json | jq -r '.items[] | [.spec.volumes[] | select(.hostPath != null) | .name] as $myVar | select(.spec.containers[].volumeMounts | (. 
!= null and ( .[] | ( [.name] | inside($myVar) ) and .readOnly != true ) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Pod containers that mount a hostPath volume as read-write, update them (or the Deployment/StatefulSet/etc that created the Pod): + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: example + namespace: example-namespace + spec: + containers: + - volumeMounts: + - mountPath: /host + name: hostpath-volume + readOnly: true # <-- ensure readOnly is set to true + volumes: + - hostPath: + path: /etc + name: hostpath-volume + ``` + - uid: mondoo-kubernetes-security-daemonset-hostpath-readonly + title: DaemonSets should mount any host path volumes as read-only + impact: 80 + mql: | + k8s.daemonset.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['containers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + docs: + desc: | + DaemonSets should not mount volumes of type hostPath as read-write. Containers should not be granted the ability to mutate the underlying host they are running on. + This can even lead to container escapes. + audit: | + Check to ensure no containers in a DaemonSet are mounting hostPath volumes as read-write: + + ```kubectl get daemonsets -A -o json | jq -r '.items[] | [.spec.template.spec.volumes[] | select(.hostPath != null) | .name] as $myVar | select(.spec.template.spec.containers[].volumeMounts | (. != null and ( .[] | ( [.name] | inside($myVar) ) and .readOnly != true ) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any DaemonSet containers that mount a hostPath volume as read-write, update them: + + ```yaml + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - volumeMounts: + - mountPath: /host + name: hostpath-volume + readOnly: true # <-- ensure readOnly is set to true + volumes: + - hostPath: + path: /etc + name: hostpath-volume + ``` + - uid: mondoo-kubernetes-security-replicaset-hostpath-readonly + title: ReplicaSets should mount any host path volumes as read-only + impact: 80 + mql: | + k8s.replicaset.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['containers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + docs: + desc: | + ReplicaSets should not mount volumes of type hostPath as read-write. Containers should not be granted the ability to mutate the underlying host they are running on. + This can even lead to container escapes. + audit: | + Check to ensure no containers in a ReplicaSet are mounting hostPath volumes as read-write: + + ```kubectl get replicasets -A -o json | jq -r '.items[] | [.spec.template.spec.volumes[] | select(.hostPath != null) | .name] as $myVar | select(.spec.template.spec.containers[].volumeMounts | (. 
!= null and ( .[] | ( [.name] | inside($myVar) ) and .readOnly != true ) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any ReplicaSet containers that mount a hostPath volume as read-write, update them: + + ```yaml + apiVersion: apps/v1 + kind: ReplicaSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - volumeMounts: + - mountPath: /host + name: hostpath-volume + readOnly: true # <-- ensure readOnly is set to true + volumes: + - hostPath: + path: /etc + name: hostpath-volume + ``` + - uid: mondoo-kubernetes-security-job-hostpath-readonly + title: Jobs should mount any host path volumes as read-only + impact: 80 + mql: | + k8s.job.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['containers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + docs: + desc: | + Jobs should not mount volumes of type hostPath as read-write. Containers should not be granted the ability to mutate the underlying host they are running on. + This can even lead to container escapes. + audit: | + Check to ensure no containers in a Job are mounting hostPath volumes as read-write: + + ```kubectl get jobs -A -o json | jq -r '.items[] | [.spec.template.spec.volumes[] | select(.hostPath != null) | .name] as $myVar | select(.spec.template.spec.containers[].volumeMounts | (. != null and ( .[] | ( [.name] | inside($myVar) ) and .readOnly != true ) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Job containers that mount a hostPath volume as read-write, update them: + + ```yaml + apiVersion: batch/v1 + kind: Job + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - volumeMounts: + - mountPath: /host + name: hostpath-volume + readOnly: true # <-- ensure readOnly is set to true + volumes: + - hostPath: + path: /etc + name: hostpath-volume + ``` + - uid: mondoo-kubernetes-security-deployment-hostpath-readonly + title: Deployments should mount any host path volumes as read-only + impact: 80 + mql: | + k8s.deployment.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['containers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + docs: + desc: | + Deployments should not mount volumes of type hostPath as read-write. Containers should not be granted the ability to mutate the underlying host they are running on. + This can even lead to container escapes. + audit: | + Check to ensure no containers in a Deployment are mounting hostPath volumes as read-write: + + ```kubectl get deployments -A -o json | jq -r '.items[] | [.spec.template.spec.volumes[] | select(.hostPath != null) | .name] as $myVar | select(.spec.template.spec.containers[].volumeMounts | (. 
!= null and ( .[] | ( [.name] | inside($myVar) ) and .readOnly != true ) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any Deployment containers that mount a hostPath volume as read-write, update them: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - volumeMounts: + - mountPath: /host + name: hostpath-volume + readOnly: true # <-- ensure readOnly is set to true + volumes: + - hostPath: + path: /etc + name: hostpath-volume + ``` + - uid: mondoo-kubernetes-security-statefulset-hostpath-readonly + title: StatefulSets should mount any host path volumes as read-only + impact: 80 + mql: | + k8s.statefulset.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['containers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + docs: + desc: | + StatefulSets should not mount volumes of type hostPath as read-write. Containers should not be granted the ability to mutate the underlying host they are running on. + This can even lead to container escapes. + audit: | + Check to ensure no containers in a StatefulSet are mounting hostPath volumes as read-write: + + ```kubectl get statefulsets -A -o json | jq -r '.items[] | [.spec.template.spec.volumes[] | select(.hostPath != null) | .name] as $myVar | select(.spec.template.spec.containers[].volumeMounts | (. != null and ( .[] | ( [.name] | inside($myVar) ) and .readOnly != true ) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any StatefulSet containers that mount a hostPath volume as read-write, update them: + + ```yaml + apiVersion: apps/v1 + kind: StatefulSet + metadata: + name: example + namespace: example-namespace + spec: + template: + spec: + containers: + - volumeMounts: + - mountPath: /host + name: hostpath-volume + readOnly: true # <-- ensure readOnly is set to true + volumes: + - hostPath: + path: /etc + name: hostpath-volume + ``` + - uid: mondoo-kubernetes-security-cronjob-hostpath-readonly + title: CronJobs should mount any host path volumes as read-only + impact: 80 + mql: | + k8s.cronjob.podSpec { + hostPathVolumes = _['volumes'].where(_['hostPath'] != null).map(_['name']) + _['containers'] { + _['name'] + if( _['volumeMounts'] != null ) { + _['volumeMounts'] { + n = _['name'] + if( hostPathVolumes.contains(n) ) { + _['readOnly'] == true + } else { + true + } + } + } else { + true + } + } + } + docs: + desc: | + CronJobs should not mount volumes of type hostPath as read-write. Containers should not be granted the ability to mutate the underlying host they are running on. + This can even lead to container escapes. + audit: | + Check to ensure no containers in a CronJob are mounting hostPath volumes as read-write: + + ```kubectl get cronjobs -A -o json | jq -r '.items[] | [.spec.jobTemplate.spec.template.spec.volumes[] | select(.hostPath != null) | .name] as $myVar | select(.spec.jobTemplate.spec.template.spec.containers[].volumeMounts | (. 
!= null and ( .[] | ( [.name] | inside($myVar) ) and .readOnly != true ) ) ) | .metadata.namespace + "/" + .metadata.name' | uniq``` + remediation: | + For any CronJob containers that mount a hostPath volume as read-write, update them: + + ```yaml + apiVersion: batch/v1 + kind: CronJob + metadata: + name: example + namespace: example-namespace + spec: + jobTemplate: + spec: + template: + spec: + containers: + - volumeMounts: + - mountPath: /host + name: hostpath-volume + readOnly: true # <-- ensure readOnly is set to true + volumes: + - hostPath: + path: /etc + name: hostpath-volume + ``` + - uid: mondoo-kubernetes-security-deployment-tiller + title: Deployments should not run Tiller (Helm v2) + impact: 40 + mql: | + k8s.deployment.podSpec["containers"].none( _["image"].contains("tiller") ) + docs: + desc: | + Tiller is the in-cluster component of the Helm v2 package manager. It communicates directly with the Kubernetes API and therefore has broad RBAC permissions. An attacker can use this to gain cluster-wide access. + audit: | + Verify there are no deployments running Tiller: + ```kubectl get deployments -A -o=custom-columns="NAME:.metadata.name,IMAGE:.spec.template.spec.containers[*].image"``` + remediation: | + Delete any deployments that are running Tiller. + - uid: mondoo-kubernetes-security-pod-tiller + title: Pods should not run Tiller (Helm v2) + impact: 40 + mql: | + k8s.pod.podSpec["containers"].none( _["image"].contains("tiller") ) + k8s.pod.podSpec["initContainers"].none( _["image"].contains("tiller") ) + k8s.pod.podSpec["ephemeralContainers"].none( _["image"].contains("tiller") ) + docs: + desc: | + Tiller is the in-cluster component of the Helm v2 package manager. It communicates directly with the Kubernetes API and therefore has broad RBAC permissions. An attacker can use this to gain cluster-wide access. + audit: | + Verify there are no pods running Tiller: + ```kubectl get pods -A -o=custom-columns="NAME:.metadata.name,IMAGE:.spec.containers[*].image"``` + remediation: | + Delete any pods that are running Tiller. + - uid: mondoo-kubernetes-security-deployment-k8s-dashboard + title: Deployments should not run Kubernetes dashboard + impact: 40 + mql: | + k8s.deployment.podSpec["containers"].none( _["image"].contains("kubernetes-dashboard") || _["image"].contains("kubernetesui") ) + k8s.deployment.labels["app"] == null || k8s.deployment.labels["app"] != "kubernetes-dashboard" + k8s.deployment.labels["k8s-app"] == null || k8s.deployment.labels["k8s-app"] != "kubernetes-dashboard" + docs: + desc: | + The Kubernetes dashboard allows browsing through cluster resources such as workloads, configmaps and secrets. In 2018 Tesla was hacked because their Kubernetes dashboard was publicly exposed. This allowed the attackers to extract credentials and deploy cryptocurrency miners on the cluster. + audit: | + Verify there are no deployments running Kubernetes dashboard: + ```kubectl get deployments -A -o=custom-columns="NAME:.metadata.name,IMAGE:.spec.template.spec.containers[*].image"``` + remediation: | + Delete any deployments that are running Kubernetes dashboard. 
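+ + For example, if the dashboard was installed with its default Deployment name into the `kubernetes-dashboard` namespace (adjust the name and namespace to match your cluster), it can be removed with: + + ```kubectl delete deployment kubernetes-dashboard -n kubernetes-dashboard``` 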
+ - uid: mondoo-kubernetes-security-pod-k8s-dashboard + title: Pods should not run Kubernetes dashboard + impact: 40 + mql: | + k8s.pod.podSpec["containers"].none( _["image"].contains("kubernetes-dashboard") || _["image"].contains("kubernetesui") ) + k8s.pod.podSpec["initContainers"].none( _["image"].contains("kubernetes-dashboard") || _["image"].contains("kubernetesui") ) + k8s.pod.podSpec["ephemeralContainers"].none( _["image"].contains("kubernetes-dashboard") || _["image"].contains("kubernetesui") ) + k8s.pod.labels["app"] == null || k8s.pod.labels["app"] != "kubernetes-dashboard" + k8s.pod.labels["k8s-app"] == null || k8s.pod.labels["k8s-app"] != "kubernetes-dashboard" + docs: + desc: | + The Kubernetes dashboard allows browsing through cluster resources such as workloads, configmaps and secrets. In 2018 Tesla was hacked because their Kubernetes dashboard was publicly exposed. This allowed the attackers to extract credentials and deploy cryptocurrency miners on the cluster. + audit: | + Verify there are no pods running Kubernetes dashboard: + ```kubectl get pods -A -o=custom-columns="NAME:.metadata.name,IMAGE:.spec.containers[*].image"``` + remediation: | + Delete any pods that are running Kubernetes dashboard. diff --git a/test/testdata/mondoo-terraform-aws-security.mql.yaml b/test/testdata/mondoo-terraform-aws-security.mql.yaml new file mode 100644 index 000000000..860b399e7 --- /dev/null +++ b/test/testdata/mondoo-terraform-aws-security.mql.yaml @@ -0,0 +1,563 @@ +# Copyright (c) Mondoo, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +policies: + - uid: mondoo-terraform-aws-security + name: Terraform HCL Security Static Analysis for AWS + version: 1.2.0 + license: BUSL-1.1 + tags: + mondoo.com/category: security + mondoo.com/platform: aws,cloud,terraform + authors: + - name: Mondoo, Inc + email: hello@mondoo.com + docs: + desc: | + ## Overview + + This policy checks for security misconfigurations in Terraform for Amazon Web Services. + + ## Local scan + + Local scans refer to scans of files and operating systems where cnspec is installed. + + ### Scan a Terraform project + + Open a terminal and run this command: + + ```bash + cnspec scan terraform /path/to/terraform/directory + ``` + + A sketch of running only this bundle from a local file is shown at the end of this description. + + ## Join the community! + + Our goal is to build policies that are simple to deploy, accurate, and actionable. + + If you have any suggestions for how to improve this policy, or if you need support, [join the community](https://github.com/orgs/mondoohq/discussions) in GitHub Discussions. 
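+ + ## Run only this bundle + + As a sketch, assuming this bundle is saved locally as `mondoo-terraform-aws-security.mql.yaml`, the `--policy-bundle` flag should let you scan a Terraform project against just this policy: + + ```bash + cnspec scan terraform /path/to/terraform/directory --policy-bundle mondoo-terraform-aws-security.mql.yaml + ``` 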
+ groups: + - title: AWS General + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any(nameLabel == "aws") + checks: + - uid: terraform-aws-security-no-static-credentials-in-providers + - title: Amazon API Gateway + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any(nameLabel == "aws") + checks: + - uid: terraform-aws-security-api-gw-cache-enabled-and-encrypted + - uid: terraform-aws-security-api-gw-execution-logging-enabled + - uid: terraform-aws-security-api-gw-require-authentication + - uid: terraform-aws-security-api-gw-tls + - uid: terraform-aws-security-api-gw-xray-enabled + - title: Amazon Elastic Compute Cloud (Amazon EC2) + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any(nameLabel == "aws") + checks: + - uid: terraform-aws-security-ec2-ebs-encryption-by-default + - uid: terraform-aws-security-ec2-imdsv2 + - uid: terraform-aws-security-ec2-user-data-no-secrets + - title: AWS Identity and Access Management (IAM) + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any(nameLabel == "aws") + checks: + - uid: terraform-aws-security-iam-no-wildcards-policies + - title: Amazon Simple Storage Service (Amazon S3) + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any(nameLabel == "aws") + checks: + - uid: terraform-aws-security-s3-bucket-level-public-access-prohibited + - uid: terraform-aws-security-s3-bucket-logging-enabled + - uid: terraform-aws-security-s3-bucket-public-read-and-write-prohibited + - uid: terraform-aws-security-s3-bucket-server-side-encryption-enabled + - uid: terraform-aws-security-s3-bucket-versioning-enabled + - title: Amazon Elastic Kubernetes Service (Amazon EKS) + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any(nameLabel == "aws") + checks: + - uid: terraform-aws-security-eks-encrypt-secrets + - uid: terraform-aws-security-eks-no-public-cluster-access-to-cidr +queries: + - uid: terraform-aws-security-no-static-credentials-in-providers + title: Providers should not contain hard-coded credentials + mql: | + terraform.providers.where( nameLabel == "aws" ) { + arguments["access_key"] == null || arguments["access_key"].find(/(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}/).all("AKIAIOSFODNN7EXAMPLE") + arguments["secret_key"] == null || arguments["secret_key"].find(/([A-Za-z0-9\\\/+\\]{40})/).all( "wJalrXUtnFEMI/A1AAAAA/bPxRfiCYAAAAAAAKEY") + } + docs: + desc: | + Hard-coded credentials are not recommended in any Terraform configuration, and risk secret leakage should this file ever be committed to a public version control system. + audit: | + Check for the existence of hard-coded credentials in the AWS provider: + + ```hcl + provider "aws" { + region = "us-west-2" + access_key = "my-access-key" + secret_key = "my-secret-key" + } + ``` + remediation: | + The following are more secure alternatives for configuring the AWS provider: + + __Environment Variables__ + You can provide your credentials via the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables representing your AWS Access Key and AWS Secret Key, respectively. Note that setting your AWS credentials using either these (or legacy) environment variables will override the use of `AWS_SHARED_CREDENTIALS_FILE` and `AWS_PROFILE`. 
The `AWS_DEFAULT_REGION` and `AWS_SESSION_TOKEN` environment variables are also used, if applicable: + + ```bash + $ export AWS_ACCESS_KEY_ID="an_accesskey" + $ export AWS_SECRET_ACCESS_KEY="a_secretkey" + $ export AWS_DEFAULT_REGION="us-west-2" + $ terraform plan + ``` + + ```hcl + provider "aws" {} + ``` + + __Assumed Role__ + If provided with a role ARN, Terraform will attempt to assume this role using the supplied credentials. + + ```hcl + provider "aws" { + assume_role { + role_arn = "arn:aws:iam::ACCOUNT_ID:role/ROLE_NAME" + session_name = "SESSION_NAME" + external_id = "EXTERNAL_ID" + } + } + ``` + - uid: terraform-aws-security-api-gw-cache-enabled-and-encrypted + title: API Gateway must have cache enabled + mql: | + terraform.resources.where( nameLabel == "aws_api_gateway_method_settings") { + blocks.one(type == "settings" && arguments["cache_data_encrypted"] == true) + } + docs: + desc: Ensure that all methods in Amazon API Gateway stages have cache enabled and cache encrypted + audit: | + Check if `cache_data_encrypted` is set to `false` + + ```hcl + resource "aws_api_gateway_method_settings" "fail_example" { + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = aws_api_gateway_stage.example.stage_name + method_path = "path1/GET" + + settings { + metrics_enabled = true + logging_level = "INFO" + cache_data_encrypted = false + } + } + ``` + remediation: | + Enable cache encryption by setting `cache_data_encrypted` to `true` + + ```hcl + resource "aws_api_gateway_method_settings" "good_example" { + rest_api_id = aws_api_gateway_rest_api.example.id + stage_name = aws_api_gateway_stage.example.stage_name + method_path = "path1/GET" + + settings { + metrics_enabled = true + logging_level = "INFO" + cache_data_encrypted = true + } + } + ``` + - uid: terraform-aws-security-api-gw-execution-logging-enabled + title: Ensure that all methods in Amazon API Gateway stage have logging enabled + mql: | + terraform.resources.where( nameLabel == "aws_api_gateway_stage") { + blocks.one(type == "access_log_settings" && arguments["destination_arn"] != "" ) + } + + terraform.resources.where( nameLabel == "aws_apigatewayv2_stage") { + blocks.one(type == "access_log_settings" && arguments["destination_arn"] != "" ) + } + refs: + - url: https://docs.aws.amazon.com/config/latest/developerguide/api-gw-execution-logging-enabled.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_method_settings#cache_data_encrypted + title: Terraform Documentation - api_gateway_method_settings Resource + - uid: terraform-aws-security-api-gw-xray-enabled + title: Ensure AWS X-Ray tracing is enabled on Amazon API Gateway REST APIs + mql: | + terraform.resources.where( nameLabel == "aws_api_gateway_stage") { + arguments["xray_tracing_enabled"] == true + } + refs: + - url: https://docs.aws.amazon.com/config/latest/developerguide/api-gw-xray-enabled.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_stage#xray_tracing_enabled + title: Terraform Documentation - api_gateway_stage Resource + - uid: terraform-aws-security-api-gw-require-authentication + title: Ensure Authentication for API Gateway methods is activated + mql: | + terraform.resources + .where( nameLabel == "aws_api_gateway_method" && arguments["authorization"].upcase == "NONE" && arguments["http_method"].upcase != "OPTION" ) + .all(arguments["api_key_required"] == true ) + refs: + - url: 
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_method#authorization + title: Terraform Documentation - api_gateway_method Resource + - uid: terraform-aws-security-api-gw-tls + title: Ensure that the API Gateway uses a secure SSL/TLS configuration + mql: | + terraform.resources.where( nameLabel == "aws_api_gateway_domain_name") { + arguments["security_policy"] == "TLS-1-2" + } + refs: + - url: https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-custom-domain-tls-version.html + title: Choosing a minimum TLS version for a custom domain in API Gateway + - url: https://docs.aws.amazon.com/config/latest/developerguide/api-gw-ssl-enabled.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/api_gateway_domain_name#security_policy + title: Terraform Documentation - api_gateway_domain_name Resource + - uid: terraform-aws-security-ec2-ebs-encryption-by-default + title: Ensure that Amazon Elastic Block Store (EBS) encryption is enabled by default + mql: | + terraform.resources.where( nameLabel == "aws_ebs_volume").all(arguments["encrypted"] == true) || + terraform.resources.one( nameLabel == "aws_ebs_encryption_by_default" && arguments["enabled"] == true ) + refs: + - url: https://docs.aws.amazon.com/config/latest/developerguide/ec2-ebs-encryption-by-default.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ebs_volume#encrypted + title: Terraform Documentation - ebs_volume Resource + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ebs_encryption_by_default + title: Terraform Documentation - ebs_encryption_by_default Resource + - uid: terraform-aws-security-ec2-imdsv2 + title: Ensure Instance Metadata Service Version 2 (IMDSv2) with session authentication tokens is active + mql: | + terraform.resources.where( nameLabel == "aws_instance") { + blocks.one(type == "metadata_options") + blocks.where(type == "metadata_options") { + arguments["http_tokens"] == "required" || arguments["http_endpoint"] == "disabled" + } + } + refs: + - url: https://aws.amazon.com/blogs/security/defense-in-depth-open-firewalls-reverse-proxies-ssrf-vulnerabilities-ec2-instance-metadata-service + title: Add defense in depth against open firewalls, reverse proxies, and SSRF vulnerabilities with enhancements to the EC2 Instance Metadata Service + - url: https://docs.aws.amazon.com/config/latest/developerguide/ec2-imdsv2-check.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance#metadata-options + title: Terraform Documentation - Metadata Options + - uid: terraform-aws-security-ec2-user-data-no-secrets + title: Ensure EC2 instance user data does not contain secrets + mql: | + terraform.resources.where( nameLabel == "aws_instance" && arguments["user_data"] != null ) { + # ensure that all used AWS_ACCESS_KEY_ID are the sample key + arguments["user_data"] { + _.find(/(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}/).all("AKIAIOSFODNN7EXAMPLE") + } + + # ensure that all used secret keys are the sample key + arguments["user_data"] { + _.find(/([A-Za-z0-9\\\/+\\]{40})/).all( "wJalrXUtnFEMI/A1AAAAA/bPxRfiCYAAAAAAAKEY") + } + } + refs: + - url: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-add-user-data.html + title: Work with instance user data + - url: 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html + title: Run commands on your Linux instance at launch + - url: https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-windows-user-data.html + title: Run commands on your Windows instance at launch + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance#user_data + title: Terraform Documentation - aws_instance Resource + - uid: terraform-aws-security-iam-no-wildcards-policies + title: Ensure IAM policy do not use wildcards and instead apply the principle of least privilege + mql: | + # verify aws_iam_policy + terraform.resources.where( nameLabel == "aws_iam_policy" && arguments["policy"] != null ) { + arguments["policy"].where( _["Statement"] != null) { + _["Statement"] { + # Resource is either not * or DENY is used (where wildcard is great) + _["Resource"] != "*" || _["Effect"].upcase == "DENY" + } + } + } + + # verify aws_iam_user_policy + terraform.resources.where( nameLabel == "aws_iam_user_policy" && arguments["policy"] != null ) { + arguments["policy"].where( _["Statement"] != null) { + _["Statement"] { + # Resource is either not * or DENY is used (where wildcard is great) + _["Resource"] != "*" || _["Effect"].upcase == "DENY" + } + } + } + + # verify iam_role_policy + terraform.resources.where( nameLabel == "iam_role_policy" && arguments["policy"] != null ) { + arguments["policy"].where( _["Statement"] != null) { + _["Statement"] { + # Resource is either not * or DENY is used (where wildcard is great) + _["Resource"] != "*" || _["Effect"].upcase == "DENY" + } + } + } + + # verify iam_group_policy + terraform.resources.where( nameLabel == "iam_group_policy" && arguments["policy"] != null ) { + arguments["policy"].where( _["Statement"] != null) { + _["Statement"] { + # Resource is either not * or DENY is used (where wildcard is great) + _["Resource"] != "*" || _["Effect"].upcase == "DENY" + } + } + } + refs: + - url: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html + title: Security best practices in IAM + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy + title: Terraform Documentation - iam_policy Resource + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_user_policy + title: Terraform Documentation - iam_user_policy Resource + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy + title: Terraform Documentation - iam_role_policy Resource + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_group_policy + title: Terraform Documentation - iam_group_policy Resource + - uid: terraform-aws-security-s3-bucket-versioning-enabled + title: Ensure that versioning is enabled for your S3 buckets + mql: | + if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+3\./ ) { + terraform.resources.where( nameLabel == 'aws_s3_bucket') { + blocks.one(type == 'versioning') + blocks.where(type == 'versioning') { + arguments['enabled'] == true + } + } + } + if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+4\./ ) { + terraform.resources.where( nameLabel == 'aws_s3_bucket').map(labels.last) { + bucketnames = _ + terraform.resources.where( nameLabel == 'aws_s3_bucket_versioning' && arguments['bucket'].split('.')[1] == bucketnames ) != [] + terraform.resources.where( nameLabel == 'aws_s3_bucket_versioning' && arguments['bucket'].split('.')[1] == bucketnames ) { + blocks.one( 
type.downcase == 'versioning_configuration' ) + blocks.where( type.downcase == 'versioning_configuration' ) { + arguments['status'].downcase == 'enabled' + } + } + } + } + refs: + - url: https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-versioning-enabled.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket + title: Terraform Documentation - s3_bucket Resource + - uid: terraform-aws-security-s3-bucket-logging-enabled + title: Ensure logging is enabled for your S3 buckets + mql: | + if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+3\./ ) { + terraform.resources.where( nameLabel == 'aws_s3_bucket') { + blocks.one( type == 'logging') + blocks.where(type == 'logging') { + arguments['target_bucket'] != null + } + } + } + if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+4\./ ) { + terraform.resources.where( nameLabel == 'aws_s3_bucket').map(labels.last) { + bucketnames = _ + terraform.resources.where( nameLabel == 'aws_s3_bucket_logging' && arguments['bucket'].split('.')[1] == bucketnames ) != [] + terraform.resources.where( nameLabel == 'aws_s3_bucket_logging' && arguments['bucket'].split('.')[1] == bucketnames ) { + arguments['target_bucket'] != null + } + } + } + refs: + - url: https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-logging-enabled.html + title: AWS Config Managed Rules + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket + title: Terraform Documentation - s3_bucket Resource + - url: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html + title: Logging requests using server access logging + - uid: terraform-aws-security-s3-bucket-level-public-access-prohibited + title: Ensure Amazon Simple Storage Service (Amazon S3) buckets are not publicly accessible + mql: | + terraform.resources.where( nameLabel == 'aws_s3_bucket').map(labels.last) { + bucketnames = _ + terraform.resources.where( nameLabel == 'aws_s3_bucket_public_access_block' && arguments['bucket'].split('.')[1] == bucketnames ) != [] + terraform.resources.where( nameLabel == 'aws_s3_bucket_public_access_block' && arguments['bucket'].split('.')[1] == bucketnames ) { + arguments['block_public_acls'] == true + arguments['block_public_policy'] == true + arguments['ignore_public_acls'] == true + arguments['restrict_public_buckets'] == true + } + } + docs: + desc: | + [Blocking public access to your Amazon S3 storage](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) provides very specific documentation about the various settings for s3 bucket policies. 
+
+ - `block_public_acls` - By blocking `public` ACLs, PUT requests will fail if the object has any public ACL defined
+ - `ignore_public_acls` - By ignoring the bucket ACL, PUT calls with public ACLs will still work, but public ACL will be ignored
+ - `block_public_policy` - Prevents users from putting a policy that enables public access
+ - `restrict_public_buckets` - Restricts access to the bucket owner and AWS Services if the bucket has a public policy
+ remediation: ""
+ refs:
+ - url: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html
+ title: Blocking public access to your Amazon S3 storage
+ - url: https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-level-public-access-prohibited.html
+ title: AWS Config Managed Rules
+ - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket
+ title: Terraform Documentation - s3_bucket Resource
+ - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket_public_access_block#restrict_public_buckets
+ title: Terraform Documentation - s3_bucket Resource - restrict_public_buckets Argument
+ - uid: terraform-aws-security-s3-bucket-server-side-encryption-enabled
+ title: Ensure S3 buckets have the Amazon S3 default encryption enabled
+ mql: |
+ if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+3\./ ) {
+ terraform.resources.where( nameLabel == 'aws_s3_bucket') {
+ blocks.one( type == "server_side_encryption_configuration" )
+ blocks.where( type == "server_side_encryption_configuration" ) {
+ blocks.one( _.type == "rule" && _.blocks.one( type == 'apply_server_side_encryption_by_default' ))
+ }
+ }
+ }
+ if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+4\./ ) {
+ terraform.resources.where( nameLabel == 'aws_s3_bucket_server_side_encryption_configuration') {
+ blocks.one( type == "rule" )
+ blocks.where( type == "rule" ) {
+ blocks.one( _.type == 'apply_server_side_encryption_by_default' )
+ }
+ }
+ }
+ docs:
+ desc: |
+ Amazon S3 default encryption is an optional configuration that sets the default encryption behavior for an S3 bucket. Enabling default SSE configures S3 buckets so that all new objects are encrypted when they are stored in the bucket. The objects are encrypted using server-side encryption with either Amazon S3-managed keys (SSE-S3) or AWS KMS keys stored in AWS Key Management Service (AWS KMS) (SSE-KMS).
+
+ Enabling SSE by default reduces the risk of unauthorized access to objects stored in the bucket.
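+
+ For example, with version 4 and later of the AWS provider, default encryption is configured through the separate `aws_s3_bucket_server_side_encryption_configuration` resource. A minimal sketch (bucket and resource names are illustrative):
+
+ ```hcl
+ resource "aws_s3_bucket" "example" {
+   bucket = "my-example-bucket"
+ }
+
+ resource "aws_s3_bucket_server_side_encryption_configuration" "example" {
+   bucket = aws_s3_bucket.example.id
+
+   rule {
+     apply_server_side_encryption_by_default {
+       # "AES256" selects SSE-S3; use "aws:kms" together with kms_master_key_id for SSE-KMS
+       sse_algorithm = "AES256"
+     }
+   }
+ }
+ ```
+
+ With version 3 of the provider, the equivalent setting is a `server_side_encryption_configuration` block inside the `aws_s3_bucket` resource itself.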
+ remediation: "" + refs: + - url: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucket-encryption.html + title: Setting default server-side encryption behavior for Amazon S3 buckets + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket + title: Terraform Documentation - s3_bucket Resource + - url: https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-server-side-encryption-enabled.html + title: AWS Config Managed Rules + - uid: terraform-aws-security-s3-bucket-public-read-and-write-prohibited + title: Ensure Amazon S3 buckets do not allow public read access + mql: | + if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+3\./ ) { + terraform.resources.where( nameLabel == 'aws_s3_bucket') { + arguments['acl'] != /public-read/ + } + } + if ( terraform.settings.requiredProviders["aws"]["version"] == /[~><=\s]+4\./ ) { + terraform.resources.where( nameLabel == 'aws_s3_bucket').map(labels.last) { + bucketnames = _ + terraform.resources.where( nameLabel == 'aws_s3_bucket_acl' && arguments['bucket'].split('.')[1] == bucketnames ) != [] + terraform.resources.where( nameLabel == 'aws_s3_bucket_acl' && arguments['bucket'].split('.')[1] == bucketnames ) { + arguments['acl'].downcase != /public-read/ + } + } + } + refs: + - url: https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-public-read-prohibited.html + title: AWS Config Managed Rules - public read + - url: https://docs.aws.amazon.com/config/latest/developerguide/s3-bucket-public-write-prohibited.html + title: AWS Config Managed Rules - public write + - url: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/s3_bucket#acl + title: Terraform Documentation - s3_bucket Resource - acl Argument + - uid: terraform-aws-security-eks-encrypt-secrets + title: EKS should have the encryption of secrets enabled + mql: | + terraform.resources.where( nameLabel == "aws_eks_cluster" ) { + blocks.one( type == "encryption_config" ) + } + docs: + desc: | + EKS cluster resources should have the encryption_config block set with protection of the secrets resource. + + __Possible Impact__ + + EKS secrets could be read if compromised + + __Suggested Resolution__ + + Enable encryption of EKS secrets + audit: | + The following example will fail the `eks-encrypt-secrets` check: + + ```hcl + resource "aws_eks_cluster" "bad_example" { + name = "bad_example_cluster" + + role_arn = var.cluster_arn + vpc_config { + endpoint_public_access = false + } + } + ``` + remediation: | + The following example will pass the `eks-encrypt-secrets` check: + + ```hcl + resource "aws_eks_cluster" "good_example" { + encryption_config { + resources = [ "secrets" ] + provider { + key_arn = var.kms_arn + } + } + + name = "good_example_cluster" + role_arn = var.cluster_arn + vpc_config { + endpoint_public_access = false + } + } + ``` + - uid: terraform-aws-security-eks-no-public-cluster-access-to-cidr + title: EKS Clusters should restrict access to public API server + mql: |- + terraform.resources.where( nameLabel == "aws_eks_cluster" ) { + blocks.where( type == "vpc_config" ) { + arguments['endpoint_public_access'] == false || arguments['public_access_cidrs'].none( "0.0.0.0/0") + } + } + docs: + desc: | + EKS Clusters have public access CIDRs set to 0.0.0.0/0 by default which is wide open to the internet. This should be explicitly set to a more specific private CIDR range. 
+ + __Possible Impact__ + + EKS can be accessed from the internet + + __Suggested Resolution__ + + Don't enable public access to EKS Clusters. + audit: | + The following example will fail the eks-no-public-cluster-access-to-cidr check. + + ```hcl + resource "aws_eks_cluster" "bad_example" { + + name = "bad_example_cluster" + role_arn = var.cluster_arn + vpc_config { + endpoint_public_access = true + } + } + ``` + remediation: | + The following example will pass the eks-no-public-cluster-access-to-cidr check: + + ```hcl + resource "aws_eks_cluster" "good_example" { + name = "good_example_cluster" + role_arn = var.cluster_arn + vpc_config { + endpoint_public_access = true + public_access_cidrs = ["10.2.0.0/8"] + } + } + ``` diff --git a/test/testdata/mondoo-terraform-gcp-security.mql.yaml b/test/testdata/mondoo-terraform-gcp-security.mql.yaml new file mode 100644 index 000000000..284df76d0 --- /dev/null +++ b/test/testdata/mondoo-terraform-gcp-security.mql.yaml @@ -0,0 +1,2297 @@ +# Copyright (c) Mondoo, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +policies: + - uid: mondoo-terraform-gcp-security + name: Terraform HCL Security Static Analysis for Google Cloud + version: 1.2.0 + license: BUSL-1.1 + tags: + mondoo.com/category: security + mondoo.com/platform: gcp,cloud,terraform + authors: + - name: Mondoo, Inc + email: hello@mondoo.com + docs: + desc: | + ## Overview + + This checks for security misconfigurations in Terraform HCL for Google Cloud. + + ## Local scan + + Local scan refer to scans of files and operating systems where cnspec is installed. + + ### Scan a Terraform project + + Open a terminal and run this command: + + ```bash + cnspec scan terraform /path/to/terraform/directory + ``` + + ## Join the community! + + Our goal is to build policies that are simple to deploy, accurate, and actionable. + + If you have any suggestions for how to improve this policy, or if you need support, [join the community](https://github.com/orgs/mondoohq/discussions) in GitHub Discussions. 
+ groups: + - title: GCP BigQuery + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any( nameLabel == "google" ) + checks: + - uid: terraform-gcp-security-bigquery-no-public-access + - title: GCP Identity and Access Management (IAM) + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any( nameLabel == "google" ) + checks: + - uid: terraform-gcp-security-iam-no-folder-level-default-service-account-assignment + - uid: terraform-gcp-security-iam-no-folder-level-service-account-impersonation + - uid: terraform-gcp-security-iam-no-privileged-service-accounts + - title: GCP Cloud Storage + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any( nameLabel == "google" ) + checks: + - uid: terraform-gcp-security-storage-enable-ubla + - uid: terraform-gcp-security-storage-no-public-access + - title: GCP Compute + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any( nameLabel == "google" ) + checks: + - uid: terraform-gcp-security-compute-disk-encryption-customer-key + - uid: terraform-gcp-security-compute-disk-encryption-required + - uid: terraform-gcp-security-compute-enable-shielded-vm + - uid: terraform-gcp-security-compute-enable-vpc-flow-logs + - uid: terraform-gcp-security-compute-no-default-service-account + - uid: terraform-gcp-security-compute-no-ip-forwarding + - uid: terraform-gcp-security-compute-no-plaintext-vm-disk-keys + - uid: terraform-gcp-security-compute-no-public-ip + - title: GCP DNS + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any( nameLabel == "google" ) + checks: + - uid: terraform-gcp-security-dns-enable-dnssec + - uid: terraform-gcp-security-dns-no-rsa-sha1 + - title: GCP Google Kubernetes Engine (GKE) + filters: | + asset.platform == "terraform" || asset.platform == "terraform-hcl" + terraform.providers.any( nameLabel == "google" ) + checks: + - uid: terraform-gcp-security-gke-enable-auto-repair + - uid: terraform-gcp-security-gke-enable-auto-upgrade + - uid: terraform-gcp-security-gke-enable-ip-aliasing + - uid: terraform-gcp-security-gke-enable-master-networks + - uid: terraform-gcp-security-gke-enable-network-policy + - uid: terraform-gcp-security-gke-enable-private-cluster + - uid: terraform-gcp-security-gke-enable-stackdriver-logging + - uid: terraform-gcp-security-gke-enable-stackdriver-monitoring + - uid: terraform-gcp-security-gke-metadata-endpoints-disabled + - uid: terraform-gcp-security-gke-no-basic-authentication + - uid: terraform-gcp-security-gke-no-client-cert-authentication + - uid: terraform-gcp-security-gke-no-public-control-plane + - uid: terraform-gcp-security-gke-node-metadata-security + - uid: terraform-gcp-security-gke-node-pool-uses-cos + - uid: terraform-gcp-security-gke-node-shielding-enabled + - uid: terraform-gcp-security-gke-use-cluster-labels + - uid: terraform-gcp-security-gke-use-rbac-permissions + - uid: terraform-gcp-security-gke-use-service-account +queries: + - uid: terraform-gcp-security-iam-no-folder-level-default-service-account-assignment + title: Roles should not be assigned to default service accounts + mql: | + terraform.resources.where( nameLabel == "google_folder_iam_member") { + arguments['member'] != /.+@appspot\.gserviceaccount\.com/ && + arguments['member'] != /.+-compute@developer\.gserviceaccount\.com/ && + arguments['member'] != 
/data\.google_compute_default_account/ + } + docs: + desc: | + Default service accounts should not be used when granting access to folders as this can violate least privilege. It is recommended to use specialized service accounts instead. + + Some Google Cloud services create default service accounts when you first enable the API in a Google Cloud project. By default, these service accounts are granted the Editor role (roles/editor) on the Cloud project, which allows them to read and modify all resources in the Cloud project. This amount of access isn't essential for the services to work: To access resources in your Cloud project, Google Cloud services use service agents, not the default service accounts. + audit: | + Check if `member` is configured to use default service accounts `compute@developer.gserviceaccount.com`, `appspot.gserviceaccount.com`, or if a `data.google_compute_default_service_account` is being used + + ```hcl + resource "google_folder_iam_member" "folder-123" { + folder = "folder-123" + role = "roles/my-role" + member = "123-compute@developer.gserviceaccount.com" + } + + resource "google_folder_iam_member" "folder-456" { + folder = "folder-456" + role = "roles/my-role" + member = "123@appspot.gserviceaccount.com" + } + + data "google_compute_default_service_account" "default" { + } + + resource "google_folder_iam_member" "folder-789" { + folder = "folder-789" + role = "roles/my-role" + member = data.google_compute_default_service_account.default.id + } + ``` + remediation: | + Define a service account with least privilege for the role + + ```hcl + resource "google_service_account" "limited" { + account_id = "account123" + display_name = "account123" + } + + resource "google_folder_iam_member" "folder-123" { + folder = "folder-123" + role = "roles/my-role" + member = "serviceAccount:${google_service_account.limited.email}" + } + ``` + - uid: terraform-gcp-security-iam-no-folder-level-service-account-impersonation + title: Users should not be granted service account access at the folder level + mql: | + terraform.resources.where( nameLabel == "google_folder_iam_binding") { + arguments['role'] != /iam\.serviceAccountUser/ + } + docs: + desc: | + Users with service account access at the folder level can impersonate any service account. Instead, they should be given access to particular service accounts as required. + audit: | + Check if `role` is configured with `roles/iam.serviceAccountUser` + + ```hcl + resource "google_folder_iam_binding" "folder-123" { + folder = "folder-123" + role = "roles/iam.serviceAccountUser" + } + ``` + remediation: | + Define a custom role with least privilege + + ```hcl + resource "google_folder_iam_binding" "folder-123" { + folder = "folder-123" + role = "roles/custom-role" + } + ``` + - uid: terraform-gcp-security-iam-no-privileged-service-accounts + title: Service accounts should not have roles assigned with excessive privileges + mql: | + terraform.resources.where( nameLabel == "google_project_iam_member") { + arguments['role'] != /roles\/owner/ && + arguments['role'] != /roles\/editor/ + } + docs: + desc: | + Service accounts should have a minimal set of permissions assigned to accomplish their job. They should never have excessive access because if compromised, an attacker can escalate privileges and take over the entire account. 
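+
+ Instead of the broad basic roles, a custom role can grant only the permissions the service account actually needs. A minimal sketch (the role ID, permission list, and project ID are illustrative):
+
+ ```hcl
+ resource "google_project_iam_custom_role" "limited" {
+   role_id     = "limitedLogWriter"
+   title       = "Limited log writer"
+   permissions = ["logging.logEntries.create"]
+ }
+
+ resource "google_service_account" "app" {
+   account_id   = "app-sa"
+   display_name = "Application service account"
+ }
+
+ resource "google_project_iam_member" "app" {
+   project = "your-project-id"
+   # Reference the custom role instead of roles/owner or roles/editor
+   role    = google_project_iam_custom_role.limited.id
+   member  = "serviceAccount:${google_service_account.app.email}"
+ }
+ ```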
+ audit: | + Check if `role` is configured with basic roles: `roles/editor`, `roles/owner` + + ```hcl + resource "google_service_account" "test" { + account_id = "account123" + display_name = "account123" + } + + resource "google_project_iam_member" "project" { + project = "your-project-id" + role = "roles/owner" + member = "serviceAccount:${google_service_account.test.email}" + } + ``` + remediation: | + Define a custom role with least privilege + + ```hcl + resource "google_service_account" "test" { + account_id = "account123" + display_name = "account123" + } + + resource "google_project_iam_member" "project" { + project = "your-project-id" + role = "roles/logging.logWriter" + member = "serviceAccount:${google_service_account.test.email}" + } + ``` + - uid: terraform-gcp-security-storage-no-public-access + title: Ensure that Cloud Storage bucket is not publicly accessible + mql: | + terraform.resources.where( nameLabel == "google_storage_bucket_iam_binding") { + attributes['members']['value'] { _ != /allUsers/ && _ != /allAuthenticatedUsers/} + } + docs: + desc: | + Google Cloud Storage buckets that define 'allUsers' or 'allAuthenticatedUsers' as members in an IAM member/binding causes data to be exposed outside of the organization. This can lead to exposure of sensitive data. The recommended approach is to restrict public access. + audit: | + Check if `members` is configured with `allAuthenticatedUsers` or `allUsers` + + ```hcl + resource "google_storage_bucket_iam_binding" "allAuthenticatedUsers" { + bucket = google_storage_bucket.default.name + role = "roles/storage.admin" + members = [ + "allAuthenticatedUsers", + ] + } + + resource "google_storage_bucket_iam_binding" "allUsers" { + bucket = google_storage_bucket.default.name + role = "roles/storage.admin" + members = [ + "allUsers", + ] + } + ``` + remediation: | + Restrict public access to the bucket. + + ```hcl + resource "google_storage_bucket_iam_binding" "binding" { + bucket = google_storage_bucket.default.name + role = "roles/storage.admin" + members = [ + "user:jane@example.com", + ] + } + ``` + - uid: terraform-gcp-security-storage-enable-ubla + title: Ensure that Cloud Storage buckets have uniform bucket-level access enabled + mql: | + terraform.resources.where( nameLabel == "google_storage_bucket") { + arguments['uniform_bucket_level_access'] == true + } + docs: + desc: | + Google Cloud Storage buckets should be configured with uniform bucket-level access. + + When you enable uniform bucket-level access on a bucket, Access Control Lists (ACLs) are disabled, and only bucket-level Identity and Access Management (IAM) permissions grant access to that bucket and the objects it contains. You revoke all access granted by object ACLs and the ability to administrate permissions using bucket ACLs. 
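+
+ The setting itself is a single argument on the bucket resource; a minimal sketch (bucket name and location are illustrative):
+
+ ```hcl
+ resource "google_storage_bucket" "example" {
+   name                        = "example-uniform-access-bucket"
+   location                    = "EU"
+   # Disables object ACLs so access is governed purely by IAM
+   uniform_bucket_level_access = true
+ }
+ ```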
+ audit: | + Check if `uniform_bucket_level_access` is set to `true` + + ```hcl + resource "google_storage_bucket" "static-site" { + name = "image-store.com" + location = "EU" + force_destroy = true + + uniform_bucket_level_access = false + + website { + main_page_suffix = "index.html" + not_found_page = "404.html" + } + cors { + origin = ["http://image-store.com"] + method = ["GET", "HEAD", "PUT", "POST", "DELETE"] + response_header = ["*"] + max_age_seconds = 3600 + } + } + ``` + remediation: | + Configure `uniform_bucket_level_access` to `true` + + ```hcl + resource "google_storage_bucket" "static-site" { + name = "image-store.com" + location = "EU" + force_destroy = true + + uniform_bucket_level_access = true + + website { + main_page_suffix = "index.html" + not_found_page = "404.html" + } + cors { + origin = ["http://image-store.com"] + method = ["GET", "HEAD", "PUT", "POST", "DELETE"] + response_header = ["*"] + max_age_seconds = 3600 + } + } + ``` + - uid: terraform-gcp-security-compute-no-public-ip + title: Compute instances should not be publicly exposed to the internet + mql: | + terraform.resources.where( nameLabel == "google_compute_instance") { + blocks.where( type == "network_interface") { + blocks.where( type == "access_config") { + arguments.values.length != 0 + } + } + } + docs: + desc: | + Google Cloud compute instances that have a public IP address are exposed on the internet and are at risk to attack. + audit: | + Check if the `access_config` is empty. + + ```hcl + resource "google_compute_instance" "bad_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + network_interface { + network = "default" + + access_config { + // Ephemeral IP + } + } + } + ``` + remediation: | + Configure compute instance without empty `access_config` + + ```hcl + resource "google_compute_instance" "good_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + network_interface { + network = "default" + } + } + ``` + - uid: terraform-gcp-security-compute-disk-encryption-customer-key + title: Disks should be encrypted with Customer Supplied Encryption Keys + mql: | + terraform.resources.where( nameLabel == "google_compute_disk" ) { + blocks.one( type == "disk_encryption_key") + } + terraform.resources.where( nameLabel == "google_compute_disk" && blocks.one( type == "disk_encryption_key") ) { + blocks.where( type == "disk_encryption_key") { + arguments != "" + } + } + docs: + desc: | + Google Cloud compute instances should use disk encryption using a customer-supplied encryption key. If you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key, and you do not need to provide the key to use the disk later. + audit: | + Check if `disk_encryption_key` key is defined and that the arguments are not empty strings. 
+ + ```hcl + resource "google_compute_disk" "bad_example" { + name = "test-disk" + type = "pd-ssd" + zone = "us-central1-a" + image = "debian-9-stretch-v20200805" + labels = { + environment = "dev" + } + physical_block_size_bytes = 4096 + } + ``` + remediation: | + Configure compute instance with `disk_encryption_key` and `kms_key_self_link` defined. + + ```hcl + resource "google_compute_disk" "good_example" { + name = "test-disk" + type = "pd-ssd" + zone = "us-central1-a" + image = "debian-9-stretch-v20200805" + labels = { + environment = "dev" + } + physical_block_size_bytes = 4096 + disk_encryption_key { + kms_key_self_link = "something" + } + } + ``` + - uid: terraform-gcp-security-compute-disk-encryption-required + title: Disk encryption Keys should not be passed as plaintext + mql: | + terraform.resources.where( nameLabel == "google_compute_disk" && blocks.one( type == "disk_encryption_key") ) { + blocks.where( type == "disk_encryption_key") { + arguments.keys[0] != "raw_key" + } + } + docs: + desc: | + Google Cloud compute instances should use disk encryption using a customer-supplied encryption key. One of the options is for the `disk_encryption_key` is `raw_key`, which is the key in plaintext. + + Sensitive values such as raw encryption keys should not be included in your Terraform code and should be stored securely by a secrets manager. + audit: | + Check if the `access_config` is empty + + ```hcl + resource "google_compute_disk" "good_example" { + disk_encryption_key { + raw_key="b2ggbm8gdGhpcyBpcyBiYWQ=" + } + } + ``` + remediation: | + Configure compute instance with `disk_encryption_key` and `kms_key_self_link` defined + + ```hcl + resource "google_compute_disk" "good_example" { + disk_encryption_key { + kms_key_self_link = google_kms_crypto_key.my_crypto_key.id + } + } + ``` + - uid: terraform-gcp-security-compute-enable-shielded-vm + title: Verify shielded VM is enabled on compute instances + mql: | + terraform.resources.where( nameLabel == "google_compute_instance" ) { + blocks.one( type == "shielded_instance_config" ) + } + terraform.resources.where( nameLabel == "google_compute_instance" && blocks.one( type == "shielded_instance_config" )) { + blocks.where( type == "shielded_instance_config") { + attributes['enable_vtpm'] == null || attributes['enable_vtpm']['value'] == true + } + } + terraform.resources.where( nameLabel == "google_compute_instance" && blocks.one( type == "shielded_instance_config" )) { + blocks.where( type == "shielded_instance_config") { + attributes['enable_integrity_monitoring'] == null || attributes['enable_integrity_monitoring']['value'] == true + } + } + docs: + desc: | + Shielded VMs are virtual machines (VMs) on Google Cloud hardened by a set of security controls that help defend against rootkits and bootkits. Using Shielded VMs helps protect enterprise workloads from threats like remote attacks, privilege escalation, and malicious insiders. Shielded VMs leverage advanced platform security capabilities such as secure and measured boot, a virtual trusted platform module (vTPM), UEFI firmware, and integrity monitoring. + + **Secure Boot** helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails. + + **Integrity monitoring** helps you understand and make decisions about the state of your VM instances. 
Integrity monitoring compares the most recent boot measurements to the integrity policy baseline and returns a pair of pass/fail results depending on whether they match or not, one for the early boot sequence and one for the late boot sequence. + audit: | + Check if the `shielded_instance_config` is configured on the instance, and if `enable_vtpm` and `enable_integrity_monitoring` are set to `false` + + ```hcl + resource "google_compute_instance" "bad_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + shielded_instance_config { + enable_vtpm = false + enable_integrity_monitoring = false + } + } + ``` + remediation: | + Configure `shielded_instance_config` without `enable_vtpm` and `enable_integrity_monitoring`, or configure `enable_vtpm` and `enable_integrity_monitoring` explicitly to `true` + + ```hcl + resource "google_compute_instance" "good_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + shielded_instance_config { + enable_vtpm = true + enable_integrity_monitoring = true + } + } + ``` + - uid: terraform-gcp-security-compute-enable-vpc-flow-logs + title: Verify VPC flow logs enabled on compute instances + mql: | + terraform.resources.where( nameLabel == "google_compute_subnetwork" && arguments['purpose'] != "INTERNAL_HTTPS_LOAD_BALANCER" ) { + blocks.one( type == "log_config") + } + docs: + desc: | + VPC flow logs record information about all traffic, which is a vital tool in reviewing anomalous traffic. Google Compute Engine subnetworks that do not have VPC flow logs enabled have limited information for auditing and awareness. + + Note: Google Compute Engine subnets configured as INTERNAL_HTTPS_LOAD_BALANCER do not support VPC flow logs. Compute subnetworks with `purpose INTERNAL_HTTPS_LOAD_BALANCER` attribute will not be evaluated. 
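+
+ As an illustration of that exception, a proxy-only subnet like the following sketch would be skipped by this check (name, range, and network are illustrative):
+
+ ```terraform
+ resource "google_compute_subnetwork" "proxy_only" {
+   name          = "proxy-only-subnet"
+   ip_cidr_range = "10.3.0.0/24"
+   region        = "us-central1"
+   network       = "default"
+
+   # Proxy-only subnets cannot have VPC flow logs enabled
+   purpose = "INTERNAL_HTTPS_LOAD_BALANCER"
+   role    = "ACTIVE"
+ }
+ ```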
+ audit: | + The following example will fail: + + ```terraform + + resource "google_compute_subnetwork" "bad_example" { + name = "test-subnetwork" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.id + secondary_ip_range { + range_name = "tf-test-secondary-range-update1" + ip_cidr_range = "192.168.10.0/24" + } + } + + resource "google_compute_network" "custom-test" { + name = "test-network" + auto_create_subnetworks = false + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_compute_subnetwork" "good_example" { + name = "test-subnetwork" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.id + secondary_ip_range { + range_name = "tf-test-secondary-range-update1" + ip_cidr_range = "192.168.10.0/24" + } + log_config { + aggregation_interval = "INTERVAL_10_MIN" + flow_sampling = 0.5 + metadata = "INCLUDE_ALL_METADATA" + } + } + + resource "google_compute_network" "custom-test" { + name = "test-network" + auto_create_subnetworks = false + } + + ``` + - uid: terraform-gcp-security-compute-no-default-service-account + title: Compute instances should not use the default service account + mql: | + terraform.resources.where( nameLabel == "google_compute_instance" && blocks.one( type == "service_account") ) { + blocks.where( type == "service_account" ) { + attributes['email'] != null + } + } + terraform.resources.where( nameLabel == "google_compute_instance" && blocks.one( type == "service_account") ) { + blocks.where( type == "service_account" ) { + attributes['email'] != /.+-compute@developer\.gserviceaccount.com/ + } + } + docs: + desc: | + The default service account has full project access. Provisioning instances using the default service account gives the instance full access to the project. Compute instances should instead be assigned the minimal access they need. + audit: | + The following example will fail: + + ```terraform + + resource "google_compute_instance" "default" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + service_account { + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + email = "1234567890-compute@developer.gserviceaccount.com" + scopes = ["cloud-platform"] + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service_account_id" + display_name = "Service Account" + } + + resource "google_compute_instance" "default" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + network_interface { + network = "default" + + access_config { + // Ephemeral IP + } + } + + metadata = { + foo = "bar" + } + + metadata_startup_script = "echo hi > /test.txt" + + service_account { + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. 
+ email = google_service_account.default.email + scopes = ["cloud-platform"] + } + } + + ``` + - uid: terraform-gcp-security-compute-no-ip-forwarding + title: Compute instances should be configured with IP forwarding + mql: | + terraform.resources.where( nameLabel == "google_compute_instance" && attributes['can_ip_forward']) { + attributes['can_ip_forward']['value'] == false + } + docs: + desc: | + Disabling IP forwarding ensures the instance can only receive packets addressed to the instance and can only send packets with a source address of the instance. + + The attribute `can_ip_forward` is optional on `google_compute_instance` and defaults to `false`. Instances with `can_ip_forward = true` will fail. + audit: | + The following example will fail: + + ```terraform + + resource "google_compute_instance" "bad_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + can_ip_forward = false + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_compute_instance" "bad_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + } + + // Local SSD disk + scratch_disk { + interface = "SCSI" + } + + can_ip_forward = false + } + + ``` + - uid: terraform-gcp-security-compute-no-plaintext-vm-disk-keys + title: VM disk encryption keys should not be provided in plaintext + mql: | + terraform.resources.where( nameLabel == "google_compute_instance" ) { + blocks { arguments.keys { _ != 'disk_encryption_key_raw' } } + } + docs: + desc: | + Providing your encryption key in plaintext format means anyone with access to the source code also has access to the key. + + When encrypting a `boot_disk`, it is not recommended to use the `disk_encryption_key_raw` argument as this passes the key in plaintext, which is not secure. Consider using `kms_key_self_link` or a secrets manager instead. + audit: | + The following example will fail: + + ```terraform + + resource "google_compute_instance" "bad_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + disk_encryption_key_raw = "something" + } + + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_compute_instance" "bad_example" { + name = "test" + machine_type = "e2-medium" + zone = "us-central1-a" + + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-cloud/debian-9" + } + kms_key_self_link = "kmsKeyName" + } + + } + ``` + - uid: terraform-gcp-security-bigquery-no-public-access + title: BigQuery datasets should only be accessible within the organization + mql: | + terraform.resources.where( nameLabel == "google_bigquery_dataset" ) { + blocks { arguments.values.none("allAuthenticatedUsers") } + } + docs: + desc: | + BigQuery datasets should not be configured to provide access to `allAuthenticatedUsers` as this provides any authenticated GCP user, even those outside of your organization, access to your BigQuery dataset. This can lead to exposure of sensitive data to the public internet. + + Configure access permissions with higher granularity and least privilege principles. 
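+
+ For example, read access can be granted to a specific group rather than to all authenticated users; a minimal sketch (dataset ID and group address are illustrative):
+
+ ```terraform
+ resource "google_bigquery_dataset" "example" {
+   dataset_id = "example_dataset"
+   location   = "EU"
+ }
+
+ resource "google_bigquery_dataset_access" "analysts" {
+   dataset_id     = google_bigquery_dataset.example.dataset_id
+   # Grant read access to a specific group instead of allAuthenticatedUsers
+   role           = "READER"
+   group_by_email = "analysts@example.com"
+ }
+ ```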
+ audit: | + The following example will fail: + + ```terraform + + resource "google_bigquery_dataset" "bad_example" { + dataset_id = "example_dataset" + friendly_name = "test" + description = "This is a test description" + location = "EU" + default_table_expiration_ms = 3600000 + + labels = { + env = "default" + } + + access { + role = "OWNER" + special_group = "allAuthenticatedUsers" + } + + access { + role = "READER" + domain = "hashicorp.com" + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_bigquery_dataset" "good_example" { + dataset_id = "example_dataset" + friendly_name = "test" + description = "This is a test description" + location = "EU" + default_table_expiration_ms = 3600000 + + labels = { + env = "default" + } + + access { + role = "OWNER" + user_by_email = google_service_account.bqowner.email + } + + access { + role = "READER" + domain = "hashicorp.com" + } + } + + resource "google_service_account" "bqowner" { + account_id = "bqowner" + } + ``` + - uid: terraform-gcp-security-dns-enable-dnssec + title: Cloud DNS should use DNSSEC + mql: | + terraform.resources.where( nameLabel == "google_dns_managed_zone" ) { + blocks.where( type == "dnssec_config" ) { + attributes['state']['value'] != "off" + } + } + docs: + desc: | + DNSSEC authenticates DNS responses, preventing MITM attacks and impersonation. Unverified DNS responses could lead to man-in-the-middle attacks. + audit: | + The following example will fail: + + ```terraform + + resource "google_dns_managed_zone" "bad_example" { + name = "example-zone" + dns_name = "example-${random_id.rnd.hex}.com." + description = "Example DNS zone" + labels = { + foo = "bar" + } + dnssec_config { + state = "off" + } + } + + resource "random_id" "rnd" { + byte_length = 4 + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_dns_managed_zone" "good_example" { + name = "example-zone" + dns_name = "example-${random_id.rnd.hex}.com." + description = "Example DNS zone" + labels = { + foo = "bar" + } + dnssec_config { + state = "on" + } + } + + resource "random_id" "rnd" { + byte_length = 4 + } + ``` + - uid: terraform-gcp-security-dns-no-rsa-sha1 + title: Zone signing should not use RSA SHA1 + mql: | + terraform.datasources.where( nameLabel == "google_dns_keys" ) { + blocks { attributes['algorithm']['value'] != "rsasha1" } + } + docs: + desc: | + RSA SHA1 is a weaker algorithm than SHA2-based algorithms such as RSA SHA256/512. + audit: | + The following example will fail: + + ```terraform + + resource "google_dns_managed_zone" "foo" { + name = "foobar" + dns_name = "foo.bar." + + dnssec_config { + state = "on" + non_existence = "nsec3" + } + } + + data "google_dns_keys" "foo_dns_keys" { + managed_zone = google_dns_managed_zone.foo.id + zone_signing_keys { + algorithm = "rsasha1" + } + } + + output "foo_dns_ds_record" { + description = "DS record of the foo subdomain." + value = data.google_dns_keys.foo_dns_keys.key_signing_keys[0].ds_record + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_dns_managed_zone" "foo" { + name = "foobar" + dns_name = "foo.bar." + + dnssec_config { + state = "on" + non_existence = "nsec3" + } + } + + data "google_dns_keys" "foo_dns_keys" { + managed_zone = google_dns_managed_zone.foo.id + zone_signing_keys { + algorithm = "rsasha512" + } + } + + output "foo_dns_ds_record" { + description = "DS record of the foo subdomain." 
+ value = data.google_dns_keys.foo_dns_keys.key_signing_keys[0].ds_record + } + ``` + - uid: terraform-gcp-security-gke-enable-auto-repair + title: Kubernetes should have 'Automatic repair' enabled + mql: | + terraform.resources.where( nameLabel == "google_container_node_pool" ) { + blocks.where( type == "management") { + arguments['auto_repair'] != false + } + } + docs: + desc: | + Automatic repair will monitor nodes and attempt repair when a node fails multiple subsequent health checks. Failing nodes will require manual repair. + audit: | + The following example will fail: + + ```terraform + + resource "google_container_node_pool" "bad_example" { + name = "my-node-pool" + cluster = google_container_cluster.primary.id + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + management { + auto_repair = false + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "primary" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + } + + resource "google_container_node_pool" "good_example" { + name = "my-node-pool" + cluster = google_container_cluster.primary.id + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + management { + auto_repair = true + } + } + ``` + - uid: terraform-gcp-security-gke-enable-auto-upgrade + title: Kubernetes should have 'Automatic upgrade' enabled + mql: | + terraform.resources.where( nameLabel == "google_container_node_pool" ) { + blocks.where( type == "management") { + arguments['auto_upgrade'] != false + } + } + docs: + desc: | + Automatic updates keep nodes updated with the latest cluster master version. + audit: | + The following example will fail: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "primary" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + } + + resource "google_container_node_pool" "bad_example" { + name = "my-node-pool" + cluster = google_container_cluster.primary.id + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. 
+ service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + management { + auto_upgrade = false + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "primary" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + } + + resource "google_container_node_pool" "good_example" { + name = "my-node-pool" + cluster = google_container_cluster.primary.id + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + management { + auto_upgrade = true + } + } + ``` + - uid: terraform-gcp-security-gke-enable-ip-aliasing + title: Clusters should have IP aliasing enabled + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + blocks.one( type == "ip_allocation_policy" ) + } + docs: + desc: | + IP aliasing allows the reuse of public IPs internally, removing the need for a NAT gateway. + audit: | + The following example will fail: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "bad_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. 
+ remove_default_node_pool = true + initial_node_count = 1 + ip_allocation_policy = {} + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + `,` + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + ip_allocation_policy { + cluster_secondary_range_name = "some range name" + services_secondary_range_name = "some range name" + } + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + ``` + - uid: terraform-gcp-security-gke-enable-master-networks + title: Master authorized networks should be configured on GKE clusters + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + arguments.keys.contains("master_authorized_networks_config") + } + docs: + desc: | + Enabling authorized networks means you can restrict master access to a fixed set of CIDR ranges. + audit: | + The following example will fail: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "primary" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. 
+ service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "primary" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + master_authorized_networks_config = [{ + cidr_blocks = [{ + cidr_block = "10.10.128.0/24" + display_name = "internal" + }] + }] + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + ``` + - uid: terraform-gcp-security-gke-enable-network-policy + title: Network Policy should be enabled on GKE clusters + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + blocks.one( type == "network_policy" ) + } + docs: + desc: | + Enabling a network policy allows the segregation of network traffic by namespace. + audit: | + The following example will fail: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "bad_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + network_policy { + enabled = false + } + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. 
+ remove_default_node_pool = true
+ initial_node_count = 1
+ network_policy {
+ enabled = true
+ }
+ }
+
+ resource "google_container_node_pool" "primary_preemptible_nodes" {
+ name = "my-node-pool"
+ location = "us-central1"
+ cluster = google_container_cluster.primary.name
+ node_count = 1
+
+ node_config {
+ preemptible = true
+ machine_type = "e2-medium"
+
+ # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles.
+ service_account = google_service_account.default.email
+ oauth_scopes = [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ }
+ }
+ ```
+ - uid: terraform-gcp-security-gke-enable-private-cluster
+ title: Clusters should be set to private
+ mql: |
+ terraform.resources.where( nameLabel == "google_container_cluster" ) {
+ blocks.one( type == "private_cluster_config" )
+ }
+ docs:
+ desc: |
+ Enabling private nodes on a cluster ensures the nodes are only available internally as they will only be assigned internal addresses.
+ audit: |
+ The following example will fail because the cluster does not define a `private_cluster_config` block:
+
+ ```terraform
+
+ resource "google_service_account" "default" {
+ account_id = "service-account-id"
+ display_name = "Service Account"
+ }
+
+ resource "google_container_cluster" "bad_example" {
+ name = "my-gke-cluster"
+ location = "us-central1"
+
+ # We can't create a cluster with no node pool defined, but we want to only use
+ # separately managed node pools. So we create the smallest possible default
+ # node pool and immediately delete it.
+ remove_default_node_pool = true
+ initial_node_count = 1
+ }
+
+ resource "google_container_node_pool" "primary_preemptible_nodes" {
+ name = "my-node-pool"
+ location = "us-central1"
+ cluster = google_container_cluster.primary.name
+ node_count = 1
+
+ node_config {
+ preemptible = true
+ machine_type = "e2-medium"
+
+ # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles.
+ service_account = google_service_account.default.email
+ oauth_scopes = [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ }
+ }
+
+ ```
+ remediation: |
+ The following example will pass because the cluster enables private nodes through a `private_cluster_config` block:
+
+ ```terraform
+
+ resource "google_service_account" "default" {
+ account_id = "service-account-id"
+ display_name = "Service Account"
+ }
+
+ resource "google_container_cluster" "good_example" {
+ name = "my-gke-cluster"
+ location = "us-central1"
+
+ # We can't create a cluster with no node pool defined, but we want to only use
+ # separately managed node pools. So we create the smallest possible default
+ # node pool and immediately delete it.
+ remove_default_node_pool = true
+ initial_node_count = 1
+ private_cluster_config {
+ enable_private_nodes = true
+ enable_private_endpoint = false
+ master_ipv4_cidr_block = "10.0.0.0/28"
+ }
+ }
+
+ resource "google_container_node_pool" "primary_preemptible_nodes" {
+ name = "my-node-pool"
+ location = "us-central1"
+ cluster = google_container_cluster.primary.name
+ node_count = 1
+
+ node_config {
+ preemptible = true
+ machine_type = "e2-medium"
+
+ # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles.
+ service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + ``` + - uid: terraform-gcp-security-gke-enable-stackdriver-logging + title: Stackdriver Logging should be enabled + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + attributes.keys.contains( "logging_service" ) + } + terraform.resources.where( nameLabel == "google_container_cluster" ) { + attributes['logging_service']['value'] == 'logging.googleapis.com/kubernetes' + } + docs: + desc: | + StackDriver logging provides a useful interface to all of stdout/stderr for each container and should be enabled for monitoring, debugging, etc. Without Stackdriver, visibility to the cluster will be reduced. + audit: | + The following example will fail: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "bad_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + logging_service = "logging.googleapis.com" + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + logging_service = "logging.googleapis.com/kubernetes" + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + ``` + - uid: terraform-gcp-security-gke-enable-stackdriver-monitoring + title: Stackdriver Monitoring should be enabled + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + attributes.keys.contains( "monitoring_service" ) + } + terraform.resources.where( nameLabel == "google_container_cluster" ) { + attributes['monitoring_service']['value'] == 'monitoring.googleapis.com/kubernetes' + } + docs: + desc: | + StackDriver monitoring aggregates logs, events, and metrics from your Kubernetes environment on GKE to help you understand your application's behavior in production. 
+ audit: | + The following example will fail: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "bad_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + monitoring_service = "monitoring.googleapis.com" + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_service_account" "default" { + account_id = "service-account-id" + display_name = "Service Account" + } + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + monitoring_service = "monitoring.googleapis.com/kubernetes" + } + + resource "google_container_node_pool" "primary_preemptible_nodes" { + name = "my-node-pool" + location = "us-central1" + cluster = google_container_cluster.primary.name + node_count = 1 + + node_config { + preemptible = true + machine_type = "e2-medium" + + # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles. + service_account = google_service_account.default.email + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + } + ``` + - uid: terraform-gcp-security-gke-metadata-endpoints-disabled + title: Legacy metadata endpoints enabled + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + attributes['metadata']['value']['disable-legacy-endpoints'] != false + } + docs: + desc: | + The Compute Engine instance metadata server exposes legacy v0.1 and v1beta1 endpoints, which do not enforce metadata query headers. This is a feature in the v1 APIs that makes it more difficult for a potential attacker to retrieve instance metadata. Unless specifically required, we recommend you disable these legacy APIs. When setting the `metadata` block, the default value for `disable-legacy-endpoints` is set to `true`, they should not be explicitly enabled. 
+ audit: | + The following example will fail: + + ```terraform + + resource "google_container_cluster" "bad_example" { + metadata { + disable-legacy-endpoints = false + } + } + + ``` + remediation: | + The following example will pass: + + ```terraform + + resource "google_container_cluster" "good_example" { + metadata { + disable-legacy-endpoints = true + } + } + ``` + - uid: terraform-gcp-security-gke-no-client-cert-authentication + title: Clusters should not use client certificates for authentication + mql: | + terraform.resources.where( nameLabel == "google_container_cluster" ) { + blocks.where( type == "master_auth" ) { + blocks { attributes['issue_client_certificate']['value'] != true } + } + } + docs: + desc: | + There are several methods of authenticating to the Kubernetes API server. In GKE, the supported methods are service account bearer tokens, OAuth tokens, and x509 client certificates. Prior to GKE's integration with OAuth, a one-time generated x509 certificate or static password were the only available authentication methods, but are now not recommended and should be disabled. These methods present a wider surface of attack for cluster compromise and have been disabled by default since GKE version 1.12. If you are using legacy authentication methods, we recommend that you turn them off. Authentication with a static password is deprecated and has been removed since GKE version 1.19. + + Existing clusters should move to OAuth. + audit: | + The following example will fail due to the `master_auth` block that includes the `issue_client_certificate = true` configuration which is set to `false` by default: + + ```terraform + + resource "google_container_cluster" "bad_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + master_auth { + client_certificate_config { + issue_client_certificate = true + } + } + } + ``` + remediation: | + The following example will pass since the `master_auth` block is not specified and secure defaults are used instead: + + ```terraform + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + } + ``` + The following example will pass because the `master_auth` block is explicitly configuring `issue_client_certificate = false`: + + ```terraform + + resource "google_container_cluster" "good_example" { + name = "my-gke-cluster" + location = "us-central1" + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. 
+ remove_default_node_pool = true
+ initial_node_count = 1
+
+ master_auth {
+ client_certificate_config {
+ issue_client_certificate = false
+ }
+ }
+ }
+ ```
+ - uid: terraform-gcp-security-gke-no-basic-authentication
+ title: Clusters should not use basic authentication
+ mql: |
+ terraform.resources.where( nameLabel == "google_container_cluster" ) {
+ blocks.where( type == "master_auth" ) { attributes['username']['value'] == null || attributes['username']['value'] == "" }
+ }
+ terraform.resources.where( nameLabel == "google_container_cluster" ) {
+ blocks.where( type == "master_auth" ) { attributes['password']['value'] == null || attributes['password']['value'] == "" }
+ }
+ docs:
+ desc: |
+ There are several methods of authenticating to the Kubernetes API server. In GKE, the supported methods are service account bearer tokens, OAuth tokens, and x509 client certificates. Prior to GKE's integration with OAuth, a one-time generated x509 certificate or static password were the only available authentication methods, but are now not recommended and should be disabled. These methods present a wider surface of attack for cluster compromise and have been disabled by default since GKE version 1.12. If you are using legacy authentication methods, we recommend that you turn them off. Authentication with a static password is deprecated and has been removed since GKE version 1.19.
+
+ Existing clusters should move to OAuth.
+ audit: |
+ The following example will fail because the `master_auth` block sets `username` and `password` to values other than `""`:
+
+ ```terraform
+
+ resource "google_container_cluster" "bad_example" {
+ name = "my-gke-cluster"
+ location = "us-central1"
+
+ # We can't create a cluster with no node pool defined, but we want to only use
+ # separately managed node pools. So we create the smallest possible default
+ # node pool and immediately delete it.
+ remove_default_node_pool = true
+ initial_node_count = 1
+ master_auth {
+ username = "kubeadmin"
+ password = var.cluster_password
+ }
+ }
+ ```
+ remediation: |
+ The following example will pass since the `master_auth` block is not specified and secure defaults are used instead:
+
+ ```terraform
+
+ resource "google_container_cluster" "good_example" {
+ name = "my-gke-cluster"
+ location = "us-central1"
+
+ # We can't create a cluster with no node pool defined, but we want to only use
+ # separately managed node pools. So we create the smallest possible default
+ # node pool and immediately delete it.
+ remove_default_node_pool = true
+ initial_node_count = 1
+ }
+ ```
+
+ The following example will pass because the `master_auth` block is explicitly configuring basic auth to be disabled:
+
+ ```terraform
+
+ resource "google_container_cluster" "good_example" {
+ name = "my-gke-cluster"
+ location = "us-central1"
+
+ remove_default_node_pool = true
+ initial_node_count = 1
+
+ master_auth {
+ username = ""
+ password = ""
+ client_certificate_config {
+ issue_client_certificate = false
+ }
+ }
+ }
+ ```
+ - uid: terraform-gcp-security-gke-no-public-control-plane
+ title: GKE Control Plane should not be publicly accessible
+ mql: |
+ terraform.resources.where( nameLabel == "google_container_cluster" ) {
+ arguments['master_authorized_networks_config'][0]['cidr_blocks'] { _['cidr_block'] != "0.0.0.0/0" }
+ }
+ docs:
+ desc: |
+ Authorized networks allow you to specify CIDR ranges and allow IP addresses in those ranges to access your cluster control plane endpoint using HTTPS.
Exposing the Kubernetes control plane to the public internet by specifying a CIDR block of "0.0.0.0/0" is not recommended. Public clusters can have up to 50 authorized network CIDR ranges; private clusters can have up to 100. + audit: | + The following example will fail due to the `master_authorized_networks_config` block that specifies `cidr_block = "0.0.0.0/0"` which is publicly accessible: + + ```terraform + + resource "google_container_cluster" "primary" { + name = "my-gke-cluster" + location = "us-central1" + + remove_default_node_pool = true + initial_node_count = 1 + master_authorized_networks_config = [{ + cidr_blocks = [{ + cidr_block = "0.0.0.0/0" + display_name = "external" + }] + }] + } + ``` + remediation: | + The following example will pass since the `master_authorized_networks_config` block configures an internal `cidr_block`: + + ```terraform + + resource "google_container_cluster" "primary" { + name = "my-gke-cluster" + location = "us-central1" + + remove_default_node_pool = true + initial_node_count = 1 + master_authorized_networks_config = [{ + cidr_blocks = [{ + cidr_block = "10.10.128.0/24" + display_name = "internal" + }] + }] + } + + ``` + - uid: terraform-gcp-security-gke-node-metadata-security + title: Node metadata value disables metadata concealment + mql: | + terraform.resources.where( nameLabel == "google_container_node_pool" ) { + blocks.where( type == "node_config") { + blocks { attributes['node_metadata']['value'] != "EXPOSE" } + } + } + terraform.resources.where( nameLabel == "google_container_node_pool" ) { + blocks.where( type == "node_config") { + blocks { attributes['node_metadata']['value'] != "UNSPECIFIED" } + } + } + docs: + desc: | + GKE metadata concealment protects some potentially sensitive system metadata from user workloads running on your cluster. Metadata concealment is scheduled to be deprecated in the future and Google recommends using Workload Identity instead of metadata concealment. This check is looking for configuration that exposes metadata completely. 
+ audit: |
+ The following example will fail due to the `node_config` block that specifies `node_metadata = "EXPOSE"`:
+
+ ```terraform
+
+ resource "google_container_node_pool" "bad_example" {
+ node_config {
+ workload_metadata_config {
+ node_metadata = "EXPOSE"
+ }
+ }
+ }
+ ```
+
+ The following example will fail due to the `node_config` block that specifies `node_metadata = "UNSPECIFIED"`:
+
+ ```terraform
+
+ resource "google_container_node_pool" "bad_example" {
+ node_config {
+ workload_metadata_config {
+ node_metadata = "UNSPECIFIED"
+ }
+ }
+ }
+ ```
+ remediation: |
+ The following example will pass due to the `node_config` block that specifies `node_metadata = "GKE_METADATA_SERVER"` (recommended):
+
+ ```terraform
+
+ resource "google_container_node_pool" "good_example" {
+ node_config {
+ workload_metadata_config {
+ node_metadata = "GKE_METADATA_SERVER"
+ }
+ }
+ }
+
+ ```
+
+ The following example will pass due to the `node_config` block that specifies `node_metadata = "SECURE"`:
+
+ ```terraform
+
+ resource "google_container_node_pool" "good_example" {
+ node_config {
+ workload_metadata_config {
+ node_metadata = "SECURE"
+ }
+ }
+ }
+
+ ```
+ - uid: terraform-gcp-security-gke-node-pool-uses-cos
+ title: Ensure Container-Optimized OS (cos) is used for Kubernetes Engine Clusters Node image
+ mql: |
+ terraform.resources.where( nameLabel == "google_container_node_pool" ) {
+ blocks.where( type == "node_config") {
+ attributes['image_type']['value'] == 'COS_CONTAINERD'
+ }
+ }
+ docs:
+ desc: |
+ GKE supports several OS image types, but COS_CONTAINERD is the recommended OS image to use on cluster nodes for enhanced security.
+ audit: |
+ The following example will fail due to the `node_config` block that specifies `image_type = "something"`:
+
+ ```terraform
+
+ resource "google_container_node_pool" "bad_example" {
+ name = "my-node-pool"
+ cluster = google_container_cluster.primary.id
+ node_count = 1
+
+ node_config {
+ preemptible = true
+ machine_type = "e2-medium"
+
+ # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles.
+ service_account = google_service_account.default.email
+ oauth_scopes = [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ image_type = "something"
+ }
+ }
+ ```
+ remediation: |
+ The following example will pass due to the `node_config` block that specifies `image_type = "COS_CONTAINERD"` (recommended):
+
+ ```terraform
+
+ resource "google_container_node_pool" "good_example" {
+ name = "my-node-pool"
+ cluster = google_container_cluster.primary.id
+ node_count = 1
+
+ node_config {
+ preemptible = true
+ machine_type = "e2-medium"
+
+ # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles.
+ service_account = google_service_account.default.email
+ oauth_scopes = [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ image_type = "COS_CONTAINERD"
+ }
+ }
+
+ ```
+ - uid: terraform-gcp-security-gke-node-shielding-enabled
+ title: Shielded GKE nodes not enabled
+ mql: |
+ terraform.resources.where( nameLabel == "google_container_cluster" ) {
+ arguments['enable_shielded_nodes'] != false
+ }
+ docs:
+ desc: |
+ Node identity and integrity can't be verified without shielded GKE nodes. CIS GKE Benchmark Recommendation: 6.5.5.
Shielded GKE Nodes provide strong, verifiable node identity and integrity to increase the security of GKE nodes and should be enabled on all GKE clusters.
+
+ `enable_shielded_nodes` is an optional argument that is set to `true` by default; it should not be set to `false`.
+ audit: |
+ The following example will fail because `enable_shielded_nodes` is set to `false`:
+
+ ```terraform
+
+ resource "google_container_cluster" "bad_example" {
+ enable_shielded_nodes = false
+ }
+ ```
+ remediation: |
+ The following example will pass because `enable_shielded_nodes` is set to `true`:
+
+ ```terraform
+
+ resource "google_container_cluster" "good_example" {
+ enable_shielded_nodes = true
+ }
+
+ ```
+ - uid: terraform-gcp-security-gke-use-cluster-labels
+ title: Clusters should be configured with Labels
+ mql: |
+ terraform.resources.where( nameLabel == "google_container_cluster" ) {
+ arguments.keys.contains( "resource_labels" )
+ }
+ docs:
+ desc: |
+ Cluster labels are key-value pairs that help you organize your Google Cloud clusters. You can attach a label to each resource, then filter the resources based on their labels. Information about labels is forwarded to the billing system, so you can break down your billed charges by label.
+
+ The `resource_labels` argument is optional when using the `google_container_cluster` resource.
+ audit: |
+ The following example will fail because the `resource_labels` argument is not defined for the cluster:
+
+ ```terraform
+
+ resource "google_container_cluster" "bad_example" {
+ name = "my-gke-cluster"
+ location = "us-central1"
+
+ remove_default_node_pool = true
+ initial_node_count = 1
+ }
+
+ ```
+ remediation: |
+ The following example will pass because the `resource_labels` argument is defined for the cluster:
+
+ ```terraform
+
+ resource "google_container_cluster" "good_example" {
+ name = "my-gke-cluster"
+ location = "us-central1"
+
+ # We can't create a cluster with no node pool defined, but we want to only use
+ # separately managed node pools. So we create the smallest possible default
+ # node pool and immediately delete it.
+ remove_default_node_pool = true
+ initial_node_count = 1
+ resource_labels = {
+ "env" = "staging"
+ }
+ }
+
+ ```
+ - uid: terraform-gcp-security-gke-use-rbac-permissions
+ title: Legacy ABAC permissions are enabled
+ mql: |
+ terraform.resources.where( nameLabel == "google_container_cluster" ) {
+ arguments['enable_legacy_abac'] != true
+ }
+ docs:
+ desc: |
+ By default, ABAC is disabled for clusters created using GKE version 1.8 and later. In Kubernetes, RBAC is used to grant permissions to resources at the cluster and namespace level. RBAC allows you to define roles with rules containing a set of permissions. RBAC has significant security advantages over ABAC.
+
+ The `enable_legacy_abac` argument is set to `false` by default.
+ audit: |
+ The following example will fail because the `enable_legacy_abac` argument is set to `true`:
+
+ ```terraform
+
+ resource "google_container_cluster" "bad_example" {
+ enable_legacy_abac = true
+ }
+
+ ```
+ remediation: |
+ The following example will pass because the `enable_legacy_abac` argument is explicitly set to `false` (omitting the argument will also pass):
+
+ ```terraform
+
+ resource "google_container_cluster" "good_example" {
+ name = "my-gke-cluster"
+ location = "us-central1"
+
+ enable_legacy_abac = false
+ }
+
+ ```
+ - uid: terraform-gcp-security-gke-use-service-account
+ title: A service account should be defined for GKE nodes
+ mql: |-
+ terraform.resources.where( nameLabel == "google_container_cluster" ) {
+ blocks.where( type == "node_config" ) {
+ arguments.keys.contains("service_account")
+ }
+ }
+ terraform.resources.where( nameLabel == "google_container_node_pool" ) {
+ blocks.where( type == "node_config" ) {
+ arguments.keys.contains("service_account")
+ }
+ }
+ docs:
+ desc: |
+ Each GKE node has an Identity and Access Management (IAM) Service Account associated with it. By default, nodes are given the Compute Engine default service account, which you can find by navigating to the IAM section of the Cloud Console. This account has broad access by default, making it useful to a wide variety of applications, but it has more permissions than are required to run your Kubernetes Engine cluster. You should create and use a minimally privileged service account for your nodes to use instead of the Compute Engine default service account.
+ audit: |
+ The following example will fail because the `node_config` block does not contain a `service_account` argument:
+
+ ```terraform
+
+ resource "google_container_cluster" "bad_example" {
+ name = "marcellus-wallace"
+ location = "us-central1-a"
+ initial_node_count = 3
+
+ node_config {
+ labels = {
+ foo = "bar"
+ }
+ tags = ["foo", "bar"]
+ }
+ timeouts {
+ create = "30m"
+ update = "40m"
+ }
+ }
+
+ ```
+ remediation: |
+ The following example will pass because the `node_config` block contains a `service_account` argument:
+
+ ```terraform
+
+ resource "google_container_cluster" "good_example" {
+ name = "marcellus-wallace"
+ location = "us-central1-a"
+ initial_node_count = 3
+
+ node_config {
+ # Google recommends custom service accounts that have cloud-platform scope and permissions granted via IAM Roles.
+ service_account = google_service_account.default.email
+ oauth_scopes = [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+
+ labels = {
+ foo = "bar"
+ }
+ tags = ["foo", "bar"]
+ }
+ timeouts {
+ create = "30m"
+ update = "40m"
+ }
+ }
+
+ ```
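+
+ As a minimal sketch of the "minimally privileged service account" mentioned above (the account ID, project ID, and role bindings below are illustrative assumptions, not values defined by this policy), the node service account can be created and granted only the logging and monitoring roles it needs:
+
+ ```terraform
+
+ # Illustrative example: a dedicated, least-privilege service account for GKE nodes.
+ resource "google_service_account" "default" {
+ account_id = "gke-node-sa"
+ display_name = "GKE node service account"
+ }
+
+ # Grant only the roles node workloads typically need; adjust to your project.
+ resource "google_project_iam_member" "node_log_writer" {
+ project = "my-project-id"
+ role = "roles/logging.logWriter"
+ member = "serviceAccount:${google_service_account.default.email}"
+ }
+
+ resource "google_project_iam_member" "node_metric_writer" {
+ project = "my-project-id"
+ role = "roles/monitoring.metricWriter"
+ member = "serviceAccount:${google_service_account.default.email}"
+ }
+
+ ```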