From facf233c3a80703bbc760e068dc4fcaaf477b4a9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 13:39:21 +0100 Subject: [PATCH 1/7] build(deps): bump docker/setup-buildx-action from 3.7.1 to 3.8.0 (#7223) Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 3.7.1 to 3.8.0. - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/c47758b77c9736f4b2ef4073d4d51994fabfe349...6524bf65af31da8d45b59e8c27de4bd072b392f5) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/post-merge.yaml | 2 +- .github/workflows/post-tag.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/post-merge.yaml b/.github/workflows/post-merge.yaml index aea50a482c..dffa514a5c 100644 --- a/.github/workflows/post-merge.yaml +++ b/.github/workflows/post-merge.yaml @@ -163,7 +163,7 @@ jobs: path: _release - name: Set up Docker Buildx - uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1 + uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 # v3.8.0 - name: Deploy OPA Edge env: diff --git a/.github/workflows/post-tag.yaml b/.github/workflows/post-tag.yaml index 5a77393200..4abe4dd7cb 100644 --- a/.github/workflows/post-tag.yaml +++ b/.github/workflows/post-tag.yaml @@ -98,7 +98,7 @@ jobs: path: _release - name: Set up Docker Buildx - uses: docker/setup-buildx-action@c47758b77c9736f4b2ef4073d4d51994fabfe349 # v3.7.1 + uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 # v3.8.0 - name: Build and Deploy OPA Docker Images id: build-and-deploy From 339640fb868201e510dd0bf33265a98b890de5b6 Mon Sep 17 00:00:00 2001 From: Johan Fylling Date: Mon, 16 Dec 2024 13:59:08 +0100 Subject: [PATCH 2/7] Fixing broken bench tests (#7221) These fail for the `ci-release-test` GHA on `main`. 
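The fixtures still used v0 Rego syntax; the `v1` packages require the `if` and `contains` keywords, so rules like `denies[x] { ... }` become `denies contains x if { ... }`, and the now-redundant `future.keywords` imports are dropped (see the diff below).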
Signed-off-by: Johan Fylling --- v1/compile/compile_bench_test.go | 12 ++++---- v1/cover/cover_bench_test.go | 2 +- v1/dependencies/deps_bench_test.go | 4 +-- v1/plugins/logs/plugin_benchmark_test.go | 2 +- v1/profiler/profiler_bench_test.go | 2 +- v1/test/e2e/logs/utils.go | 2 +- v1/topdown/sets_bench_test.go | 4 +-- v1/topdown/strings_bench_test.go | 4 +-- v1/topdown/topdown_bench_test.go | 29 +++++++++---------- v1/topdown/topdown_partial_bench_test.go | 2 +- v1/util/test/benchmark.go | 36 ++++++++++++------------ 11 files changed, 48 insertions(+), 51 deletions(-) diff --git a/v1/compile/compile_bench_test.go b/v1/compile/compile_bench_test.go index a3fa3f108f..4e60a2bc7a 100644 --- a/v1/compile/compile_bench_test.go +++ b/v1/compile/compile_bench_test.go @@ -44,13 +44,13 @@ func generateDynamicPolicyBenchmarkData(N int) map[string]string { "main.rego": ` package main - denies[x] { + denies contains x if { x := data.policies[input.type][input.subtype][_].denies[_] } - any_denies { + any_denies if { denies[_] } - allow { + allow if { not any_denies }`, } @@ -64,7 +64,7 @@ func generateDynamicPolicyBenchmarkData(N int) map[string]string { func generateDynamicMockPolicy(N int) string { return fmt.Sprintf(`package policies["%d"]["%d"].policy%d -denies[x] { +denies contains x if { input.attribute == "%d" x := "policy%d" }`, N, N, N, N, N) @@ -104,7 +104,7 @@ func generateLargePartialRuleBenchmarkData(N int) map[string]string { policy.WriteString(generateLargePartialRuleMockRule(i)) policy.WriteString("\n\n") } - policy.WriteString(`number_denies = x { + policy.WriteString(`number_denies = x if { x := count(deny) }`) @@ -115,7 +115,7 @@ func generateLargePartialRuleBenchmarkData(N int) map[string]string { } func generateLargePartialRuleMockRule(N int) string { - return fmt.Sprintf(`deny[[resource, errormsg]] { + return fmt.Sprintf(`deny contains [resource, errormsg] if { resource := "example.%d" i := %d i %% 2 != 0 diff --git a/v1/cover/cover_bench_test.go b/v1/cover/cover_bench_test.go index 43cbbbfd91..c0a64c72ba 100644 --- a/v1/cover/cover_bench_test.go +++ b/v1/cover/cover_bench_test.go @@ -61,7 +61,7 @@ func generateModule(numVars int, dataSize int) string { sb := strings.Builder{} sb.WriteString(`package test -p { +p if { x := a v := x[i] `) diff --git a/v1/dependencies/deps_bench_test.go b/v1/dependencies/deps_bench_test.go index 1224fe9666..ccd9302762 100644 --- a/v1/dependencies/deps_bench_test.go +++ b/v1/dependencies/deps_bench_test.go @@ -60,14 +60,14 @@ func makePolicy(ruleCount int) string { var b strings.Builder b.WriteString("package test\n\n") - b.WriteString("main {\n") + b.WriteString("main if {\n") for i := 0; i < ruleCount; i++ { b.WriteString(fmt.Sprintf(" p_%d\n", i)) } b.WriteString("}\n\n") for i := 0; i < ruleCount; i++ { - b.WriteString(fmt.Sprintf("p_%d {\n", i)) + b.WriteString(fmt.Sprintf("p_%d if {\n", i)) for j := i + 1; j < ruleCount; j++ { b.WriteString(fmt.Sprintf(" p_%d\n", j)) } diff --git a/v1/plugins/logs/plugin_benchmark_test.go b/v1/plugins/logs/plugin_benchmark_test.go index 0e0288d77b..3817db7b92 100644 --- a/v1/plugins/logs/plugin_benchmark_test.go +++ b/v1/plugins/logs/plugin_benchmark_test.go @@ -222,7 +222,7 @@ func BenchmarkMaskingErase(b *testing.B) { return store.UpsertPolicy(ctx, txn, "test.rego", []byte(` package system.log - mask["/input"] { + mask contains "/input" if { input.input.request.kind.kind == "Pod" } `)) diff --git a/v1/profiler/profiler_bench_test.go b/v1/profiler/profiler_bench_test.go index 87513491e0..286ee66446 100644 --- 
a/v1/profiler/profiler_bench_test.go +++ b/v1/profiler/profiler_bench_test.go @@ -61,7 +61,7 @@ func generateModule(numVars int, dataSize int) string { sb := strings.Builder{} sb.WriteString(`package test -p { +p if { x := a v := x[i] `) diff --git a/v1/test/e2e/logs/utils.go b/v1/test/e2e/logs/utils.go index 3223c0a01f..0ff3818e25 100644 --- a/v1/test/e2e/logs/utils.go +++ b/v1/test/e2e/logs/utils.go @@ -80,7 +80,7 @@ func GeneratePolicy(ruleCounts int, ruleHits int) string { pb.WriteString("package test\n") hits := 0 for i := 0; i < ruleCounts; i++ { - pb.WriteString("rule {") + pb.WriteString("rule if {") if hits < ruleHits { pb.WriteString("input.hit = true") hits++ diff --git a/v1/topdown/sets_bench_test.go b/v1/topdown/sets_bench_test.go index bb2cd78952..2205f8e932 100644 --- a/v1/topdown/sets_bench_test.go +++ b/v1/topdown/sets_bench_test.go @@ -84,10 +84,8 @@ func BenchmarkSetIntersectionSlow(b *testing.B) { store := inmem.NewFromObject(map[string]interface{}{"sets": genNxMSetBenchmarkData(n, m)}) module := `package test - import future.keywords.every - import future.keywords.in - combined[z] { + combined contains z if { data.sets[m][z] every ss in data.sets { ss[z] diff --git a/v1/topdown/strings_bench_test.go b/v1/topdown/strings_bench_test.go index 73a3187bb9..c9632d8e47 100644 --- a/v1/topdown/strings_bench_test.go +++ b/v1/topdown/strings_bench_test.go @@ -19,7 +19,7 @@ func BenchmarkBulkStartsWithNaive(b *testing.B) { "test.rego": ` package test -result { +result if { startswith(data.strings[_], data.prefixes[_]) } `, @@ -64,7 +64,7 @@ func BenchmarkBulkStartsWithOptimized(b *testing.B) { "test.rego": ` package test -result { +result if { strings.any_prefix_match(data.strings, data.prefixes) } `, diff --git a/v1/topdown/topdown_bench_test.go b/v1/topdown/topdown_bench_test.go index d53c79c87b..403f512234 100644 --- a/v1/topdown/topdown_bench_test.go +++ b/v1/topdown/topdown_bench_test.go @@ -44,7 +44,7 @@ func BenchmarkArrayPlugging(b *testing.B) { store := inmem.NewFromObject(map[string]interface{}{"fixture": data}) module := `package test fixture := data.fixture - main { x := fixture }` + main if { x := fixture }` query := ast.MustParseBody("data.test.main") compiler := ast.MustCompileModules(map[string]string{ @@ -482,19 +482,19 @@ const partialEvalBenchmarkPolicy = `package authz default allow = false - allow { + allow if { user_has_role[role_name] role_has_permission[role_name] } - user_has_role[role_name] { + user_has_role contains role_name if { data.bindings[_] = binding binding.iss = input.iss binding.group = input.group role_name = binding.role } - role_has_permission[role_name] { + role_has_permission contains role_name if { data.roles[_] = role role.name = role_name role.operation = input.operation @@ -607,7 +607,7 @@ func BenchmarkComprehensionIndexing(b *testing.B) { module: ` package test - bench_array { + bench_array if { v := data.items[_] ks := [k | some k; v == data.items[k]] } @@ -619,7 +619,7 @@ func BenchmarkComprehensionIndexing(b *testing.B) { module: ` package test - bench_set { + bench_set if { v := data.items[_] ks := {k | some k; v == data.items[k]} } @@ -631,7 +631,7 @@ func BenchmarkComprehensionIndexing(b *testing.B) { module: ` package test - bench_object { + bench_object if { v := data.items[_] ks := {k: 1 | some k; v == data.items[k]} } @@ -717,7 +717,7 @@ func moduleWithDefs(n int) string { b.WriteString(`package test `) for i := 1; i <= n; i++ { - fmt.Fprintf(&b, `f(x) = y { y := true; x == %[1]d } + fmt.Fprintf(&b, `f(x) = y if { y := 
true; x == %[1]d } `, i) } return b.String() @@ -751,7 +751,7 @@ func BenchmarkObjectSubset(b *testing.B) { store := inmem.NewFromObject(map[string]interface{}{"all": all, "evens": evens}) module := `package test - main {object.subset(data.all, data.evens)}` + main if {object.subset(data.all, data.evens)}` query := ast.MustParseBody("data.test.main") compiler := ast.MustCompileModules(map[string]string{ @@ -811,14 +811,14 @@ func BenchmarkObjectSubsetSlow(b *testing.B) { // https://github.com/open-policy-agent/opa/issues/4358#issue-1141145857 module := `package test - path_matches[match] { + path_matches contains match if { [path, value] := walk(data.evens) not is_object(value) match := object.get(data.all, path, null) == value } - main { path_matches == {true} }` + main if { path_matches == {true} }` query := ast.MustParseBody("data.test.main") compiler := ast.MustCompileModules(map[string]string{ @@ -904,7 +904,7 @@ func BenchmarkGlob(b *testing.B) { }) module := `package test - main { + main if { needleMatches := {h | h := data.haystack[_]; glob.match(data.needleGlob, [], h)} xMatches := {h | h := data.haystack[_]; glob.match("*x*", [], h)} yMtches := {h | h := data.haystack[_]; glob.match("*y*", [], h)} @@ -945,8 +945,7 @@ func BenchmarkGlob(b *testing.B) { func BenchmarkMemberWithKeyFromBaseDoc(b *testing.B) { store := inmem.NewFromObject(test.GenerateLargeJSONBenchmarkData()) mod := `package test - import future.keywords.in - main { "key99", "value99" in data.values } + main if { "key99", "value99" in data.values } ` ctx := context.Background() @@ -971,7 +970,7 @@ func BenchmarkMemberWithKeyFromBaseDoc(b *testing.B) { func BenchmarkObjectGetFromBaseDoc(b *testing.B) { store := inmem.NewFromObject(test.GenerateLargeJSONBenchmarkData()) mod := `package test - main { object.get(data.values, "key99", false) == "value99" } + main if { object.get(data.values, "key99", false) == "value99" } ` ctx := context.Background() diff --git a/v1/topdown/topdown_partial_bench_test.go b/v1/topdown/topdown_partial_bench_test.go index 459201b3cd..8333904af0 100644 --- a/v1/topdown/topdown_partial_bench_test.go +++ b/v1/topdown/topdown_partial_bench_test.go @@ -19,7 +19,7 @@ func BenchmarkInliningFullScan(b *testing.B) { "test.rego": ` package test - p { + p if { data.a[i] == input } `, diff --git a/v1/util/test/benchmark.go b/v1/util/test/benchmark.go index 1af3eaa2d0..f02632742f 100644 --- a/v1/util/test/benchmark.go +++ b/v1/util/test/benchmark.go @@ -18,25 +18,25 @@ func PartialObjectBenchmarkCrossModule(n int) []string { import data.test.bar import data.test.baz - output[key] := value { + output[key] := value if { value := bar[key] startswith("bench_test_", key) }` barMod := "package test.bar\n" barMod += ` - cond_bench_0 { + cond_bench_0 if { contains(lower(input.test_input_0), lower("input_01")) } - cond_bench_1 { + cond_bench_1 if { contains(lower(input.test_input_1), lower("input")) } - cond_bench_2 { + cond_bench_2 if { contains(lower(input.test_input_2), lower("input_10")) } bench_test_out_result := load_tests(test_collector) - load_tests(in) := out { - out := in + load_tests(i) := out if { + out := i } ` @@ -45,15 +45,15 @@ func PartialObjectBenchmarkCrossModule(n int) []string { for idx := 1; idx <= n; idx++ { barMod += fmt.Sprintf(` - bench_test_%[1]d := result { + bench_test_%[1]d := result if { input.bench_test_collector_mambo_number_%[3]d result := input.bench_test_collector_mambo_number_%[3]d - } else := result { + } else := result if { 
is_null(bench_test_out_result.mambo_number_%[3]d.error) result := bench_test_out_result.mambo_number_%[3]d.result } - test_collector["mambo_number_%[3]d"] := result { + test_collector["mambo_number_%[3]d"] := result if { cond_bench_%[2]d not %[3]d == 2 not %[3]d == 3 @@ -63,11 +63,11 @@ func PartialObjectBenchmarkCrossModule(n int) []string { `, idx, idx%3, idx%5) ruleBuilder += fmt.Sprintf(" bar.bench_test_%[1]d == %[1]d\n", idx) if idx%10 == 0 { - bazMod += fmt.Sprintf(`rule_%d { + bazMod += fmt.Sprintf(`rule_%d if { %s }`, idx, ruleBuilder) fooMod += fmt.Sprintf(` - final_decision = "allow" { + final_decision = "allow" if { baz.rule_%d } `, idx) @@ -85,7 +85,7 @@ func ArrayIterationBenchmarkModule(n int) string { fixture = [ x | x := numbers.range(1, %d)[_] ] - main { fixture[i] }`, n) + main if { fixture[i] }`, n) } // SetIterationBenchmarkModule returns a module that iterates a set @@ -95,7 +95,7 @@ func SetIterationBenchmarkModule(n int) string { fixture = { x | x := numbers.range(1, %d)[_] } - main { fixture[i] }`, n) + main if { fixture[i] }`, n) } // ObjectIterationBenchmarkModule returns a module that iterates an object @@ -105,7 +105,7 @@ func ObjectIterationBenchmarkModule(n int) string { fixture = { x: x | x := numbers.range(1, %d)[_] } - main { fixture[i] }`, n) + main if { fixture[i] }`, n) } // GenerateLargeJSONBenchmarkData returns a map of 100 keys and 100.000 key/value @@ -176,12 +176,12 @@ func GenerateConcurrencyBenchmarkData() (string, map[string]interface{}) { import data.objs - p { + p if { objs[i].attr1 = "get" objs[i].groups[j] = "eng" } - p { + p if { objs[i].user = "alice" } ` @@ -195,7 +195,7 @@ func GenerateConcurrencyBenchmarkData() (string, map[string]interface{}) { func GenerateVirtualDocsBenchmarkData(numTotalRules, numHitRules int) (string, map[string]interface{}) { hitRule := ` - allow { + allow if { input.method = "POST" input.path = ["accounts", account_id] input.user_id = account_id @@ -203,7 +203,7 @@ func GenerateVirtualDocsBenchmarkData(numTotalRules, numHitRules int) (string, m ` missRule := ` - allow { + allow if { input.method = "GET" input.path = ["salaries", account_id] input.user_id = account_id From 82b2214ef94f1047c5ea2316154f9606cd99c890 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 15:11:45 +0100 Subject: [PATCH 3/7] build(deps): bump google.golang.org/grpc from 1.68.1 to 1.69.0 (#7218) Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.68.1 to 1.69.0. - [Release notes](https://github.com/grpc/grpc-go/releases) - [Commits](https://github.com/grpc/grpc-go/compare/v1.68.1...v1.69.0) --- updated-dependencies: - dependency-name: google.golang.org/grpc dependency-type: direct:production update-type: version-update:semver-minor ... 
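Notable changes vendored with 1.69.0 (see the diff below): the experimental pick-first leaf policy implements RFC 8305 "happy eyeballs" behaviour (address-family interleaving plus a 250ms connection-attempt delay), a `RegisterHealthListener` API is added to `balancer.SubConn`, per-target pick-first connection metrics are registered, and balancer code migrates from `math/rand` to `math/rand/v2`.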
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 6 +- .../grpc/balancer/balancer.go | 93 ---- .../balancer/pickfirst/internal/internal.go | 17 +- .../grpc/balancer/pickfirst/pickfirst.go | 2 +- .../pickfirst/pickfirstleaf/pickfirstleaf.go | 472 ++++++++++++++---- .../grpc/balancer/roundrobin/roundrobin.go | 4 +- .../grpc/balancer/subconn.go | 134 +++++ .../grpc/balancer_wrapper.go | 77 +++ .../grpc_binarylog_v1/binarylog.pb.go | 180 ++----- vendor/google.golang.org/grpc/clientconn.go | 11 +- vendor/google.golang.org/grpc/codec.go | 2 +- vendor/google.golang.org/grpc/dialoptions.go | 16 - .../grpc/experimental/stats/metricregistry.go | 27 +- .../grpc/experimental/stats/metrics.go | 75 --- .../grpc/grpclog/internal/loggerv2.go | 107 +++- .../grpc/health/grpc_health_v1/health.pb.go | 48 +- .../grpc/internal/backoff/backoff.go | 2 +- .../grpc/internal/internal.go | 22 +- .../internal/resolver/dns/dns_resolver.go | 41 +- .../grpc/internal/transport/client_stream.go | 144 ++++++ .../grpc/internal/transport/flowcontrol.go | 9 +- .../grpc/internal/transport/handler_server.go | 36 +- .../grpc/internal/transport/http2_client.go | 91 ++-- .../grpc/internal/transport/http2_server.go | 69 +-- .../grpc/internal/transport/server_stream.go | 178 +++++++ .../grpc/internal/transport/transport.go | 321 ++---------- .../grpc/mem/buffer_slice.go | 59 ++- vendor/google.golang.org/grpc/preloader.go | 4 +- .../grpc/resolver/resolver.go | 22 +- vendor/google.golang.org/grpc/rpc_util.go | 42 +- vendor/google.golang.org/grpc/server.go | 87 ++-- .../google.golang.org/grpc/service_config.go | 5 +- .../google.golang.org/grpc/stats/metrics.go | 81 +++ vendor/google.golang.org/grpc/stats/stats.go | 74 +-- vendor/google.golang.org/grpc/stream.go | 62 +-- vendor/google.golang.org/grpc/version.go | 2 +- vendor/modules.txt | 2 +- 38 files changed, 1508 insertions(+), 1118 deletions(-) create mode 100644 vendor/google.golang.org/grpc/balancer/subconn.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/client_stream.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/server_stream.go create mode 100644 vendor/google.golang.org/grpc/stats/metrics.go diff --git a/go.mod b/go.mod index 7fe79c84fb..ecd7da999e 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( go.uber.org/automaxprocs v1.6.0 golang.org/x/net v0.32.0 golang.org/x/time v0.8.0 - google.golang.org/grpc v1.68.1 + google.golang.org/grpc v1.69.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c gopkg.in/yaml.v3 v3.0.1 oras.land/oras-go/v2 v2.3.1 diff --git a/go.sum b/go.sum index 04e2b9ba35..fee6a77c9e 100644 --- a/go.sum +++ b/go.sum @@ -280,6 +280,8 @@ go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZk go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= go.opentelemetry.io/proto/otlp v1.3.1 
h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= @@ -418,8 +420,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= -google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= +google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI= +google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 3a2092f105..382ad69411 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -73,17 +73,6 @@ func unregisterForTesting(name string) { delete(m, name) } -// connectedAddress returns the connected address for a SubConnState. The -// address is only valid if the state is READY. -func connectedAddress(scs SubConnState) resolver.Address { - return scs.connectedAddress -} - -// setConnectedAddress sets the connected address for a SubConnState. -func setConnectedAddress(scs *SubConnState, addr resolver.Address) { - scs.connectedAddress = addr -} - func init() { internal.BalancerUnregister = unregisterForTesting internal.ConnectedAddress = connectedAddress @@ -106,57 +95,6 @@ func Get(name string) Builder { return nil } -// A SubConn represents a single connection to a gRPC backend service. -// -// Each SubConn contains a list of addresses. -// -// All SubConns start in IDLE, and will not try to connect. To trigger the -// connecting, Balancers must call Connect. If a connection re-enters IDLE, -// Balancers must call Connect again to trigger a new connection attempt. -// -// gRPC will try to connect to the addresses in sequence, and stop trying the -// remainder once the first connection is successful. If an attempt to connect -// to all addresses encounters an error, the SubConn will enter -// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. -// -// Once established, if a connection is lost, the SubConn will transition -// directly to IDLE. -// -// This interface is to be implemented by gRPC. Users should not need their own -// implementation of this interface. For situations like testing, any -// implementations should embed this interface. This allows gRPC to add new -// methods to this interface. -type SubConn interface { - // UpdateAddresses updates the addresses used in this SubConn. - // gRPC checks if currently-connected address is still in the new list. - // If it's in the list, the connection will be kept. - // If it's not in the list, the connection will gracefully close, and - // a new connection will be created. - // - // This will trigger a state transition for the SubConn. - // - // Deprecated: this method will be removed. Create new SubConns for new - // addresses instead. 
- UpdateAddresses([]resolver.Address) - // Connect starts the connecting for this SubConn. - Connect() - // GetOrBuildProducer returns a reference to the existing Producer for this - // ProducerBuilder in this SubConn, or, if one does not currently exist, - // creates a new one and returns it. Returns a close function which may be - // called when the Producer is no longer needed. Otherwise the producer - // will automatically be closed upon connection loss or subchannel close. - // Should only be called on a SubConn in state Ready. Otherwise the - // producer will be unable to create streams. - GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) - // Shutdown shuts down the SubConn gracefully. Any started RPCs will be - // allowed to complete. No future calls should be made on the SubConn. - // One final state update will be delivered to the StateListener (or - // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to - // indicate the shutdown operation. This may be delivered before - // in-progress RPCs are complete and the actual connection is closed. - Shutdown() -} - // NewSubConnOptions contains options to create new SubConn. type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created @@ -424,18 +362,6 @@ type ExitIdler interface { ExitIdle() } -// SubConnState describes the state of a SubConn. -type SubConnState struct { - // ConnectivityState is the connectivity state of the SubConn. - ConnectivityState connectivity.State - // ConnectionError is set if the ConnectivityState is TransientFailure, - // describing the reason the SubConn failed. Otherwise, it is nil. - ConnectionError error - // connectedAddr contains the connected address when ConnectivityState is - // Ready. Otherwise, it is indeterminate. - connectedAddress resolver.Address -} - // ClientConnState describes the state of a ClientConn relevant to the // balancer. type ClientConnState struct { @@ -448,22 +374,3 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") - -// A ProducerBuilder is a simple constructor for a Producer. It is used by the -// SubConn to create producers when needed. -type ProducerBuilder interface { - // Build creates a Producer. The first parameter is always a - // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as `any` to avoid a dependency - // cycle. Build also returns a close function that will be called when all - // references to the Producer have been given up for a SubConn, or when a - // connectivity state change occurs on the SubConn. The close function - // should always block until all asynchronous cleanup work is completed. - Build(grpcClientConnInterface any) (p Producer, close func()) -} - -// A Producer is a type shared among potentially many consumers. It is -// associated with a SubConn, and an implementation will typically contain -// other methods to provide additional functionality, e.g. configuration or -// subscription registration. 
-type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go index c519789458..7d66cb491c 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go @@ -18,7 +18,18 @@ // Package internal contains code internal to the pickfirst package. package internal -import "math/rand" +import ( + rand "math/rand/v2" + "time" +) -// RandShuffle pseudo-randomizes the order of addresses. -var RandShuffle = rand.Shuffle +var ( + // RandShuffle pseudo-randomizes the order of addresses. + RandShuffle = rand.Shuffle + // TimeAfterFunc allows mocking the timer for testing connection delay + // related functionality. + TimeAfterFunc = func(d time.Duration, f func()) func() { + timer := time.AfterFunc(d, f) + return func() { timer.Stop() } + } +) diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index e069346a75..ea8899818c 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -23,7 +23,7 @@ import ( "encoding/json" "errors" "fmt" - "math/rand" + rand "math/rand/v2" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 985b6edc7f..2fc0a71f94 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -29,11 +29,15 @@ import ( "encoding/json" "errors" "fmt" + "net" + "net/netip" "sync" + "time" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/pickfirst/internal" "google.golang.org/grpc/connectivity" + expstats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" @@ -50,26 +54,68 @@ func init() { balancer.Register(pickfirstBuilder{}) } +// enableHealthListenerKeyType is a unique key type used in resolver attributes +// to indicate whether the health listener usage is enabled. +type enableHealthListenerKeyType struct{} + var ( logger = grpclog.Component("pick-first-leaf-lb") // Name is the name of the pick_first_leaf balancer. // It is changed to "pick_first" in init() if this balancer is to be // registered as the default pickfirst. - Name = "pick_first_leaf" + Name = "pick_first_leaf" + disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.disconnections", + Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.", + Unit: "disconnection", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_succeeded", + Description: "EXPERIMENTAL. Number of successful connection attempts.", + Unit: "attempt", + Labels: []string{"grpc.target"}, + Default: false, + }) + connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{ + Name: "grpc.lb.pick_first.connection_attempts_failed", + Description: "EXPERIMENTAL. 
Number of failed connection attempts.", + Unit: "attempt", + Labels: []string{"grpc.target"}, + Default: false, + }) ) -// TODO: change to pick-first when this becomes the default pick_first policy. -const logPrefix = "[pick-first-leaf-lb %p] " +const ( + // TODO: change to pick-first when this becomes the default pick_first policy. + logPrefix = "[pick-first-leaf-lb %p] " + // connectionDelayInterval is the time to wait for during the happy eyeballs + // pass before starting the next connection attempt. + connectionDelayInterval = 250 * time.Millisecond +) + +type ipAddrFamily int + +const ( + // ipAddrFamilyUnknown represents strings that can't be parsed as an IP + // address. + ipAddrFamilyUnknown ipAddrFamily = iota + ipAddrFamilyV4 + ipAddrFamilyV6 +) type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{ - cc: cc, - addressList: addressList{}, - subConns: resolver.NewAddressMap(), - state: connectivity.Connecting, - mu: sync.Mutex{}, + cc: cc, + target: bo.Target.String(), + metricsRecorder: bo.MetricsRecorder, // ClientConn will always create a Metrics Recorder. + + subConns: resolver.NewAddressMap(), + state: connectivity.Connecting, + cancelConnectionTimer: func() {}, } b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -87,6 +133,13 @@ func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalan return cfg, nil } +// EnableHealthListener updates the state to configure pickfirst for using a +// generic health listener. +func EnableHealthListener(state resolver.State) resolver.State { + state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true) + return state +} + type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -104,14 +157,19 @@ type scData struct { subConn balancer.SubConn addr resolver.Address - state connectivity.State - lastErr error + rawConnectivityState connectivity.State + // The effective connectivity state based on raw connectivity, health state + // and after following sticky TransientFailure behaviour defined in A62. + effectiveState connectivity.State + lastErr error + connectionFailedInFirstPass bool } func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { sd := &scData{ - state: connectivity.Idle, - addr: addr, + rawConnectivityState: connectivity.Idle, + effectiveState: connectivity.Idle, + addr: addr, } sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{ StateListener: func(state balancer.SubConnState) { @@ -128,19 +186,25 @@ func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { type pickfirstBalancer struct { // The following fields are initialized at build time and read-only after // that and therefore do not need to be guarded by a mutex. - logger *internalgrpclog.PrefixLogger - cc balancer.ClientConn + logger *internalgrpclog.PrefixLogger + cc balancer.ClientConn + target string + metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil // The mutex is used to ensure synchronization of updates triggered // from the idle picker and the already serialized resolver, // SubConn state updates. - mu sync.Mutex + mu sync.Mutex + // State reported to the channel based on SubConn states and resolver + // updates. 
state connectivity.State // scData for active subonns mapped by address. - subConns *resolver.AddressMap - addressList addressList - firstPass bool - numTF int + subConns *resolver.AddressMap + addressList addressList + firstPass bool + numTF int + cancelConnectionTimer func() + healthCheckingEnabled bool } // ResolverError is called by the ClientConn when the name resolver produces @@ -166,7 +230,7 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { return } - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, }) @@ -175,15 +239,16 @@ func (b *pickfirstBalancer) resolverErrorLocked(err error) { func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { b.mu.Lock() defer b.mu.Unlock() + b.cancelConnectionTimer() if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { // Cleanup state pertaining to the previous resolver state. // Treat an empty address list like an error by calling b.ResolverError. - b.state = connectivity.TransientFailure b.closeSubConnsLocked() b.addressList.updateAddrs(nil) b.resolverErrorLocked(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil cfg, ok := state.BalancerConfig.(pfConfig) if state.BalancerConfig != nil && !ok { return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState) @@ -206,9 +271,6 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // "Flatten the list by concatenating the ordered list of addresses for // each of the endpoints, in order." - A61 for _, endpoint := range endpoints { - // "In the flattened list, interleave addresses from the two address - // families, as per RFC-8305 section 4." - A61 - // TODO: support the above language. newAddrs = append(newAddrs, endpoint.Addresses...) } } else { @@ -231,16 +293,17 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Not de-duplicating would result in attempting to connect to the same // SubConn multiple times in the same pass. We don't want this. newAddrs = deDupAddresses(newAddrs) + newAddrs = interleaveAddresses(newAddrs) - // Since we have a new set of addresses, we are again at first pass. - b.firstPass = true - - // If the previous ready SubConn exists in new address list, - // keep this connection and don't create new SubConns. prevAddr := b.addressList.currentAddress() + prevSCData, found := b.subConns.Get(prevAddr) prevAddrsCount := b.addressList.size() + isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready b.addressList.updateAddrs(newAddrs) - if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) { + + // If the previous ready SubConn exists in new address list, + // keep this connection and don't create new SubConns. + if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) { return nil } @@ -252,18 +315,17 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // we should still enter CONNECTING because the sticky TF behaviour // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported // due to connectivity failures. 
- if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 { + if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 { // Start connection attempt at first address. - b.state = connectivity.Connecting - b.cc.UpdateState(balancer.State{ + b.forceUpdateConcludedStateLocked(balancer.State{ ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) - b.requestConnectionLocked() + b.startFirstPassLocked() } else if b.state == connectivity.TransientFailure { // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until // we're READY. See A62. - b.requestConnectionLocked() + b.startFirstPassLocked() } return nil } @@ -278,6 +340,7 @@ func (b *pickfirstBalancer) Close() { b.mu.Lock() defer b.mu.Unlock() b.closeSubConnsLocked() + b.cancelConnectionTimer() b.state = connectivity.Shutdown } @@ -287,10 +350,19 @@ func (b *pickfirstBalancer) Close() { func (b *pickfirstBalancer) ExitIdle() { b.mu.Lock() defer b.mu.Unlock() - if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() { - b.firstPass = true - b.requestConnectionLocked() + if b.state == connectivity.Idle { + b.startFirstPassLocked() + } +} + +func (b *pickfirstBalancer) startFirstPassLocked() { + b.firstPass = true + b.numTF = 0 + // Reset the connection attempt record for existing SubConns. + for _, sd := range b.subConns.Values() { + sd.(*scData).connectionFailedInFirstPass = false } + b.requestConnectionLocked() } func (b *pickfirstBalancer) closeSubConnsLocked() { @@ -314,6 +386,70 @@ func deDupAddresses(addrs []resolver.Address) []resolver.Address { return retAddrs } +// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6) +// as per RFC-8305 section 4. +// Whichever address family is first in the list is followed by an address of +// the other address family; that is, if the first address in the list is IPv6, +// then the first IPv4 address should be moved up in the list to be second in +// the list. It doesn't support configuring "First Address Family Count", i.e. +// there will always be a single member of the first address family at the +// beginning of the interleaved list. +// Addresses that are neither IPv4 nor IPv6 are treated as part of a third +// "unknown" family for interleaving. +// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6 +func interleaveAddresses(addrs []resolver.Address) []resolver.Address { + familyAddrsMap := map[ipAddrFamily][]resolver.Address{} + interleavingOrder := []ipAddrFamily{} + for _, addr := range addrs { + family := addressFamily(addr.Addr) + if _, found := familyAddrsMap[family]; !found { + interleavingOrder = append(interleavingOrder, family) + } + familyAddrsMap[family] = append(familyAddrsMap[family], addr) + } + + interleavedAddrs := make([]resolver.Address, 0, len(addrs)) + + for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) { + // Some IP types may have fewer addresses than others, so we look for + // the next type that has a remaining member to add to the interleaved + // list. + family := interleavingOrder[curFamilyIdx] + remainingMembers := familyAddrsMap[family] + if len(remainingMembers) > 0 { + interleavedAddrs = append(interleavedAddrs, remainingMembers[0]) + familyAddrsMap[family] = remainingMembers[1:] + } + } + + return interleavedAddrs +} + +// addressFamily returns the ipAddrFamily after parsing the address string. 
+// If the address isn't of the format "ip-address:port", it returns +// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when +// using a resolver like passthrough where the address may be a hostname in +// some format that the dialer can resolve. +func addressFamily(address string) ipAddrFamily { + // Parse the IP after removing the port. + host, _, err := net.SplitHostPort(address) + if err != nil { + return ipAddrFamilyUnknown + } + ip, err := netip.ParseAddr(host) + if err != nil { + return ipAddrFamilyUnknown + } + switch { + case ip.Is4() || ip.Is4In6(): + return ipAddrFamilyV4 + case ip.Is6(): + return ipAddrFamilyV6 + default: + return ipAddrFamilyUnknown + } +} + // reconcileSubConnsLocked updates the active subchannels based on a new address // list from the resolver. It does this by: // - closing subchannels: any existing subchannels associated with addresses @@ -342,6 +478,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) // shutdownRemainingLocked shuts down remaining subConns. Called when a subConn // becomes ready, which means that all other subConn must be shutdown. func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { + b.cancelConnectionTimer() for _, v := range b.subConns.Values() { sd := v.(*scData) if sd.subConn != selected.subConn { @@ -382,46 +519,89 @@ func (b *pickfirstBalancer) requestConnectionLocked() { } scd := sd.(*scData) - switch scd.state { + switch scd.rawConnectivityState { case connectivity.Idle: scd.subConn.Connect() + b.scheduleNextConnectionLocked() + return case connectivity.TransientFailure: - // Try the next address. + // The SubConn is being re-used and failed during a previous pass + // over the addressList. It has not completed backoff yet. + // Mark it as having failed and try the next address. + scd.connectionFailedInFirstPass = true lastErr = scd.lastErr continue - case connectivity.Ready: - // Should never happen. - b.logger.Errorf("Requesting a connection even though we have a READY SubConn") - case connectivity.Shutdown: - // Should never happen. - b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map") case connectivity.Connecting: - // Wait for the SubConn to report success or failure. + // Wait for the connection attempt to complete or the timer to fire + // before attempting the next address. + b.scheduleNextConnectionLocked() + return + default: + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState) + return + } - return } + // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the - // first pass. - b.endFirstPassLocked(lastErr) + // first pass if possible. + b.endFirstPassIfPossibleLocked(lastErr) +} + +func (b *pickfirstBalancer) scheduleNextConnectionLocked() { + b.cancelConnectionTimer() + if !b.addressList.hasNext() { + return + } + curAddr := b.addressList.currentAddress() + cancelled := false // Access to this is protected by the balancer's mutex. + closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() { + b.mu.Lock() + defer b.mu.Unlock() + // If the scheduled task is cancelled while acquiring the mutex, return. 
+ if cancelled { + return + } + if b.logger.V(2) { + b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr) + } + if b.addressList.increment() { + b.requestConnectionLocked() + } + }) + // Access to the cancellation callback held by the balancer is guarded by + // the balancer's mutex, so it's safe to set the boolean from the callback. + b.cancelConnectionTimer = sync.OnceFunc(func() { + cancelled = true + closeFn() + }) } func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) { b.mu.Lock() defer b.mu.Unlock() - oldState := sd.state - sd.state = newState.ConnectivityState + oldState := sd.rawConnectivityState + sd.rawConnectivityState = newState.ConnectivityState // Previously relevant SubConns can still callback with state updates. // To prevent pickers from returning these obsolete SubConns, this logic // is included to check if the current list of active SubConns includes this // SubConn. - if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd { + if !b.isActiveSCData(sd) { return } if newState.ConnectivityState == connectivity.Shutdown { + sd.effectiveState = connectivity.Shutdown return } + // Record a connection attempt when exiting CONNECTING. + if newState.ConnectivityState == connectivity.TransientFailure { + sd.connectionFailedInFirstPass = true + connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target) + } + if newState.ConnectivityState == connectivity.Ready { + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) b.shutdownRemainingLocked(sd) if !b.addressList.seekTo(sd.addr) { // This should not fail as we should have only one SubConn after @@ -429,10 +609,30 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses) return } - b.state = connectivity.Ready - b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Ready, - Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + if !b.healthCheckingEnabled { + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn) + } + + sd.effectiveState = connectivity.Ready + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + return + } + if b.logger.V(2) { + b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn) + } + // Send a CONNECTING update to take the SubConn out of sticky-TF if + // required. + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) { + b.updateSubConnHealthState(sd, scs) }) return } @@ -443,13 +643,24 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub // a transport is successfully created, but the connection fails // before the SubConn can send the notification for READY. We treat // this as a successful connection and transition to IDLE. 
- if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { + // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second + // part of the if condition below once the issue is fixed. + if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) { // Once a transport fails, the balancer enters IDLE and starts from // the first address when the picker is used. b.shutdownRemainingLocked(sd) - b.state = connectivity.Idle + sd.effectiveState = newState.ConnectivityState + // READY SubConn interspliced in between CONNECTING and IDLE, need to + // account for that. + if oldState == connectivity.Connecting { + // A known issue (https://github.com/grpc/grpc-go/issues/7862) + // causes a race that prevents the READY state change notification. + // This works around it. + connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target) + } + disconnectionsMetric.Record(b.metricsRecorder, 1, b.target) b.addressList.reset() - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.Idle, Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)}, }) @@ -459,32 +670,35 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub if b.firstPass { switch newState.ConnectivityState { case connectivity.Connecting: - // The balancer can be in either IDLE, CONNECTING or - // TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in + // The effective state can be in either IDLE, CONNECTING or + // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in // TRANSIENT_FAILURE until it's READY. See A62. - // If the balancer is already in CONNECTING, no update is needed. - if b.state == connectivity.Idle { - b.state = connectivity.Connecting - b.cc.UpdateState(balancer.State{ + if sd.effectiveState != connectivity.TransientFailure { + sd.effectiveState = connectivity.Connecting + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.Connecting, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) } case connectivity.TransientFailure: sd.lastErr = newState.ConnectionError + sd.effectiveState = connectivity.TransientFailure // Since we're re-using common SubConns while handling resolver // updates, we could receive an out of turn TRANSIENT_FAILURE from - // a pass over the previous address list. We ignore such updates. - - if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { - return - } - if b.addressList.increment() { - b.requestConnectionLocked() - return + // a pass over the previous address list. Happy Eyeballs will also + // cause out of order updates to arrive. + + if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) { + b.cancelConnectionTimer() + if b.addressList.increment() { + b.requestConnectionLocked() + return + } } - // End of the first pass. - b.endFirstPassLocked(newState.ConnectionError) + + // End the first pass if we've seen a TRANSIENT_FAILURE from all + // SubConns once. 
+ b.endFirstPassIfPossibleLocked(newState.ConnectionError) } return } @@ -495,7 +709,7 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub b.numTF = (b.numTF + 1) % b.subConns.Len() sd.lastErr = newState.ConnectionError if b.numTF%b.subConns.Len() == 0 { - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: newState.ConnectionError}, }) @@ -509,24 +723,95 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub } } -func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) { +// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the +// addresses are tried and their SubConns have reported a failure. +func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { + // An optimization to avoid iterating over the entire SubConn map. + if b.addressList.isValid() { + return + } + // Connect() has been called on all the SubConns. The first pass can be + // ended if all the SubConns have reported a failure. + for _, v := range b.subConns.Values() { + sd := v.(*scData) + if !sd.connectionFailedInFirstPass { + return + } + } b.firstPass = false - b.numTF = 0 - b.state = connectivity.TransientFailure - - b.cc.UpdateState(balancer.State{ + b.updateBalancerState(balancer.State{ ConnectivityState: connectivity.TransientFailure, Picker: &picker{err: lastErr}, }) // Start re-connecting all the SubConns that are already in IDLE. for _, v := range b.subConns.Values() { sd := v.(*scData) - if sd.state == connectivity.Idle { + if sd.rawConnectivityState == connectivity.Idle { sd.subConn.Connect() } } } +func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool { + activeSD, found := b.subConns.Get(sd.addr) + return found && activeSD == sd +} + +func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) { + b.mu.Lock() + defer b.mu.Unlock() + // Previously relevant SubConns can still callback with state updates. + // To prevent pickers from returning these obsolete SubConns, this logic + // is included to check if the current list of active SubConns includes + // this SubConn. + if !b.isActiveSCData(sd) { + return + } + sd.effectiveState = state.ConnectivityState + switch state.ConnectivityState { + case connectivity.Ready: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Ready, + Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}}, + }) + case connectivity.TransientFailure: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)}, + }) + case connectivity.Connecting: + b.updateBalancerState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + default: + b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state) + } +} + +// updateBalancerState stores the state reported to the channel and calls +// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate +// updates to the channel. +func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) { + // In case of TransientFailures allow the picker to be updated to update + // the connectivity error, in all other cases don't send duplicate state + // updates. 
+ if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure { + return + } + b.forceUpdateConcludedStateLocked(newState) +} + +// forceUpdateConcludedStateLocked stores the state reported to the channel and +// calls ClientConn.UpdateState(). +// A separate function is defined to force update the ClientConn state since the +// channel doesn't correctly assume that LB policies start in CONNECTING and +// relies on LB policy to send an initial CONNECTING update. +func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) { + b.state = newState.ConnectivityState + b.cc.UpdateState(newState) +} + type picker struct { result balancer.PickResult err error @@ -583,15 +868,6 @@ func (al *addressList) currentAddress() resolver.Address { return al.addresses[al.idx] } -// first returns the first address in the list. If the list is empty, it returns -// an empty address instead. -func (al *addressList) first() resolver.Address { - if len(al.addresses) == 0 { - return resolver.Address{} - } - return al.addresses[0] -} - func (al *addressList) reset() { al.idx = 0 } @@ -614,6 +890,16 @@ func (al *addressList) seekTo(needle resolver.Address) bool { return false } +// hasNext returns whether incrementing the addressList will result in moving +// past the end of the list. If the list has already moved past the end, it +// returns false. +func (al *addressList) hasNext() bool { + if !al.isValid() { + return false + } + return al.idx+1 < len(al.addresses) +} + // equalAddressIgnoringBalAttributes returns true is a and b are considered // equal. This is different from the Equal method on the resolver.Address type // which considers all fields to determine equality. Here, we only consider diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 260255d31b..80a42d2251 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,7 +22,7 @@ package roundrobin import ( - "math/rand" + rand "math/rand/v2" "sync/atomic" "google.golang.org/grpc/balancer" @@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { // Start at a random index, as the same RR balancer rebuilds a new // picker when SubConn states change, and we don't want to apply excess // load to the first server in the list. - next: uint32(rand.Intn(len(scs))), + next: uint32(rand.IntN(len(scs))), } } diff --git a/vendor/google.golang.org/grpc/balancer/subconn.go b/vendor/google.golang.org/grpc/balancer/subconn.go new file mode 100644 index 0000000000..ea27c4fa76 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/subconn.go @@ -0,0 +1,134 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package balancer + +import ( + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/resolver" +) + +// A SubConn represents a single connection to a gRPC backend service. +// +// All SubConns start in IDLE, and will not try to connect. To trigger a +// connection attempt, Balancers must call Connect. +// +// If the connection attempt fails, the SubConn will transition to +// TRANSIENT_FAILURE for a backoff period, and then return to IDLE. If the +// connection attempt succeeds, it will transition to READY. +// +// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE. +// +// If a connection re-enters IDLE, Balancers must call Connect again to trigger +// a new connection attempt. +// +// Each SubConn contains a list of addresses. gRPC will try to connect to the +// addresses in sequence, and stop trying the remainder once the first +// connection is successful. However, this behavior is deprecated. SubConns +// should only use a single address. +// +// NOTICE: This interface is intended to be implemented by gRPC, or intercepted +// by custom load balancing poilices. Users should not need their own complete +// implementation of this interface -- they should always delegate to a SubConn +// returned by ClientConn.NewSubConn() by embedding it in their implementations. +// An embedded SubConn must never be nil, or runtime panics will occur. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully close, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which may be + // called when the Producer is no longer needed. Otherwise the producer + // will automatically be closed upon connection loss or subchannel close. + // Should only be called on a SubConn in state Ready. Otherwise the + // producer will be unable to create streams. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() + // RegisterHealthListener registers a health listener that receives health + // updates for a Ready SubConn. Only one health listener can be registered + // at a time. A health listener should be registered each time the SubConn's + // connectivity state changes to READY. Registering a health listener when + // the connectivity state is not READY may result in undefined behaviour. 
+ // This method must not be called synchronously while handling an update + // from a previously registered health listener. + RegisterHealthListener(func(SubConnState)) + // EnforceSubConnEmbedding is included to force implementers to embed + // another implementation of this interface, allowing gRPC to add methods + // without breaking users. + internal.EnforceSubConnEmbedding +} + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Build also returns a close function that will be called when all + // references to the Producer have been given up for a SubConn, or when a + // connectivity state change occurs on the SubConn. The close function + // should always block until all asynchronous cleanup work is completed. + Build(grpcClientConnInterface any) (p Producer, close func()) +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address +} + +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 2a4f2878ae..905817b5fc 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -189,6 +189,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer), stateListener: opts.StateListener, + healthData: newHealthData(connectivity.Idle), } ac.acbw = acbw return acbw, nil @@ -254,12 +255,32 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { + internal.EnforceSubConnEmbedding ac *addrConn // read-only ccb *ccBalancerWrapper // read-only stateListener func(balancer.SubConnState) producersMu sync.Mutex producers map[balancer.ProducerBuilder]*refCountedProducer + + // Access to healthData is protected by healthMu. + healthMu sync.Mutex + // healthData is stored as a pointer to detect when the health listener is + // dropped or updated. This is required as closures can't be compared for + // equality. 
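The NOTICE above is enforced structurally: internal.EnforceSubConnEmbedding (defined later in this patch) has an unexported method, so the only way to satisfy the SubConn interface outside gRPC is to embed another SubConn. A hedged sketch of the intended wrapper pattern (the type and its logging are illustrative, not part of this patch):

package lbexample

import (
	"fmt"

	"google.golang.org/grpc/balancer"
)

// loggingSubConn intercepts one SubConn method. Embedding balancer.SubConn
// satisfies internal.EnforceSubConnEmbedding, so gRPC can later add methods
// to the interface without breaking this type. The embedded value must be a
// real SubConn obtained from ClientConn.NewSubConn; if it is nil, any
// delegated call panics.
type loggingSubConn struct {
	balancer.SubConn
}

// Connect logs and then delegates; every other method falls through to the
// embedded implementation.
func (s *loggingSubConn) Connect() {
	fmt.Println("connect requested")
	s.SubConn.Connect()
}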
+ healthData *healthData +} + +// healthData holds data related to health state reporting. +type healthData struct { + // connectivityState stores the most recent connectivity state delivered + // to the LB policy. This is stored to avoid sending updates when the + // SubConn has already exited connectivity state READY. + connectivityState connectivity.State +} + +func newHealthData(s connectivity.State) *healthData { + return &healthData{connectivityState: s} } // updateState is invoked by grpc to push a subConn state update to the @@ -279,6 +300,24 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolve if s == connectivity.Ready { setConnectedAddress(&scs, curAddr) } + // Invalidate the health listener by updating the healthData. + acbw.healthMu.Lock() + // A race may occur if a health listener is registered soon after the + // connectivity state is set but before the stateListener is called. + // Two cases may arise: + // 1. The new state is not READY: RegisterHealthListener has checks to + // ensure no updates are sent when the connectivity state is not + // READY. + // 2. The new state is READY: This means that the old state wasn't Ready. + // The RegisterHealthListener API mentions that a health listener + // must not be registered when a SubConn is not ready to avoid such + // races. When this happens, the LB policy would get health updates + // on the old listener. When the LB policy registers a new listener + // on receiving the connectivity update, the health updates will be + // sent to the new health listener. + acbw.healthData = newHealthData(scs.ConnectivityState) + acbw.healthMu.Unlock() + acbw.stateListener(scs) }) } @@ -373,3 +412,41 @@ func (acbw *acBalancerWrapper) closeProducers() { delete(acbw.producers, pb) } } + +// RegisterHealthListener accepts a health listener from the LB policy. It sends +// updates to the health listener as long as the SubConn's connectivity state +// doesn't change and a new health listener is not registered. To invalidate +// the currently registered health listener, acbw updates the healthData. If a +// nil listener is registered, the active health listener is dropped. +func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) { + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + // listeners should not be registered when the connectivity state + // isn't Ready. This may happen when the balancer registers a listener + // after the connectivityState is updated, but before it is notified + // of the update. + if acbw.healthData.connectivityState != connectivity.Ready { + return + } + // Replace the health data to stop sending updates to any previously + // registered health listeners. + hd := newHealthData(connectivity.Ready) + acbw.healthData = hd + if listener == nil { + return + } + + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Don't send updates if a new listener is registered. 
+ acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + curHD := acbw.healthData + if curHD != hd { + return + } + listener(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + }) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 55bffaa77e..9e9d080699 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -274,11 +274,9 @@ type GrpcLogEntry struct { func (x *GrpcLogEntry) Reset() { *x = GrpcLogEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GrpcLogEntry) String() string { @@ -289,7 +287,7 @@ func (*GrpcLogEntry) ProtoMessage() {} func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -440,11 +438,9 @@ type ClientHeader struct { func (x *ClientHeader) Reset() { *x = ClientHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClientHeader) String() string { @@ -455,7 +451,7 @@ func (*ClientHeader) ProtoMessage() {} func (x *ClientHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -509,11 +505,9 @@ type ServerHeader struct { func (x *ServerHeader) Reset() { *x = ServerHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServerHeader) String() string { @@ -524,7 +518,7 @@ func (*ServerHeader) ProtoMessage() {} func (x *ServerHeader) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -565,11 +559,9 @@ type Trailer struct { func (x *Trailer) Reset() { *x = Trailer{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
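The healthData swap above is a pointer-identity trick: Go closures cannot be compared for equality, so each registration allocates a fresh value and the deferred callback later checks whether that pointer is still current before delivering an update. The same idea in isolation (names are illustrative, and grpc schedules the callback on a serializer rather than a bare goroutine):

package lbexample

import "sync"

// generation carries a field so every allocation has a distinct address
// (pointers to zero-sized values may compare equal in Go).
type generation struct{ id int }

type notifier struct {
	mu   sync.Mutex
	cur  *generation
	next int
}

// register installs l and invalidates any previously scheduled delivery by
// replacing the generation pointer, mirroring RegisterHealthListener.
func (n *notifier) register(l func(state string)) {
	n.mu.Lock()
	n.next++
	g := &generation{id: n.next}
	n.cur = g
	n.mu.Unlock()

	go func() {
		n.mu.Lock()
		defer n.mu.Unlock()
		if n.cur != g {
			return // a newer listener was registered; drop this update
		}
		l("READY")
	}()
}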
ms.StoreMessageInfo(mi) } func (x *Trailer) String() string { @@ -580,7 +572,7 @@ func (*Trailer) ProtoMessage() {} func (x *Trailer) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -638,11 +630,9 @@ type Message struct { func (x *Message) Reset() { *x = Message{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Message) String() string { @@ -653,7 +643,7 @@ func (*Message) ProtoMessage() {} func (x *Message) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -713,11 +703,9 @@ type Metadata struct { func (x *Metadata) Reset() { *x = Metadata{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Metadata) String() string { @@ -728,7 +716,7 @@ func (*Metadata) ProtoMessage() {} func (x *Metadata) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -762,11 +750,9 @@ type MetadataEntry struct { func (x *MetadataEntry) Reset() { *x = MetadataEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MetadataEntry) String() string { @@ -777,7 +763,7 @@ func (*MetadataEntry) ProtoMessage() {} func (x *MetadataEntry) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -820,11 +806,9 @@ type Address struct { func (x *Address) Reset() { *x = Address{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Address) String() string { @@ -835,7 +819,7 @@ func (*Address) ProtoMessage() {} func (x *Address) ProtoReflect() protoreflect.Message { mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1057,104 
+1041,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() { if File_grpc_binlog_v1_binarylog_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GrpcLogEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*ClientHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*ServerHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*Trailer); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*Message); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*Metadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*MetadataEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*Address); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 19763f8edd..4f57b55434 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -775,10 +775,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) } } - var balCfg serviceconfig.LoadBalancingConfig - if cc.sc != nil && cc.sc.lbConfig != nil { - balCfg = cc.sc.lbConfig - } + balCfg := cc.sc.lbConfig bw := cc.balancerWrapper cc.mu.Unlock() @@ -1374,7 +1371,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, defer cancel() copts.ChannelzParent = ac.channelz - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { if logger.V(2) { logger.Infof("Creating new client transport to %q: %v", addr, err) @@ -1448,7 +1445,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if !ac.scopts.HealthCheckEnabled { return } - healthCheckFunc := ac.cc.dopts.healthCheckFunc + healthCheckFunc := internal.HealthCheckFunc if healthCheckFunc == nil { // The health package is not imported to set health check 
function. // @@ -1480,7 +1477,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { } // Start the health checking stream. go func() { - err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index e840858b77..959c2f99d4 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -71,7 +71,7 @@ func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { if err != nil { return nil, err } - return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil + return mem.BufferSlice{mem.SliceBuffer(data)}, nil } func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 518692c3af..7494ae591f 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -87,7 +87,6 @@ type dialOptions struct { disableServiceConfig bool disableRetry bool disableHealthCheck bool - healthCheckFunc internal.HealthChecker minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string @@ -445,10 +444,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp }) } -func init() { - internal.WithHealthCheckFunc = withHealthCheckFunc -} - // WithDialer returns a DialOption that specifies a function to use for dialing // network addresses. If FailOnNonTempDialError() is set to true, and an error // is returned by f, gRPC checks the error's Temporary() method to decide if it @@ -662,16 +657,6 @@ func WithDisableHealthCheck() DialOption { }) } -// withHealthCheckFunc replaces the default health check function with the -// provided one. It makes tests easier to change the health check function. -// -// For testing purpose only. -func withHealthCheckFunc(f internal.HealthChecker) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.healthCheckFunc = f - }) -} - func defaultDialOptions() dialOptions { return dialOptions{ copts: transport.ConnectOptions{ @@ -682,7 +667,6 @@ func defaultDialOptions() dialOptions { BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, - healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go index 1d827dd5d9..ad75313a18 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -23,6 +23,7 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + "google.golang.org/grpc/stats" ) func init() { @@ -34,7 +35,7 @@ var logger = grpclog.Component("metrics-registry") // DefaultMetrics are the default metrics registered through global metrics // registry. This is written to at initialization time only, and is read only // after initialization. 
-var DefaultMetrics = NewMetrics() +var DefaultMetrics = stats.NewMetricSet() // MetricDescriptor is the data for a registered metric. type MetricDescriptor struct { @@ -42,7 +43,7 @@ type MetricDescriptor struct { // (including any per call metrics). See // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions // for metric naming conventions. - Name Metric + Name string // The description of this metric. Description string // The unit (e.g. entries, seconds) of this metric. @@ -154,27 +155,27 @@ func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels . } // registeredMetrics are the registered metric descriptor names. -var registeredMetrics = make(map[Metric]bool) +var registeredMetrics = make(map[string]bool) // metricsRegistry contains all of the registered metrics. // // This is written to only at init time, and read only after that. -var metricsRegistry = make(map[Metric]*MetricDescriptor) +var metricsRegistry = make(map[string]*MetricDescriptor) // DescriptorForMetric returns the MetricDescriptor from the global registry. // // Returns nil if MetricDescriptor not present. -func DescriptorForMetric(metric Metric) *MetricDescriptor { - return metricsRegistry[metric] +func DescriptorForMetric(metricName string) *MetricDescriptor { + return metricsRegistry[metricName] } -func registerMetric(name Metric, def bool) { - if registeredMetrics[name] { - logger.Fatalf("metric %v already registered", name) +func registerMetric(metricName string, def bool) { + if registeredMetrics[metricName] { + logger.Fatalf("metric %v already registered", metricName) } - registeredMetrics[name] = true + registeredMetrics[metricName] = true if def { - DefaultMetrics = DefaultMetrics.Add(name) + DefaultMetrics = DefaultMetrics.Add(metricName) } } @@ -256,8 +257,8 @@ func snapshotMetricsRegistryForTesting() func() { oldRegisteredMetrics := registeredMetrics oldMetricsRegistry := metricsRegistry - registeredMetrics = make(map[Metric]bool) - metricsRegistry = make(map[Metric]*MetricDescriptor) + registeredMetrics = make(map[string]bool) + metricsRegistry = make(map[string]*MetricDescriptor) maps.Copy(registeredMetrics, registeredMetrics) maps.Copy(metricsRegistry, metricsRegistry) diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go index 3221f7a633..bf9e7f987b 100644 --- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -19,8 +19,6 @@ // Package stats contains experimental metrics/stats API's. package stats -import "maps" - // MetricsRecorder records on metrics derived from metric registry. type MetricsRecorder interface { // RecordInt64Count records the measurement alongside labels on the int @@ -39,76 +37,3 @@ type MetricsRecorder interface { // gauge associated with the provided handle. RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) } - -// Metric is an identifier for a metric. -type Metric string - -// Metrics is a set of metrics to record. Once created, Metrics is immutable, -// however Add and Remove can make copies with specific metrics added or -// removed, respectively. -// -// Do not construct directly; use NewMetrics instead. -type Metrics struct { - // metrics are the set of metrics to initialize. - metrics map[Metric]bool -} - -// NewMetrics returns a Metrics containing Metrics. 
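With the experimental Metric type gone, metric names are plain strings and the default set is seeded with stats.NewMetricSet from the stable stats package, as the hunk above shows. A hedged usage sketch (the metric names are invented, and the MetricSet method set is assumed from its use in this hunk):

package lbexample

import "google.golang.org/grpc/stats"

// defaultSet mirrors how DefaultMetrics is now built: plain string names,
// with Add returning a copy that includes the extra metrics, which is how
// registerMetric grows the default set above.
var defaultSet = stats.NewMetricSet("grpc.example.calls_started")

func withFailures() *stats.MetricSet {
	return defaultSet.Add("grpc.example.calls_failed")
}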
-func NewMetrics(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Metrics returns the metrics set. The returned map is read-only and must not -// be modified. -func (m *Metrics) Metrics() map[Metric]bool { - return m.metrics -} - -// Add adds the metrics to the metrics set and returns a new copy with the -// additional metrics. -func (m *Metrics) Add(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - newMetrics[metric] = true - } - return &Metrics{ - metrics: newMetrics, - } -} - -// Join joins the metrics passed in with the metrics set, and returns a new copy -// with the merged metrics. -func (m *Metrics) Join(metrics *Metrics) *Metrics { - newMetrics := make(map[Metric]bool) - maps.Copy(newMetrics, m.metrics) - maps.Copy(newMetrics, metrics.metrics) - return &Metrics{ - metrics: newMetrics, - } -} - -// Remove removes the metrics from the metrics set and returns a new copy with -// the metrics removed. -func (m *Metrics) Remove(metrics ...Metric) *Metrics { - newMetrics := make(map[Metric]bool) - for metric := range m.metrics { - newMetrics[metric] = true - } - - for _, metric := range metrics { - delete(newMetrics, metric) - } - return &Metrics{ - metrics: newMetrics, - } -} diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index 07df71e98a..ed90060c3c 100644 --- a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -101,6 +101,22 @@ var severityName = []string{ fatalLog: "FATAL", } +// sprintf is fmt.Sprintf. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintf = fmt.Sprintf + +// sprint is fmt.Sprint. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprint = fmt.Sprint + +// sprintln is fmt.Sprintln. +// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily. +var sprintln = fmt.Sprintln + +// exit is os.Exit. +// This var exists to make it possible to test functions calling os.Exit. +var exit = os.Exit + // loggerT is the default logger used by grpclog. type loggerT struct { m []*log.Logger @@ -111,7 +127,7 @@ type loggerT struct { func (g *loggerT) output(severity int, s string) { sevStr := severityName[severity] if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + g.m[severity].Output(2, sevStr+": "+s) return } // TODO: we can also include the logging component, but that needs more @@ -123,55 +139,79 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } +func (g *loggerT) printf(severity int, format string, args ...any) { + // Note the discard check is duplicated in each print func, rather than in + // output, to avoid the expensive Sprint calls. + // De-duplicating this by moving to output would be a significant performance regression! 
+ if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintf(format, args...)) +} + +func (g *loggerT) print(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprint(v...)) +} + +func (g *loggerT) println(severity int, v ...any) { + if lg := g.m[severity]; lg.Writer() == io.Discard { + return + } + g.output(severity, sprintln(v...)) +} + func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) + g.print(infoLog, args...) } func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) + g.println(infoLog, args...) } func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) + g.printf(infoLog, format, args...) } func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) + g.print(warningLog, args...) } func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) + g.println(warningLog, args...) } func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) + g.printf(warningLog, format, args...) } func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) + g.print(errorLog, args...) } func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) + g.println(errorLog, args...) } func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) + g.printf(errorLog, format, args...) } func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) + g.print(fatalLog, args...) + exit(1) } func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) + g.println(fatalLog, args...) + exit(1) } func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) + g.printf(fatalLog, format, args...) + exit(1) } func (g *loggerT) V(l int) bool { @@ -186,19 +226,42 @@ type LoggerV2Config struct { FormatJSON bool } +// combineLoggers returns a combined logger for both higher & lower severity logs, +// or only one if the other is io.Discard. +// +// This uses io.Discard instead of io.MultiWriter when all loggers +// are set to io.Discard. Both this package and the standard log package have +// significant optimizations for io.Discard, which io.MultiWriter lacks (as of +// this writing). +func combineLoggers(lower, higher io.Writer) io.Writer { + if lower == io.Discard { + return higher + } + if higher == io.Discard { + return lower + } + return io.MultiWriter(lower, higher) +} + // NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. // The infoW, warningW, and errorW writers are used to write log messages of // different severity levels. func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { - var m []*log.Logger flag := log.LstdFlags if c.FormatJSON { flag = 0 } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. 
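combineLoggers above exists because both this package and the standard log package special-case io.Discard, while wrapping writers in io.MultiWriter hides that sentinel and forfeits the fast path. A standalone sketch of the same decision (function and variable names are illustrative):

package lbexample

import (
	"io"
	"log"
	"os"
)

// severityWriter returns a single writer when one input is io.Discard, so
// the io.Discard sentinel stays visible to log.Logger; only when both sides
// are live does it pay for an io.MultiWriter.
func severityWriter(lower, higher io.Writer) io.Writer {
	if lower == io.Discard {
		return higher
	}
	if higher == io.Discard {
		return lower
	}
	return io.MultiWriter(lower, higher)
}

func ExampleSeverityWriter() {
	// With the info stream discarded, warnings go straight to stderr with
	// no MultiWriter indirection.
	warn := log.New(severityWriter(io.Discard, os.Stderr), "", log.LstdFlags)
	warn.Println("warning: direct write")
}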
- m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) + + warningW = combineLoggers(infoW, warningW) + errorW = combineLoggers(errorW, warningW) + + fatalW := errorW + + m := []*log.Logger{ + log.New(infoW, "", flag), + log.New(warningW, "", flag), + log.New(errorW, "", flag), + log.New(fatalW, "", flag), + } return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index d92335445f..26e16d9192 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.35.1 // protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -99,11 +99,9 @@ type HealthCheckRequest struct { func (x *HealthCheckRequest) Reset() { *x = HealthCheckRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckRequest) String() string { @@ -114,7 +112,7 @@ func (*HealthCheckRequest) ProtoMessage() {} func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -146,11 +144,9 @@ type HealthCheckResponse struct { func (x *HealthCheckResponse) Reset() { *x = HealthCheckResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HealthCheckResponse) String() string { @@ -161,7 +157,7 @@ func (*HealthCheckResponse) ProtoMessage() {} func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { mi := &file_grpc_health_v1_health_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -260,32 +256,6 @@ func file_grpc_health_v1_health_proto_init() { if File_grpc_health_v1_health_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*HealthCheckRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*HealthCheckResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index b15cf482d2..b6ae7f2585 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go 
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,7 +25,7 @@ package backoff import ( "context" "errors" - "math/rand" + rand "math/rand/v2" "time" grpcbackoff "google.golang.org/grpc/backoff" diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 20b4dc3d35..3afc181344 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -29,8 +29,6 @@ import ( ) var ( - // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -149,6 +147,20 @@ var ( // other features, including the CSDS service. NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + // NewXDSResolverWithClientForTesting creates a new xDS resolver builder + // using the provided xDS client instead of creating a new one using the + // bootstrap configuration specified by the supported environment variables. + // The resolver.Builder is meant to be used in conjunction with the + // grpc.WithResolvers DialOption. The resolver.Builder does not take + // ownership of the provided xDS client and it is the responsibility of the + // caller to close the client when no longer required. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error) + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment // variable. @@ -255,3 +267,9 @@ const ( // It currently has an experimental suffix which would be removed once // end-to-end testing of the policy is completed. const RLSLoadBalancingPolicyName = "rls_experimental" + +// EnforceSubConnEmbedding is used to enforce proper SubConn implementation +// embedding. +type EnforceSubConnEmbedding interface { + enforceSubConnEmbedding() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 374c12fb77..ba5c5a95d0 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -24,8 +24,9 @@ import ( "context" "encoding/json" "fmt" - "math/rand" + rand "math/rand/v2" "net" + "net/netip" "os" "strconv" "strings" @@ -122,7 +123,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts } // IP address. 
- if ipAddr, ok := formatIP(host); ok { + if ipAddr, err := formatIP(host); err == nil { addr := []resolver.Address{{Addr: ipAddr + ":" + port}} cc.UpdateState(resolver.State{Addresses: addr}) return deadResolver{}, nil @@ -260,9 +261,9 @@ func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) return nil, err } for _, a := range lbAddrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + strconv.Itoa(int(s.Port)) newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) @@ -322,9 +323,9 @@ func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error } newAddrs := make([]resolver.Address, 0, len(addrs)) for _, a := range addrs { - ip, ok := formatIP(a) - if !ok { - return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + ip, err := formatIP(a) + if err != nil { + return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err) } addr := ip + ":" + d.port newAddrs = append(newAddrs, resolver.Address{Addr: addr}) @@ -351,19 +352,19 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of -// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// formatIP returns an error if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and error = nil. // If addr is an IPv6 address, return the addr enclosed in square brackets and -// ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false +// error = nil. +func formatIP(addr string) (string, error) { + ip, err := netip.ParseAddr(addr) + if err != nil { + return "", err } - if ip.To4() != nil { - return addr, true + if ip.Is4() { + return addr, nil } - return "[" + addr + "]", true + return "[" + addr + "]", nil } // parseTarget takes the user input target string and default port, returns @@ -379,7 +380,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { return "", "", internal.ErrMissingAddr } - if ip := net.ParseIP(target); ip != nil { + if _, err := netip.ParseAddr(target); err == nil { // target is an IPv4 or IPv6(without brackets) address return target, defaultPort, nil } @@ -427,7 +428,7 @@ func chosenByPercentage(a *int) bool { if a == nil { return true } - return rand.Intn(100)+1 <= *a + return rand.IntN(100)+1 <= *a } func canaryingSC(js string) string { diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go new file mode 100644 index 0000000000..8ed347c541 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
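The formatIP rewrite above (in dns_resolver.go) trades net.ParseIP's boolean for netip.ParseAddr's error, which the resolver can now wrap into its own error messages. A standalone copy of the helper showing the bracketing behavior for IPv6:

package lbexample

import (
	"fmt"
	"net/netip"
)

// formatIP mirrors the resolver helper: IPv4 addresses pass through
// unchanged, IPv6 addresses are bracketed for use in host:port strings,
// and anything unparseable returns the netip error.
func formatIP(addr string) (string, error) {
	ip, err := netip.ParseAddr(addr)
	if err != nil {
		return "", err
	}
	if ip.Is4() {
		return addr, nil
	}
	return "[" + addr + "]", nil
}

func ExampleFormatIP() {
	if ip, err := formatIP("2001:db8::1"); err == nil {
		fmt.Println(ip + ":443") // prints [2001:db8::1]:443
	}
}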
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"sync/atomic"
+
+	"golang.org/x/net/http2"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// ClientStream implements streaming functionality for a gRPC client.
+type ClientStream struct {
+	*Stream // Embed for common stream functionality.
+
+	ct       *http2Client
+	done     chan struct{} // closed at the end of stream to unblock writers.
+	doneFunc func()        // invoked at the end of stream.
+
+	headerChan       chan struct{} // closed to indicate the end of header metadata.
+	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	// headerValid indicates whether a valid header was received. Only
+	// meaningful after headerChan is closed (always call waitOnHeader() before
+	// reading its value).
+	headerValid bool
+	header      metadata.MD // the received header metadata
+	noHeaders   bool        // set if the client never received headers (set only after the stream is done).
+
+	bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
+	unprocessed   atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
+
+	status *status.Status // the status error received from the server
+}
+
+// Read reads an n byte message from the input stream.
+func (s *ClientStream) Read(n int) (mem.BufferSlice, error) {
+	b, err := s.Stream.read(n)
+	if err == nil {
+		s.ct.incrMsgRecv()
+	}
+	return b, err
+}
+
+// Close closes the stream and propagates err to any readers.
+func (s *ClientStream) Close(err error) {
+	var (
+		rst     bool
+		rstCode http2.ErrCode
+	)
+	if err != nil {
+		rst = true
+		rstCode = http2.ErrCodeCancel
+	}
+	s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
+}
+
+// Write writes the hdr and data bytes to the output stream.
+func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
+	return s.ct.write(s, hdr, data, opts)
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *ClientStream) BytesReceived() bool {
+	return s.bytesReceived.Load()
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *ClientStream) Unprocessed() bool {
+	return s.unprocessed.Load()
+}
+
+func (s *ClientStream) waitOnHeader() {
+	select {
+	case <-s.ctx.Done():
+		// Close the stream to prevent headers/trailers from changing after
+		// this function returns.
+		s.Close(ContextErr(s.ctx.Err()))
+		// headerChan could possibly not be closed yet if closeStream raced
+		// with operateHeaders; wait until it is closed explicitly here.
+		<-s.headerChan
+	case <-s.headerChan:
+	}
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is an empty string if there is no compression applied.
+func (s *ClientStream) RecvCompress() string {
+	s.waitOnHeader()
+	return s.recvCompress
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *ClientStream) Done() <-chan struct{} {
+	return s.done
+}
+
+// Header returns the header metadata of the stream. Acquires the key-value
+// pairs of header metadata once it is available. It blocks until i) the
+// metadata is ready or ii) there is no header metadata or iii) the stream is
+// canceled/expired.
+func (s *ClientStream) Header() (metadata.MD, error) {
+	s.waitOnHeader()
+
+	if !s.headerValid || s.noHeaders {
+		return nil, s.status.Err()
+	}
+
+	return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, it returns true.
+func (s *ClientStream) TrailersOnly() bool {
+	s.waitOnHeader()
+	return s.noHeaders
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *ClientStream) Status() *status.Status {
+	return s.status
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index 97198c5158..dfc0f224ec 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -92,14 +92,11 @@ func (f *trInFlow) newLimit(n uint32) uint32 {
 
 func (f *trInFlow) onData(n uint32) uint32 {
 	f.unacked += n
-	if f.unacked >= f.limit/4 {
-		w := f.unacked
-		f.unacked = 0
+	if f.unacked < f.limit/4 {
 		f.updateEffectiveWindowSize()
-		return w
+		return 0
 	}
-	f.updateEffectiveWindowSize()
-	return 0
+	return f.reset()
 }
 
 func (f *trInFlow) reset() uint32 {
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index ce878693bd..d9305a65d8 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -225,7 +225,7 @@ func (ht *serverHandlerTransport) do(fn func()) error {
 	}
 }
 
-func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
+func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error {
 	ht.writeStatusMu.Lock()
 	defer ht.writeStatusMu.Unlock()
 
@@ -289,14 +289,14 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
 
 // writePendingHeaders sets common and custom headers on the first
 // write call (Write, WriteHeader, or WriteStatus)
-func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) {
+func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) {
 	ht.writeCommonHeaders(s)
 	ht.writeCustomHeaders(s)
}
 
 // writeCommonHeaders sets common headers on the first write
 // call (Write, WriteHeader, or WriteStatus).
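The trInFlow.onData rewrite above keeps the same policy with fewer branches: batch incoming bytes and emit a window update only once a quarter of the window is unacknowledged, delegating the flush to reset. The arithmetic in miniature (a sketch; the vendored type also tracks an effective window size):

package lbexample

// inFlow is a cut-down trInFlow: it accumulates received bytes and only
// releases a window update once a quarter of the limit is unacknowledged.
type inFlow struct {
	limit   uint32
	unacked uint32
}

// onData records n received bytes and returns the size of the WINDOW_UPDATE
// to send, or 0 while the batch is still below the threshold.
func (f *inFlow) onData(n uint32) uint32 {
	f.unacked += n
	if f.unacked < f.limit/4 {
		return 0 // keep batching; no update yet
	}
	return f.reset()
}

// reset flushes the batch, returning everything accumulated so far.
func (f *inFlow) reset() uint32 {
	w := f.unacked
	f.unacked = 0
	return w
}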
-func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) { h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -317,7 +317,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { // writeCustomHeaders sets custom headers set on the stream via SetHeader // on the first write call (Write, WriteHeader, or WriteStatus) -func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { +func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) { h := ht.rw.Header() s.hdrMu.Lock() @@ -333,7 +333,7 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { +func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { // Always take a reference because otherwise there is no guarantee the data will // be available after this function returns. This is what callers to Write // expect. @@ -357,7 +357,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSl return nil } -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { +func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error { if err := s.SetHeader(md); err != nil { return err } @@ -385,7 +385,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) { // With this transport type there will be exactly 1 stream: this HTTP request. var cancel context.CancelFunc if ht.timeoutSet { @@ -408,16 +408,18 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - ctx: ctx, - requestRead: func(int) {}, + s := &ServerStream{ + Stream: &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + buf: newRecvBuffer(), + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + }, cancel: cancel, - buf: newRecvBuffer(), st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ @@ -471,9 +473,7 @@ func (ht *serverHandlerTransport) runStream() { } } -func (ht *serverHandlerTransport) IncrMsgSent() {} - -func (ht *serverHandlerTransport) IncrMsgRecv() {} +func (ht *serverHandlerTransport) incrMsgRecv() {} func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 62b81885d8..f323ab7f45 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -123,7 +123,7 @@ type http2Client struct { mu sync.Mutex // guard the following variables nextID uint32 state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ClientStream // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. prevGoAwayID uint32 // goAwayReason records the http2.ErrCode and debug data received with the @@ -199,10 +199,10 @@ func isTemporary(err error) bool { return true } -// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { +func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -339,7 +339,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ClientStream), isSecure: isSecure, perRPCCreds: perRPCCreds, kp: kp, @@ -480,17 +480,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return t, nil } -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
- s := &Stream{ - ct: t, - done: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - headerChan: make(chan struct{}), - contentSubtype: callHdr.ContentSubtype, - doneFunc: callHdr.DoneFunc, + s := &ClientStream{ + Stream: &Stream{ + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + contentSubtype: callHdr.ContentSubtype, + }, + ct: t, + done: make(chan struct{}), + headerChan: make(chan struct{}), + doneFunc: callHdr.DoneFunc, } s.wq = newWriteQuota(defaultWriteQuota, s.done) s.requestRead = func(n int) { @@ -506,7 +508,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { ctxDone: s.ctx.Done(), recv: s.buf, closeStream: func(err error) { - t.CloseStream(s, err) + s.Close(err) }, }, windowHandler: func(n int) { @@ -597,12 +599,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) for k, v := range callAuthData { headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) } - if b := stats.OutgoingTags(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) - } - if b := stats.OutgoingTrace(ctx); b != nil { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) - } if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string @@ -738,7 +734,7 @@ func (e NewStreamError) Error() string { // NewStream creates a stream and registers it into the transport as "active" // streams. All non-nil errors returned will be *NewStreamError. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) { ctx = peer.NewContext(ctx, t.getPeer()) // ServerName field of the resolver returned address takes precedence over @@ -763,7 +759,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return } // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) s.write(recvMsg{err: err}) close(s.done) // If headerChan isn't closed, then close it. @@ -908,21 +904,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return s, nil } -// CloseStream clears the footprint of a stream when the stream is not needed any more. -// This must not be executed in reader's goroutine. -func (t *http2Client) CloseStream(s *Stream, err error) { - var ( - rst bool - rstCode http2.ErrCode - ) - if err != nil { - rst = true - rstCode = http2.ErrCodeCancel - } - t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) -} - -func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { +func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { // Set stream status to done. if s.swapState(streamDone) == streamDone { // If it was already done, return. If multiple closeStream calls @@ -1085,7 +1067,7 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. 
-func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { +func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { reader := data.Reader() if opts.Last { @@ -1114,10 +1096,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *O _ = reader.Close() return err } + t.incrMsgSent() return nil } -func (t *http2Client) getStream(f http2.Frame) *Stream { +func (t *http2Client) getStream(f http2.Frame) *ClientStream { t.mu.Lock() s := t.activeStreams[f.Header().StreamID] t.mu.Unlock() @@ -1127,7 +1110,7 @@ func (t *http2Client) getStream(f http2.Frame) *Stream { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Client) adjustWindow(s *Stream, n uint32) { +func (t *http2Client) adjustWindow(s *ClientStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1136,7 +1119,7 @@ func (t *http2Client) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream. // Window updates will be sent out when the cumulative quota // exceeds the corresponding threshold. -func (t *http2Client) updateWindow(s *Stream, n uint32) { +func (t *http2Client) updateWindow(s *ClientStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -1242,7 +1225,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } if f.ErrCode == http2.ErrCodeRefusedStream { // The stream was unprocessed by the server. - atomic.StoreUint32(&s.unprocessed, 1) + s.unprocessed.Store(true) } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { @@ -1383,11 +1366,11 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { return connectionErrorf(true, nil, "received goaway and there are no active streams") } - streamsToClose := make([]*Stream, 0) + streamsToClose := make([]*ClientStream, 0) for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. 
- atomic.StoreUint32(&stream.unprocessed, 1) + stream.unprocessed.Store(true) streamsToClose = append(streamsToClose, stream) } } @@ -1439,7 +1422,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } endStream := frame.StreamEnded() - atomic.StoreUint32(&s.bytesReceived, 1) + s.bytesReceived.Store(true) initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 if !initialHeader && !endStream { @@ -1809,14 +1792,18 @@ func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Client) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) + } } -func (t *http2Client) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) +func (t *http2Client) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) + } } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 584b50fe55..0055fddd7e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -25,7 +25,7 @@ import ( "fmt" "io" "math" - "math/rand" + rand "math/rand/v2" "net" "net/http" "strconv" @@ -111,7 +111,7 @@ type http2Server struct { // already initialized since draining is already underway. drainEvent *grpcsync.Event state transportState - activeStreams map[uint32]*Stream + activeStreams map[uint32]*ServerStream // idle is the time instant when the connection went idle. // This is either the beginning of the connection or when the number of // RPCs go down to 0. @@ -256,7 +256,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, - activeStreams: make(map[uint32]*Stream), + activeStreams: make(map[uint32]*ServerStream), stats: config.StatsHandlers, kp: kp, idle: time.Now(), @@ -359,7 +359,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. 
-func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -385,11 +385,13 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.maxStreamID = streamID buf := newRecvBuffer() - s := &Stream{ - id: streamID, + s := &ServerStream{ + Stream: &Stream{ + id: streamID, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + }, st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, headerWireLength: int(frame.Header().Length), } var ( @@ -537,12 +539,6 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // Attach the received metadata to the context. if len(mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, mdata) - if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { - s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) - } - if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { - s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) - } } t.mu.Lock() if t.state != reachable { @@ -634,7 +630,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) { defer func() { close(t.readerDone) <-t.loopyWriterDone @@ -698,7 +694,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { } } -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { +func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) { t.mu.Lock() defer t.mu.Unlock() if t.activeStreams == nil { @@ -716,7 +712,7 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { // adjustWindow sends out extra window update over the initial window size // of stream if the application is requesting data larger in size than // the window. -func (t *http2Server) adjustWindow(s *Stream, n uint32) { +func (t *http2Server) adjustWindow(s *ServerStream, n uint32) { if w := s.fc.maybeAdjust(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) } @@ -726,7 +722,7 @@ func (t *http2Server) adjustWindow(s *Stream, n uint32) { // updateWindow adjusts the inbound quota for the stream and the transport. // Window updates will deliver to the controller for sending when // the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *Stream, n uint32) { +func (t *http2Server) updateWindow(s *ServerStream, n uint32) { if w := s.fc.onRead(n); w > 0 { t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w, @@ -963,7 +959,7 @@ func (t *http2Server) checkForHeaderListSize(it any) bool { return true } -func (t *http2Server) streamContextErr(s *Stream) error { +func (t *http2Server) streamContextErr(s *ServerStream) error { select { case <-t.done: return ErrConnClosing @@ -973,7 +969,7 @@ func (t *http2Server) streamContextErr(s *Stream) error { } // WriteHeader sends the header metadata md back to the client. 
-func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { +func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() if s.getState() == streamDone { @@ -1006,7 +1002,7 @@ func (t *http2Server) setResetPingStrikes() { atomic.StoreUint32(&t.resetPingStrikes, 1) } -func (t *http2Server) writeHeaderLocked(s *Stream) error { +func (t *http2Server) writeHeaderLocked(s *ServerStream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -1046,7 +1042,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { // There is no further I/O operations being able to perform on this stream. // TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early // OK is adopted. -func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { +func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error { s.hdrMu.Lock() defer s.hdrMu.Unlock() @@ -1117,11 +1113,11 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { +func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error { reader := data.Reader() if !s.isHeaderSent() { // Headers haven't been written yet. - if err := t.WriteHeader(s, nil); err != nil { + if err := t.writeHeader(s, nil); err != nil { _ = reader.Close() return err } @@ -1147,6 +1143,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Opti _ = reader.Close() return err } + t.incrMsgSent() return nil } @@ -1276,7 +1273,7 @@ func (t *http2Server) Close(err error) { } // deleteStream deletes the stream s from transport's active streams. -func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { +func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { @@ -1297,7 +1294,7 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { } // finishStream closes the stream and puts the trailing headerFrame into controlbuf. -func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { +func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. @@ -1321,7 +1318,7 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h } // closeStream clears the footprint of a stream when the stream is not needed any more. -func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { +func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) { // In case stream sending and receiving are invoked in separate // goroutines (e.g., bi-directional streaming), cancel needs to be // called to interrupt the potential blocking on other goroutines. 
@@ -1415,14 +1412,18 @@ func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { } } -func (t *http2Server) IncrMsgSent() { - t.channelz.SocketMetrics.MessagesSent.Add(1) - t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) +func (t *http2Server) incrMsgSent() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) + } } -func (t *http2Server) IncrMsgRecv() { - t.channelz.SocketMetrics.MessagesReceived.Add(1) - t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) +func (t *http2Server) incrMsgRecv() { + if channelz.IsOn() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) + } } func (t *http2Server) getOutFlowWindow() int64 { @@ -1455,7 +1456,7 @@ func getJitter(v time.Duration) time.Duration { } // Generate a jitter between +/- 10% of the value. r := int64(v / 10) - j := rand.Int63n(2*r) - r + j := rand.Int64N(2*r) - r return time.Duration(j) } diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go new file mode 100644 index 0000000000..a22a901514 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -0,0 +1,178 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "errors" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// ServerStream implements streaming functionality for a gRPC server. +type ServerStream struct { + *Stream // Embed for common stream functionality. + + st internalServerTransport + ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) + cancel context.CancelFunc // invoked at the end of stream to cancel ctx. + + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. + clientAdvertisedCompressors string + headerWireLength int + + // hdrMu protects outgoing header and trailer metadata. + hdrMu sync.Mutex + header metadata.MD // the outgoing header metadata. Updated by WriteHeader. + headerSent atomic.Bool // atomically set when the headers are sent out. +} + +// Read reads an n byte message from the input stream. +func (s *ServerStream) Read(n int) (mem.BufferSlice, error) { + b, err := s.Stream.read(n) + if err == nil { + s.st.incrMsgRecv() + } + return b, err +} + +// SendHeader sends the header metadata for the given stream. +func (s *ServerStream) SendHeader(md metadata.MD) error { + return s.st.writeHeader(s, md) +} + +// Write writes the hdr and data bytes to the output stream. +func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error { + return s.st.write(s, hdr, data, opts) +} + +// WriteStatus sends the status of a stream to the client. 
WriteStatus is +// the final call made on a stream and always occurs. +func (s *ServerStream) WriteStatus(st *status.Status) error { + return s.st.writeStatus(s, st) +} + +// isHeaderSent indicates whether headers have been sent. +func (s *ServerStream) isHeaderSent() bool { + return s.headerSent.Load() +} + +// updateHeaderSent updates headerSent and returns true +// if it was already set. +func (s *ServerStream) updateHeaderSent() bool { + return s.headerSent.Swap(true) +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *ServerStream) RecvCompress() string { + return s.recvCompress +} + +// SendCompress returns the send compressor name. +func (s *ServerStream) SendCompress() string { + return s.sendCompress +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *ServerStream) ContentSubtype() string { + return s.contentSubtype +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *ServerStream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *ServerStream) SetContext(ctx context.Context) { + s.ctx = ctx +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *ServerStream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values +} + +// Header returns the header metadata of the stream. It returns the out header +// after t.WriteHeader is called. It does not block and must not be called +// until after WriteHeader. +func (s *ServerStream) Header() (metadata.MD, error) { + // Return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil +} + +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. +func (s *ServerStream) HeaderWireLength() int { + return s.headerWireLength +} + +// SetHeader sets the header metadata. This can be called multiple times. +// This should not be called in parallel to other data writes. +func (s *ServerStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. +// This should not be called parallel to other data writes. 
+func (s *ServerStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index e12cb0bc91..2859b87755 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -27,7 +27,6 @@ import ( "fmt" "io" "net" - "strings" "sync" "sync/atomic" "time" @@ -39,7 +38,6 @@ import ( "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" - "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" @@ -133,7 +131,7 @@ type recvBufferReader struct { err error } -func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } @@ -142,9 +140,9 @@ func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { return n, nil } if r.closeStream != nil { - n, r.err = r.readHeaderClient(header) + n, r.err = r.readMessageHeaderClient(header) } else { - n, r.err = r.readHeader(header) + n, r.err = r.readMessageHeader(header) } return n, r.err } @@ -174,12 +172,12 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { return buf, r.err } -func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) } } @@ -192,7 +190,7 @@ func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -213,9 +211,9 @@ func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { // faster. r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) case m := <-r.recv.get(): - return r.readHeaderAdditional(m, header) + return r.readMessageHeaderAdditional(m, header) } } @@ -246,7 +244,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { } } -func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { +func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { if m.buffer != nil { @@ -288,14 +286,8 @@ const ( // Stream represents an RPC in the transport layer. 
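// A schematic sketch (illustrative only, not from the upstream diff) of the
// refactor this patch applies around the Stream type below: shared state
// stays on Stream, while client- and server-only state moves to ClientStream
// and ServerStream, each of which embeds *Stream. Field sets are abridged
// and the lowercase type names here are placeholders.
package example

type stream struct { // common state, cf. transport.Stream below
	id     uint32
	method string
}

type clientStream struct { // cf. transport.ClientStream in this patch
	*stream
	headerChan chan struct{} // client-only: closed when header metadata arrives
}

type serverStream struct { // cf. transport.ServerStream in this patch
	*stream
	headerWireLength int // server-only: size of headers as received from the wire
}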
type Stream struct { id uint32 - st ServerTransport // nil for client side Stream - ct ClientTransport // nil for server side Stream - ctx context.Context // the associated context of the stream - cancel context.CancelFunc // always nil for client side Stream - done chan struct{} // closed at the end of stream to unblock writers. On the client side. - doneFunc func() // invoked at the end of stream on client side. - ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) - method string // the associated RPC method of the stream + ctx context.Context // the associated context of the stream + method string // the associated RPC method of the stream recvCompress string sendCompress string buf *recvBuffer @@ -303,58 +295,17 @@ type Stream struct { fc *inFlow wq *writeQuota - // Holds compressor names passed in grpc-accept-encoding metadata from the - // client. This is empty for the client side stream. - clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) - headerChan chan struct{} // closed to indicate the end of header metadata. - headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. - // headerValid indicates whether a valid header was received. Only - // meaningful after headerChan is closed (always call waitOnHeader() before - // reading its value). Not valid on server side. - headerValid bool - headerWireLength int // Only set on server side. - - // hdrMu protects header and trailer metadata on the server-side. - hdrMu sync.Mutex - // On client side, header keeps the received header metadata. - // - // On server side, header keeps the header set by SetHeader(). The complete - // header will merged into this after t.WriteHeader() is called. - header metadata.MD - trailer metadata.MD // the key-value map of trailer metadata. - - noHeaders bool // set if the client never received headers (set only after the stream is done). - - // On the server-side, headerSent is atomically set to 1 when the headers are sent out. - headerSent uint32 - state streamState - // On client-side it is the status error received from the server. - // On server-side it is unused. - status *status.Status - - bytesReceived uint32 // indicates whether any bytes have been received on this stream - unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream - // contentSubtype is the content-subtype for requests. // this must be lowercase or the behavior is undefined. contentSubtype string -} - -// isHeaderSent is only valid on the server-side. -func (s *Stream) isHeaderSent() bool { - return atomic.LoadUint32(&s.headerSent) == 1 -} -// updateHeaderSent updates headerSent and returns true -// if it was already set. It is valid only on server-side. -func (s *Stream) updateHeaderSent() bool { - return atomic.SwapUint32(&s.headerSent, 1) == 1 + trailer metadata.MD // the key-value map of trailer metadata. } func (s *Stream) swapState(st streamState) streamState { @@ -369,110 +320,12 @@ func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } -func (s *Stream) waitOnHeader() { - if s.headerChan == nil { - // On the server headerChan is always nil since a stream originates - // only after having received headers. 
- return - } - select { - case <-s.ctx.Done(): - // Close the stream to prevent headers/trailers from changing after - // this function returns. - s.ct.CloseStream(s, ContextErr(s.ctx.Err())) - // headerChan could possibly not be closed yet if closeStream raced - // with operateHeaders; wait until it is closed explicitly here. - <-s.headerChan - case <-s.headerChan: - } -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *Stream) RecvCompress() string { - s.waitOnHeader() - return s.recvCompress -} - -// SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(name string) error { - if s.isHeaderSent() || s.getState() == streamDone { - return errors.New("transport: set send compressor called after headers sent or stream done") - } - - s.sendCompress = name - return nil -} - -// SendCompress returns the send compressor name. -func (s *Stream) SendCompress() string { - return s.sendCompress -} - -// ClientAdvertisedCompressors returns the compressor names advertised by the -// client via grpc-accept-encoding header. -func (s *Stream) ClientAdvertisedCompressors() []string { - values := strings.Split(s.clientAdvertisedCompressors, ",") - for i, v := range values { - values[i] = strings.TrimSpace(v) - } - return values -} - -// Done returns a channel which is closed when it receives the final status -// from the server. -func (s *Stream) Done() <-chan struct{} { - return s.done -} - -// Header returns the header metadata of the stream. -// -// On client side, it acquires the key-value pairs of header metadata once it is -// available. It blocks until i) the metadata is ready or ii) there is no header -// metadata or iii) the stream is canceled/expired. -// -// On server side, it returns the out header after t.WriteHeader is called. It -// does not block and must not be called until after WriteHeader. -func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil { - // On server side, return the header in stream. It will be the out - // header after t.WriteHeader is called. - return s.header.Copy(), nil - } - s.waitOnHeader() - - if !s.headerValid || s.noHeaders { - return nil, s.status.Err() - } - - return s.header.Copy(), nil -} - -// TrailersOnly blocks until a header or trailers-only frame is received and -// then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. Client-side only. -func (s *Stream) TrailersOnly() bool { - s.waitOnHeader() - return s.noHeaders -} - // Trailer returns the cached trailer metadata. Note that if it is not called -// after the entire stream is done, it could return an empty MD. Client -// side only. +// after the entire stream is done, it could return an empty MD. // It can be safely read only after stream has ended that is either read // or write have returned io.EOF. func (s *Stream) Trailer() metadata.MD { - c := s.trailer.Copy() - return c -} - -// ContentSubtype returns the content-subtype for a request. For example, a -// content-subtype of "proto" will result in a content-type of -// "application/grpc+proto". This will always be lowercase. See -// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for -// more details. -func (s *Stream) ContentSubtype() string { - return s.contentSubtype + return s.trailer.Copy() } // Context returns the context of the stream. 
@@ -480,90 +333,31 @@ func (s *Stream) Context() context.Context { return s.ctx } -// SetContext sets the context of the stream. This will be deleted once the -// stats handler callouts all move to gRPC layer. -func (s *Stream) SetContext(ctx context.Context) { - s.ctx = ctx -} - // Method returns the method for the stream. func (s *Stream) Method() string { return s.method } -// Status returns the status received from the server. -// Status can be read safely only after the stream has ended, -// that is, after Done() is closed. -func (s *Stream) Status() *status.Status { - return s.status -} - -// HeaderWireLength returns the size of the headers of the stream as received -// from the wire. Valid only on the server. -func (s *Stream) HeaderWireLength() int { - return s.headerWireLength -} - -// SetHeader sets the header metadata. This can be called multiple times. -// Server side only. -// This should not be called in parallel to other data writes. -func (s *Stream) SetHeader(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.isHeaderSent() || s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.header = metadata.Join(s.header, md) - s.hdrMu.Unlock() - return nil -} - -// SendHeader sends the given header metadata. The given metadata is -// combined with any metadata set by previous calls to SetHeader and -// then written to the transport stream. -func (s *Stream) SendHeader(md metadata.MD) error { - return s.st.WriteHeader(s, md) -} - -// SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can be called multiple times. Server side only. -// This should not be called parallel to other data writes. -func (s *Stream) SetTrailer(md metadata.MD) error { - if md.Len() == 0 { - return nil - } - if s.getState() == streamDone { - return ErrIllegalHeaderWrite - } - s.hdrMu.Lock() - s.trailer = metadata.Join(s.trailer, md) - s.hdrMu.Unlock() - return nil -} - func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// ReadHeader reads data into the provided header slice from the stream. It -// first checks if there was an error during a previous read operation and +// ReadMessageHeader reads data into the provided header slice from the stream. +// It first checks if there was an error during a previous read operation and // returns it if present. It then requests a read operation for the length of // the header. It continues to read from the stream until the entire header -// slice is filled or an error occurs. If an `io.EOF` error is encountered -// with partially read data, it is converted to `io.ErrUnexpectedEOF` to -// indicate an unexpected end of the stream. The method returns any error -// encountered during the read process or nil if the header was successfully -// read. -func (s *Stream) ReadHeader(header []byte) (err error) { +// slice is filled or an error occurs. If an `io.EOF` error is encountered with +// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an +// unexpected end of the stream. The method returns any error encountered during +// the read process or nil if the header was successfully read. 
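// A minimal sketch (illustrative, not part of this patch) of how a caller
// consumes the 5-byte gRPC message prefix via ReadMessageHeader, mirroring
// parser.recvMsg in rpc_util.go further down in this diff: byte 0 is the
// payload format and bytes 1-4 are the big-endian message length. The
// headerReader interface and readPrefix helper are hypothetical names.
package example

import "encoding/binary"

type headerReader interface {
	ReadMessageHeader(header []byte) error
}

func readPrefix(r headerReader) (compressed bool, length uint32, err error) {
	var hdr [5]byte
	if err := r.ReadMessageHeader(hdr[:]); err != nil {
		return false, 0, err
	}
	// hdr[0] is the payload format flag; hdr[1:5] is the message length.
	return hdr[0] == 1, binary.BigEndian.Uint32(hdr[1:]), nil
}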
+func (s *Stream) ReadMessageHeader(header []byte) (err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return er } s.requestRead(len(header)) for len(header) != 0 { - n, err := s.trReader.ReadHeader(header) + n, err := s.trReader.ReadMessageHeader(header) header = header[n:] if len(header) == 0 { err = nil @@ -579,7 +373,7 @@ func (s *Stream) ReadHeader(header []byte) (err error) { } // Read reads n bytes from the wire for this stream. -func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { +func (s *Stream) read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier if er := s.trReader.er; er != nil { return nil, er @@ -619,8 +413,8 @@ type transportReader struct { er error } -func (t *transportReader) ReadHeader(header []byte) (int, error) { - n, err := t.reader.ReadHeader(header) +func (t *transportReader) ReadMessageHeader(header []byte) (int, error) { + n, err := t.reader.ReadMessageHeader(header) if err != nil { t.er = err return 0, err @@ -639,17 +433,6 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) { return buf, nil } -// BytesReceived indicates whether any bytes have been received on this stream. -func (s *Stream) BytesReceived() bool { - return atomic.LoadUint32(&s.bytesReceived) == 1 -} - -// Unprocessed indicates whether the server did not process this stream -- -// i.e. it sent a refused stream or GOAWAY including this stream ID. -func (s *Stream) Unprocessed() bool { - return atomic.LoadUint32(&s.unprocessed) == 1 -} - // GoString is implemented by Stream so context.String() won't // race when printing %#v. func (s *Stream) GoString() string { @@ -725,15 +508,9 @@ type ConnectOptions struct { BufferPool mem.BufferPool } -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) -} - -// Options provides additional hints and information for message +// WriteOptions provides additional hints and information for message // transmission. -type Options struct { +type WriteOptions struct { // Last indicates whether this write is the last piece for // this stream. Last bool @@ -782,18 +559,8 @@ type ClientTransport interface { // It does not block. GracefulClose() - // Write sends the data for the given stream. A nil stream indicates - // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) - - // CloseStream clears the footprint of a stream when the stream is - // not needed any more. The err indicates the error incurred when - // CloseStream is called. Must be called when a stream is finished - // unless the associated transport is closing. - CloseStream(stream *Stream, err error) + NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) // Error returns a channel that is closed when some I/O error // happens. Typically the caller should have a goroutine to monitor @@ -813,12 +580,6 @@ type ClientTransport interface { // RemoteAddr returns the remote network address. RemoteAddr() net.Addr - - // IncrMsgSent increments the number of message sent through this transport. 
- IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() } // ServerTransport is the common interface for all gRPC server-side transport @@ -828,19 +589,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(context.Context, func(*Stream)) - - // WriteHeader sends the header metadata for the given stream. - // WriteHeader may not be called on all streams. - WriteHeader(s *Stream, md metadata.MD) error - - // Write sends the data for the given stream. - // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error - - // WriteStatus sends the status of a stream to the client. WriteStatus is - // the final call made on a stream and always occurs. - WriteStatus(s *Stream, st *status.Status) error + HandleStreams(context.Context, func(*ServerStream)) // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their @@ -852,12 +601,14 @@ type ServerTransport interface { // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) +} - // IncrMsgSent increments the number of message sent through this transport. - IncrMsgSent() - - // IncrMsgRecv increments the number of message received through this transport. - IncrMsgRecv() +type internalServerTransport interface { + ServerTransport + writeHeader(s *ServerStream, md metadata.MD) error + write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error + writeStatus(s *ServerStream, st *status.Status) error + incrMsgRecv() } // connectionErrorf creates an ConnectionError with the specified error description. diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go index 228e9c2f20..65002e2cc8 100644 --- a/vendor/google.golang.org/grpc/mem/buffer_slice.go +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -22,6 +22,11 @@ import ( "io" ) +const ( + // 32 KiB is what io.Copy uses. + readAllBufSize = 32 * 1024 +) + // BufferSlice offers a means to represent data that spans one or more Buffer // instances. A BufferSlice is meant to be immutable after creation, and methods // like Ref create and return copies of the slice. This is why all methods have @@ -219,8 +224,58 @@ func (w *writer) Write(p []byte) (n int, err error) { // NewWriter wraps the given BufferSlice and BufferPool to implement the // io.Writer interface. Every call to Write copies the contents of the given -// buffer into a new Buffer pulled from the given pool and the Buffer is added to -// the given BufferSlice. +// buffer into a new Buffer pulled from the given pool and the Buffer is +// added to the given BufferSlice. func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { return &writer{buffers: buffers, pool: pool} } + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +// +// Important: A failed call returns a non-nil error and may also return +// partially read buffers. It is the responsibility of the caller to free the +// BufferSlice returned, or its memory will not be reused. 
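// A small usage sketch (illustrative, not part of this patch): draining a
// reader into a BufferSlice with ReadAll and releasing it afterwards. It
// assumes mem.DefaultBufferPool(), which this diff does not show; per the
// doc comment above, the caller must Free the result even on error.
package example

import (
	"bytes"

	"google.golang.org/grpc/mem"
)

func drain(payload []byte) ([]byte, error) {
	data, err := mem.ReadAll(bytes.NewReader(payload), mem.DefaultBufferPool())
	defer data.Free() // return buffers to the pool, even on a partial read
	if err != nil {
		return nil, err
	}
	return data.Materialize(), nil // copy out before the buffers are freed
}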
+func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) { + var result BufferSlice + if wt, ok := r.(io.WriterTo); ok { + // This is more optimal since wt knows the size of chunks it wants to + // write and, hence, we can allocate buffers of an optimal size to fit + // them. E.g. might be a single big chunk, and we wouldn't chop it + // into pieces. + w := NewWriter(&result, pool) + _, err := wt.WriteTo(w) + return result, err + } +nextBuffer: + for { + buf := pool.Get(readAllBufSize) + // We asked for 32KiB but may have been given a bigger buffer. + // Use all of it if that's the case. + *buf = (*buf)[:cap(*buf)] + usedCap := 0 + for { + n, err := r.Read((*buf)[usedCap:]) + usedCap += n + if err != nil { + if usedCap == 0 { + // Nothing in this buf, put it back + pool.Put(buf) + } else { + *buf = (*buf)[:usedCap] + result = append(result, NewBuffer(buf, pool)) + } + if err == io.EOF { + err = nil + } + return result, err + } + if len(*buf) == usedCap { + result = append(result, NewBuffer(buf, pool)) + continue nextBuffer + } + } + } +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index e87a17f36a..ee0ff969af 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -62,7 +62,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { materializedData := data.Materialize() data.Free() - p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)} // TODO: it should be possible to grab the bufferPool from the underlying // stream implementation with a type cast to its actual type (such as @@ -76,7 +76,7 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if p.pf.isCompressed() { materializedCompData := compData.Materialize() compData.Free() - compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)} } p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 202854511b..8eb1cf3bcf 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,6 +22,7 @@ package resolver import ( "context" + "errors" "fmt" "net" "net/url" @@ -237,8 +238,8 @@ type ClientConn interface { // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an - // error. The ClientConn will notify the load balancer and begin calling - // ResolveNow on the Resolver with exponential backoff. + // error. The ClientConn then forwards this error to the load balancing + // policy. ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. @@ -330,3 +331,20 @@ type AuthorityOverrider interface { // typically in line, and must keep it unchanged. OverrideAuthority(Target) string } + +// ValidateEndpoints validates endpoints from a petiole policy's perspective. +// Petiole policies should call this before calling into their children. See +// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md) +// for details. 
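// An illustrative sketch of the call pattern the doc comment above describes,
// as a petiole (intermediary) LB policy might apply it before handing
// endpoints to its children; the surrounding policy code is assumed, not
// shown in this patch.
package example

import "google.golang.org/grpc/resolver"

func checkResolverState(endpoints []resolver.Endpoint) error {
	// Reject empty endpoint lists and lists whose endpoints carry no
	// addresses, per gRFC A61, before propagating them downstream.
	if err := resolver.ValidateEndpoints(endpoints); err != nil {
		return err
	}
	// ... forward the validated endpoints to child balancers here ...
	return nil
}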
+func ValidateEndpoints(endpoints []Endpoint) error { + if len(endpoints) == 0 { + return errors.New("endpoints list is empty") + } + + for _, endpoint := range endpoints { + for range endpoint.Addresses { + return nil + } + } + return errors.New("endpoints list contains no addresses") +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index aba1ae3e67..9fac2b08b4 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -622,7 +622,7 @@ func (pf payloadFormat) isCompressed() bool { } type streamReader interface { - ReadHeader(header []byte) error + ReadMessageHeader(header []byte) error Read(n int) (mem.BufferSlice, error) } @@ -656,7 +656,7 @@ type parser struct { // that the underlying streamReader must not return an incompatible // error. func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { - err := p.r.ReadHeader(p.header[:]) + err := p.r.ReadMessageHeader(p.header[:]) if err != nil { return 0, nil, err } @@ -664,9 +664,6 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSl pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) - if length == 0 { - return pf, nil, nil - } if int64(length) > int64(maxInt) { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) } @@ -817,7 +814,7 @@ func (p *payloadInfo) free() { // the buffer is no longer needed. // TODO: Refactor this function to reduce the number of arguments. // See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, ) (out mem.BufferSlice, err error) { pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { @@ -841,7 +838,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei var uncompressedBuf []byte uncompressedBuf, err = dc.Do(compressed.Reader()) if err == nil { - out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + out = mem.BufferSlice{mem.SliceBuffer(uncompressedBuf)} } size = len(uncompressedBuf) } else { @@ -877,30 +874,7 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return nil, 0, err } - // TODO: Can/should this still be preserved with the new BufferSlice API? Are - // there any actual benefits to allocating a single large buffer instead of - // multiple smaller ones? - //if sizer, ok := compressor.(interface { - // DecompressedSize(compressedBytes []byte) int - //}); ok { - // if size := sizer.DecompressedSize(d); size >= 0 { - // if size > maxReceiveMessageSize { - // return nil, size, nil - // } - // // size is used as an estimate to size the buffer, but we - // // will read more data if available. - // // +MinRead so ReadFrom will not reallocate if size is correct. - // // - // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // // we can also utilize the recv buffer pool here. 
- // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - // return buf.Bytes(), int(bytesRead), err - // } - //} - - var out mem.BufferSlice - _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1), pool) if err != nil { out.Free() return nil, 0, err @@ -908,10 +882,14 @@ func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMes return out, out.Len(), nil } +type recvCompressor interface { + RecvCompress() string +} + // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { +func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index d1e1415a40..16065a027a 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -87,12 +87,13 @@ func init() { var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) +// MethodHandler is a function type that processes a unary RPC method call. +type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { MethodName string - Handler methodHandler + Handler MethodHandler } // ServiceDesc represents an RPC service's specification. @@ -621,8 +622,8 @@ func bufferPool(bufferPool mem.BufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorker blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows multiple requests to be +// serverWorker blocks on a *transport.ServerStream channel forever and waits +// for data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). 
// @@ -1020,7 +1021,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, }() streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(ctx, func(stream *transport.Stream) { + st.HandleStreams(ctx, func(stream *transport.ServerStream) { s.handlersWG.Add(1) streamQuota.acquire() f := func() { @@ -1136,7 +1137,7 @@ func (s *Server) incrCallsFailed() { s.channelz.ServerMetrics.CallsFailed.Add(1) } -func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) @@ -1165,7 +1166,7 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, if payloadLen > s.opts.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize) } - err = t.Write(stream, hdr, payload, opts) + err = stream.Write(hdr, payload, opts) if err == nil { if len(s.opts.statsHandlers) != 0 { for _, sh := range s.opts.statsHandlers { @@ -1212,7 +1213,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1320,7 +1321,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor decomp = encoding.GetCompressor(rc) if decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(stream, st) + stream.WriteStatus(st) return st.Err() } } @@ -1354,15 +1355,12 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { - if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + if e := stream.WriteStatus(status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } defer d.Free() - if channelz.IsOn() { - t.IncrMsgRecv() - } df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) @@ -1404,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor trInfo.tr.LazyLog(stringer(appStatus.Message()), true) trInfo.tr.SetError() } - if e := t.WriteStatus(stream, appStatus); e != nil { + if e := stream.WriteStatus(appStatus); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if len(binlogs) != 0 { @@ -1431,20 +1429,20 @@ func (s *Server) processUnaryRPC(ctx 
context.Context, t transport.ServerTranspor if trInfo != nil { trInfo.tr.LazyLog(stringer("OK"), false) } - opts := &transport.Options{Last: true} + opts := &transport.WriteOptions{Last: true} // Server handler could have set new compressor by calling SetSendCompressor. // In case it is set, we need to use it for compressing outbound message. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err } if sts, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, sts); e != nil { + if e := stream.WriteStatus(sts); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { @@ -1484,9 +1482,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, sm) } } - if channelz.IsOn() { - t.IncrMsgSent() - } if trInfo != nil { trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) } @@ -1502,7 +1497,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor binlog.Log(ctx, st) } } - return t.WriteStatus(stream, statusOK) + return stream.WriteStatus(statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1541,7 +1536,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1561,7 +1556,6 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, - t: t, s: stream, p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), @@ -1648,7 +1642,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ss.decomp = encoding.GetCompressor(rc) if ss.decomp == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) - t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) return st.Err() } } @@ -1717,7 +1711,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - t.WriteStatus(ss.s, appStatus) + ss.s.WriteStatus(appStatus) // TODO: Should we log an error from WriteStatus here and below? 
return appErr } @@ -1735,10 +1729,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran binlog.Log(ctx, st) } } - return t.WriteStatus(ss.s, statusOK) + return ss.s.WriteStatus(statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) { ctx := stream.Context() ctx = contextWithServer(ctx, s) var ti *traceInfo @@ -1768,7 +1762,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -1783,17 +1777,20 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] - md, _ := metadata.FromIncomingContext(ctx) - for _, sh := range s.opts.statsHandlers { - ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) - sh.HandleRPC(ctx, &stats.InHeader{ - FullMethod: stream.Method(), - RemoteAddr: t.Peer().Addr, - LocalAddr: t.Peer().LocalAddr, - Compression: stream.RecvCompress(), - WireLength: stream.HeaderWireLength(), - Header: md, - }) + // FromIncomingContext is expensive: skip if there are no statsHandlers + if len(s.opts.statsHandlers) > 0 { + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } } // To have calls in stream callouts work. Will delete once all stats handler // calls come from the gRPC layer. @@ -1802,17 +1799,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(ctx, t, stream, srv, md, ti) + s.processUnaryRPC(ctx, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(ctx, t, stream, srv, sd, ti) + s.processStreamingRPC(ctx, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1825,7 +1822,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.LazyPrintf("%s", errDesc) ti.tr.SetError() } - if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil { if ti != nil { ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() @@ -2100,7 +2097,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. 
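// A hedged usage sketch (not part of this patch): a server handler opting in
// to gzip for its response via SetSendCompressor, guarded by the compressors
// the client advertised. It assumes the gzip compressor is registered by
// importing grpc/encoding/gzip; error handling is abridged.
package example

import (
	"context"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

func maybeCompressResponse(ctx context.Context) error {
	names, err := grpc.ClientSupportedCompressors(ctx)
	if err != nil {
		return err
	}
	for _, n := range names {
		if n == "gzip" {
			// Compress this RPC's response with gzip.
			return grpc.SetSendCompressor(ctx, "gzip")
		}
	}
	return nil
}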
func SetSendCompressor(ctx context.Context, name string) error { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return fmt.Errorf("failed to fetch the stream from the given context") } @@ -2122,7 +2119,7 @@ func SetSendCompressor(ctx context.Context, name string) error { // Notice: This function is EXPERIMENTAL and may be changed or removed in a // later release. func ClientSupportedCompressors(ctx context.Context) ([]string, error) { - stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream) if !ok || stream == nil { return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) } diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 2671c5ef69..7e83027d19 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -168,6 +168,7 @@ func init() { return parseServiceConfig(js, defaultMaxCallAttempts) } } + func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { if len(js) == 0 { return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} @@ -297,7 +298,7 @@ func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalservi return rp, nil } -func min(a, b *int) *int { +func minPointers(a, b *int) *int { if *a < *b { return a } @@ -309,7 +310,7 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { return &defaultVal } if mcMax != nil && doptMax != nil { - return min(mcMax, doptMax) + return minPointers(mcMax, doptMax) } if mcMax != nil { return mcMax diff --git a/vendor/google.golang.org/grpc/stats/metrics.go b/vendor/google.golang.org/grpc/stats/metrics.go new file mode 100644 index 0000000000..641c8e9794 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/metrics.go @@ -0,0 +1,81 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package stats + +import "maps" + +// MetricSet is a set of metrics to record. Once created, MetricSet is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetricSet instead. +type MetricSet struct { + // metrics are the set of metrics to initialize. + metrics map[string]bool +} + +// NewMetricSet returns a MetricSet containing metricNames. +func NewMetricSet(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. 
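// An illustrative sketch of composing metric sets with the constructors and
// copy-on-write helpers defined in this file; the metric names used here are
// made up for the example.
package example

import "google.golang.org/grpc/stats"

func composeMetrics() *stats.MetricSet {
	base := stats.NewMetricSet("example.calls.started")
	// Add and Remove never mutate their receiver; each returns a new set.
	extended := base.Add("example.custom.latency")
	return extended.Remove("example.calls.started")
}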
+func (m *MetricSet) Metrics() map[string]bool { + return m.metrics +} + +// Add adds the metricNames to the metrics set and returns a new copy with the +// additional metrics. +func (m *MetricSet) Add(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + newMetrics[metric] = true + } + return &MetricSet{metrics: newMetrics} +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *MetricSet) Join(metrics *MetricSet) *MetricSet { + newMetrics := make(map[string]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &MetricSet{metrics: newMetrics} +} + +// Remove removes the metricNames from the metrics set and returns a new copy +// with the metrics removed. +func (m *MetricSet) Remove(metricNames ...string) *MetricSet { + newMetrics := make(map[string]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metricNames { + delete(newMetrics, metric) + } + return &MetricSet{metrics: newMetrics} +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 71195c4943..6f20d2d548 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -260,84 +260,42 @@ func (s *ConnEnd) IsClient() bool { return s.Client } func (s *ConnEnd) isConnStats() {} -type incomingTagsKey struct{} -type outgoingTagsKey struct{} - // SetTags attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-tags-bin. Subsequent calls to // SetTags will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-tags-bin` header in the metadata instead. func SetTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTagsKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b)) } // Tags returns the tags from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-tags-bin` header from metadata instead. func Tags(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTagsKey{}).([]byte) - return b -} - -// SetIncomingTags attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). -// -// This is intended for gRPC-internal use ONLY. -func SetIncomingTags(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTagsKey{}, b) -} - -// OutgoingTags returns the tags from the context for the outbound RPC. -// -// This is intended for gRPC-internal use ONLY. 
-func OutgoingTags(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTagsKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } -type incomingTraceKey struct{} -type outgoingTraceKey struct{} - // SetTrace attaches stats tagging data to the context, which will be sent in // the outgoing RPC with the header grpc-trace-bin. Subsequent calls to // SetTrace will overwrite the values from earlier calls. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: set the `grpc-trace-bin` header in the metadata instead. func SetTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, outgoingTraceKey{}, b) + return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b)) } // Trace returns the trace from the context for the inbound RPC. // -// NOTE: this is provided only for backward compatibility with existing clients -// and will likely be removed in an upcoming release. New uses should transmit -// this type of data using metadata with a different, non-reserved (i.e. does -// not begin with "grpc-") header name. +// Deprecated: obtain the `grpc-trace-bin` header from metadata instead. func Trace(ctx context.Context) []byte { - b, _ := ctx.Value(incomingTraceKey{}).([]byte) - return b -} - -// SetIncomingTrace attaches stats tagging data to the context, to be read by -// the application (not sent in outgoing RPCs). It is intended for -// gRPC-internal use. -func SetIncomingTrace(ctx context.Context, b []byte) context.Context { - return context.WithValue(ctx, incomingTraceKey{}, b) -} - -// OutgoingTrace returns the trace from the context for the outbound RPC. It is -// intended for gRPC-internal use. -func OutgoingTrace(ctx context.Context) []byte { - b, _ := ctx.Value(outgoingTraceKey{}).([]byte) - return b + traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin") + if len(traceValues) == 0 { + return nil + } + return []byte(traceValues[len(traceValues)-1]) } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index bb2b2a216c..17e2267b33 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -23,7 +23,7 @@ import ( "errors" "io" "math" - "math/rand" + rand "math/rand/v2" "strconv" "sync" "time" @@ -113,7 +113,9 @@ type ClientStream interface { // SendMsg is generally called by generated code. On error, SendMsg aborts // the stream. If the error was generated by the client, the status is // returned directly; otherwise, io.EOF is returned and the status of - // the stream may be discovered using RecvMsg. + // the stream may be discovered using RecvMsg. For unary or server-streaming + // RPCs (StreamDesc.ClientStreams is false), a nil error is returned + // unconditionally. 
// // SendMsg blocks until: // - There is sufficient flow control to schedule m with the transport, or @@ -216,7 +218,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth var mc serviceconfig.MethodConfig var onCommit func() - var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) { return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) } @@ -584,7 +586,7 @@ type csAttempt struct { ctx context.Context cs *clientStream t transport.ClientTransport - s *transport.Stream + s *transport.ClientStream p *parser pickResult balancer.PickResult @@ -706,11 +708,10 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { cs.numRetriesSincePushback = 0 } else { fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) - cur := float64(rp.InitialBackoff) * fact - if max := float64(rp.MaxBackoff); cur > max { - cur = max - } - dur = time.Duration(rand.Int63n(int64(cur))) + cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff)) + // Apply jitter by multiplying with a random factor between 0.8 and 1.2 + cur *= 0.8 + 0.4*rand.Float64() + dur = time.Duration(int64(cur)) cs.numRetriesSincePushback++ } @@ -991,7 +992,7 @@ func (cs *clientStream) CloseSend() error { } cs.sentLast = true op := func(a *csAttempt) error { - a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + a.s.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1083,7 +1084,7 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } a.mu.Unlock() } - if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if err := a.s.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil { if !cs.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1097,9 +1098,6 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) } } - if channelz.IsOn() { - a.t.IncrMsgSent() - } return nil } @@ -1153,9 +1151,6 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { Length: payInfo.uncompressedBytes.Len(), }) } - if channelz.IsOn() { - a.t.IncrMsgRecv() - } if cs.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. 
return nil @@ -1183,7 +1178,7 @@ func (a *csAttempt) finish(err error) { } var tr metadata.MD if a.s != nil { - a.t.CloseStream(a.s, err) + a.s.Close(err) tr = a.s.Trailer() } @@ -1340,7 +1335,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin } type addrConnStream struct { - s *transport.Stream + s *transport.ClientStream ac *addrConn callHdr *transport.CallHdr cancel context.CancelFunc @@ -1380,7 +1375,7 @@ func (as *addrConnStream) CloseSend() error { } as.sentLast = true - as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + as.s.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1430,7 +1425,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.s.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1440,9 +1435,6 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return io.EOF } - if channelz.IsOn() { - as.t.IncrMsgSent() - } return nil } @@ -1480,9 +1472,6 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { return toRPCErr(err) } - if channelz.IsOn() { - as.t.IncrMsgRecv() - } if as.desc.ServerStreams { // Subsequent messages should be received by subsequent RecvMsg calls. return nil @@ -1510,7 +1499,7 @@ func (as *addrConnStream) finish(err error) { err = nil } if as.s != nil { - as.t.CloseStream(as.s, err) + as.s.Close(err) } if err != nil { @@ -1577,8 +1566,7 @@ type ServerStream interface { // serverStream implements a server side Stream. type serverStream struct { ctx context.Context - t transport.ServerTransport - s *transport.Stream + s *transport.ServerStream p *parser codec baseCodec @@ -1628,7 +1616,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { return status.Error(codes.Internal, err.Error()) } - err = ss.t.WriteHeader(ss.s, md) + err = ss.s.SendHeader(md) if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() sh := &binarylog.ServerHeader{ @@ -1668,7 +1656,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1676,9 +1664,6 @@ func (ss *serverStream) SendMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgSent() - } }() // Server handler could have set new compressor by calling SetSendCompressor. @@ -1710,7 +1695,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { if payloadLen > ss.maxSendMessageSize { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payloadLen, ss.maxSendMessageSize) } - if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil { return toRPCErr(err) } @@ -1756,7 +1741,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if err != nil && err != io.EOF { st, _ := status.FromError(toRPCErr(err)) - ss.t.WriteStatus(ss.s, st) + ss.s.WriteStatus(st) // Non-user specified status was sent out. This should be an error // case (as a server side Cancel maybe). // @@ -1764,9 +1749,6 @@ func (ss *serverStream) RecvMsg(m any) (err error) { // status from the service handler, we will log that error instead. // This behavior is similar to an interceptor. } - if channelz.IsOn() && err == nil { - ss.t.IncrMsgRecv() - } }() var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 5a47094ae8..a5b038829d 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.68.1" +const Version = "1.69.0-dev" diff --git a/vendor/modules.txt b/vendor/modules.txt index 3f5bfbbb3e..37185fd48a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -471,7 +471,7 @@ google.golang.org/genproto/googleapis/api/httpbody ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.68.1 +# google.golang.org/grpc v1.69.0 ## explicit; go 1.22 google.golang.org/grpc google.golang.org/grpc/attributes From 563321d26b7a887211aeadd8bfdffdf5d9229873 Mon Sep 17 00:00:00 2001 From: Johan Fylling Date: Tue, 17 Dec 2024 11:50:11 +0100 Subject: [PATCH 4/7] Rego v1 capabilities and keywords update (#7216) * Separating v0- and v1 keywords * Adding `rego_v1` capability feature Signed-off-by: Johan Fylling --- .github/workflows/pull-request.yaml | 10 +- ast/capabilities.go | 3 +- ast/parser.go | 4 + build/policy/files.rego | 4 +- build/policy/files_test.rego | 10 +- build/policy/helpers.rego | 3 +- build/policy/integrations.rego | 4 +- build/policy/integrations_test.rego | 60 +- builtin_metadata.json | 199 ++ capabilities.json | 10 +- cmd/bench.go | 5 +- cmd/build.go | 22 +- cmd/build_test.go | 461 ++- cmd/capabilities.go | 19 +- cmd/capabilities_test.go | 65 + cmd/check.go | 3 +- cmd/check_test.go | 196 ++ cmd/eval.go | 6 +- cmd/eval_test.go | 218 ++ cmd/inspect_test.go | 31 +- cmd/test.go | 10 +- cmd/test_test.go | 312 ++ v1/ast/capabilities.go | 53 +- v1/ast/capabilities_test.go | 31 +- v1/ast/compile.go | 95 +- v1/ast/compile_test.go | 221 +- v1/ast/parser.go | 108 +- v1/ast/parser_test.go | 38 - v1/ast/version_index.json | 7 + v1/bundle/bundle.go | 4 +- v1/capabilities/v1.0.0.json | 4844 +++++++++++++++++++++++++++ v1/compile/compile_test.go | 5 +- v1/loader/loader.go | 1 + v1/plugins/plugins_test.go | 220 +- v1/rego/rego.go | 11 +- v1/rego/rego_test.go | 207 ++ v1/repl/repl.go | 7 +- v1/tester/runner.go | 62 + 38 files changed, 7252 insertions(+), 317 deletions(-) create mode 100644 v1/capabilities/v1.0.0.json diff --git a/.github/workflows/pull-request.yaml b/.github/workflows/pull-request.yaml index d7c4d19ce9..94d249feba 100644 --- a/.github/workflows/pull-request.yaml +++ b/.github/workflows/pull-request.yaml @@ -352,23 +352,23 @@ jobs: version: edge - name: Test policies - run: opa test 
build/policy + run: opa test --v0-compatible build/policy - name: Ensure proper formatting - run: opa fmt --list --fail build/policy + run: opa fmt --v0-compatible --list --fail build/policy - name: Run file policy checks on changed files run: | curl --silent --fail --header 'Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' -o files.json \ https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/files - opa eval -d build/policy/files.rego -d build/policy/helpers.rego --format values --input files.json \ + opa eval --v0-compatible -d build/policy/files.rego -d build/policy/helpers.rego --format values --input files.json \ --fail-defined 'data.files.deny[message]' env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Show input on failure - run: opa eval --input files.json --format pretty input + run: opa eval --v0-compatible --input files.json --format pretty input if: ${{ failure() }} - name: Setup Hugo @@ -386,4 +386,4 @@ jobs: cd docs make dev-generate hugo-production-build cd - - opa eval 'data.integrations.deny[message]' -i docs/website/public/index.json -d build/policy/integrations.rego --format=values --fail-defined + opa eval --v0-compatible 'data.integrations.deny[message]' -i docs/website/public/index.json -d build/policy/integrations.rego --format=values --fail-defined diff --git a/ast/capabilities.go b/ast/capabilities.go index 7c82377ab0..bc7278a885 100644 --- a/ast/capabilities.go +++ b/ast/capabilities.go @@ -21,6 +21,7 @@ type VersionIndex = v1.VersionIndex // heads, they wouldn't be able to parse them. const FeatureRefHeadStringPrefixes = v1.FeatureRefHeadStringPrefixes const FeatureRefHeads = v1.FeatureRefHeads +const FeatureRegoV1 = v1.FeatureRegoV1 const FeatureRegoV1Import = v1.FeatureRegoV1Import // Capabilities defines a structure containing data that describes the capabilities @@ -33,7 +34,7 @@ type WasmABIVersion = v1.WasmABIVersion // CapabilitiesForThisVersion returns the capabilities of this version of OPA. func CapabilitiesForThisVersion() *Capabilities { - return v1.CapabilitiesForThisVersion() + return v1.CapabilitiesForThisVersion(v1.CapabilitiesRegoVersion(DefaultRegoVersion)) } // LoadCapabilitiesJSON loads a JSON serialized capabilities structure from the reader r. 
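The `FeatureRegoV1` flag added above is surfaced to embedders through `CapabilitiesForThisVersion()`, which now reports capabilities for the package's `DefaultRegoVersion`. A minimal sketch of how an embedder might probe for the new feature, assuming the v0 `ast` re-exports shown in this hunk and the existing `Capabilities.Features` string list (the exact field name is an assumption here, not part of this patch):

    package main

    import (
        "fmt"
        "slices"

        "github.com/open-policy-agent/opa/ast"
    )

    func main() {
        // Capabilities for the default Rego version of this OPA build.
        caps := ast.CapabilitiesForThisVersion()
        // Features is assumed to be the existing list of feature flags;
        // "rego_v1" (ast.FeatureRegoV1) is the flag introduced by this patch.
        if slices.Contains(caps.Features, ast.FeatureRegoV1) {
            fmt.Println("this OPA build understands Rego v1")
        }
    }

The same check works against an externally supplied capabilities JSON document (e.g. the new v1/capabilities/v1.0.0.json) loaded via `LoadCapabilitiesJSON`, since that shares the `Capabilities` structure.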
diff --git a/ast/parser.go b/ast/parser.go index 8954618a05..45cd4da06e 100644 --- a/ast/parser.go +++ b/ast/parser.go @@ -43,3 +43,7 @@ type ParserOptions = v1.ParserOptions func NewParser() *Parser { return v1.NewParser().WithRegoVersion(DefaultRegoVersion) } + +func IsFutureKeyword(s string) bool { + return v1.IsFutureKeywordForRegoVersion(s, RegoV0) +} diff --git a/build/policy/files.rego b/build/policy/files.rego index 5a4bdb6126..ed22bd8f2d 100644 --- a/build/policy/files.rego +++ b/build/policy/files.rego @@ -7,9 +7,7 @@ package files -import future.keywords.contains -import future.keywords.if -import future.keywords.in +import rego.v1 import data.helpers.basename import data.helpers.directory diff --git a/build/policy/files_test.rego b/build/policy/files_test.rego index 03e9f1e2d3..e92b9d1e37 100644 --- a/build/policy/files_test.rego +++ b/build/policy/files_test.rego @@ -1,27 +1,27 @@ package files_test -import future.keywords.in +import rego.v1 import data.files.deny -test_deny_invalid_yaml_file { +test_deny_invalid_yaml_file if { expected := "invalid.yaml is an invalid YAML file: {null{}}" expected in deny with data.files.yaml_file_contents as {"invalid.yaml": "{null{}}"} with data.files.changes as {"invalid.yaml": {"status": "modified"}} } -test_allow_valid_yaml_file { +test_allow_valid_yaml_file if { count(deny) == 0 with data.files.yaml_file_contents as {"valid.yaml": "foo: bar"} with data.files.changes as {"valid.yaml": {"status": "modified"}} } -test_deny_invalid_json_file { +test_deny_invalid_json_file if { expected := "invalid.json is an invalid JSON file: }}}" expected in deny with data.files.json_file_contents as {"invalid.json": "}}}"} with data.files.changes as {"invalid.json": {"status": "modified"}} } -test_allow_valid_json_file { +test_allow_valid_json_file if { count(deny) == 0 with data.files.json_file_contents as {"valid.json": "{\"foo\": \"bar\"}"} with data.files.changes as {"valid.json": {"status": "modified"}} } diff --git a/build/policy/helpers.rego b/build/policy/helpers.rego index 0236ef5a4c..cc302ab8bd 100644 --- a/build/policy/helpers.rego +++ b/build/policy/helpers.rego @@ -1,7 +1,6 @@ package helpers -import future.keywords.if -import future.keywords.in +import rego.v1 last_indexof(string, search) := i if { all := [i | chars := split(string, ""); chars[i] == search] diff --git a/build/policy/integrations.rego b/build/policy/integrations.rego index ff0e70e485..66112b813c 100644 --- a/build/policy/integrations.rego +++ b/build/policy/integrations.rego @@ -1,8 +1,6 @@ package integrations -import future.keywords.contains -import future.keywords.if -import future.keywords.in +import rego.v1 allowed_image_extensions := ["png", "svg"] diff --git a/build/policy/integrations_test.rego b/build/policy/integrations_test.rego index d21b331d53..f6b1c5eacf 100644 --- a/build/policy/integrations_test.rego +++ b/build/policy/integrations_test.rego @@ -1,8 +1,8 @@ package integrations_test -import future.keywords.in +import rego.v1 -messages_for_key(key, output) = messages { +messages_for_key(key, output) = messages if { messages := {m | some e output[e] @@ -15,18 +15,18 @@ messages_for_key(key, output) = messages { print_if(true, _, _, _) = true -print_if(false, key, false, output) := false { +print_if(false, key, false, output) := false if { print("Exp:", {}) print("Got: ", messages_for_key(key, output)) } -print_if(false, key, expected, output) := false { +print_if(false, key, expected, output) := false if { is_string(expected) print("Exp:", expected) print("Got:", 
messages_for_key(key, output)) } -test_integration_has_valid_key { +test_integration_has_valid_key if { output := data.integrations.deny with input as {"integrations": {"/integrations/in.valid/": {"link": "https://example.com/", "title": "Example"}}} key := "key" @@ -40,7 +40,7 @@ test_integration_has_valid_key { print_if(result, key, message, output) } -test_integration_has_required_fields_missing { +test_integration_has_required_fields_missing if { output := data.integrations.deny with input as {"integrations": {"/integrations/regal/": {}}} key := "fields" @@ -53,7 +53,7 @@ test_integration_has_required_fields_missing { print_if(result, key, message, output) } -test_integration_has_required_fields_present { +test_integration_has_required_fields_present if { output := data.integrations.deny with input as {"integrations": {"/integrations/regal/": {"title": "Regal"}}} key := "fields" @@ -66,7 +66,7 @@ test_integration_has_required_fields_present { print_if(result, key, false, output) } -test_integration_has_content_missing { +test_integration_has_content_missing if { output := data.integrations.deny with input as {"integrations": {"/integrations/regal/": {}}} key := "content" @@ -79,7 +79,7 @@ test_integration_has_content_missing { print_if(result, key, message, output) } -test_integration_has_content_blank { +test_integration_has_content_blank if { output := data.integrations.deny with input as {"integrations": {"/integrations/regal/": {"content": "\t\t\n "}}} key := "content" @@ -92,7 +92,7 @@ test_integration_has_content_blank { print_if(result, key, message, output) } -test_integration_has_content_present { +test_integration_has_content_present if { output := data.integrations.deny with input as {"integrations": {"/integrations/regal/": {"content": "foobar"}}} key := "content" @@ -103,7 +103,7 @@ test_integration_has_content_present { print_if(result, key, false, output) } -test_every_integration_has_image_missing { +test_every_integration_has_image_missing if { output := data.integrations.deny with input as { "images": ["reegal.png"], "integrations": {"/integrations/regal/": {}}, @@ -119,7 +119,7 @@ test_every_integration_has_image_missing { print_if(result, key, message, output) } -test_every_integration_has_image_present { +test_every_integration_has_image_present if { output := data.integrations.deny with input as { "images": ["regal.png"], "integrations": {"regal": {}}, @@ -133,7 +133,7 @@ test_every_integration_has_image_present { print_if(result, key, false, output) } -test_every_integration_has_image_missing_but_permitted { +test_every_integration_has_image_missing_but_permitted if { output := data.integrations.deny with input as { "images": ["reegal.png"], "integrations": {"regal": {"allow_missing_image": true}}, @@ -148,7 +148,7 @@ test_every_integration_has_image_missing_but_permitted { print_if(result, key, false, output) } -test_every_image_has_integration_missing { +test_every_image_has_integration_missing if { output := data.integrations.deny with input as { "images": ["regal.png"], "integrations": {"foobar": {}}, @@ -164,7 +164,7 @@ test_every_image_has_integration_missing { print_if(result, key, message, output) } -test_every_image_has_integration_present { +test_every_image_has_integration_present if { output := data.integrations.deny with input as { "images": ["regal.png"], "integrations": {"/integrations/regal/": {}}, @@ -178,7 +178,7 @@ test_every_image_has_integration_present { print_if(result, key, false, output) } -test_integration_organizations_missing { 
+test_integration_organizations_missing if { output := data.integrations.deny with input as { "organizations": {"/organizations/stira/": {}}, "integrations": {"/integrations/regal/": {"inventors": ["styra"]}}, @@ -194,7 +194,7 @@ test_integration_organizations_missing { print_if(result, key, message, output) } -test_integration_organizations_present { +test_integration_organizations_present if { output := data.integrations.deny with input as { "organizations": {"/organizations/styra/": {}}, "integrations": {"/integrations/regal/": {"inventors": ["styra"]}}, @@ -208,7 +208,7 @@ test_integration_organizations_present { print_if(result, key, false, output) } -test_integration_softwares_missing { +test_integration_softwares_missing if { output := data.integrations.deny with input as { "softwares": {"/softwares/mars/": {}}, "integrations": {"/integrations/regal/": {"software": ["terraform"]}}, @@ -224,7 +224,7 @@ test_integration_softwares_missing { print_if(result, key, message, output) } -test_integration_softwares_present { +test_integration_softwares_present if { output := data.integrations.deny with input as { "softwares": {"/softwares/terraform/": {}}, "integrations": {"/integrations/regal/": {"software": ["terraform"]}}, @@ -238,7 +238,7 @@ test_integration_softwares_present { print_if(result, key, false, output) } -test_software_has_required_fields_missing { +test_software_has_required_fields_missing if { output := data.integrations.deny with input as {"softwares": {"/softwares/terraform/": {}}} key := "fields" @@ -251,7 +251,7 @@ test_software_has_required_fields_missing { print_if(result, key, message, output) } -test_software_has_required_fields_present { +test_software_has_required_fields_present if { output := data.integrations.deny with input as {"softwares": {"terraform": {"link": "https://www.terraform.io/", "title": "Terraform"}}} key := "fields" @@ -263,7 +263,7 @@ test_software_has_required_fields_present { print_if(result, key, false, output) } -test_organization_has_required_labels { +test_organization_has_required_labels if { output := data.integrations.deny with input as {"organizations": {"/organizations/styra/": {}}} key := "fields" @@ -276,7 +276,7 @@ test_organization_has_required_labels { print_if(result, key, message, output) } -test_organization_has_required_fields_present { +test_organization_has_required_fields_present if { output := data.integrations.deny with input as {"organizations": {"styra": {"link": "https://styra.com/", "title": "Styra"}}} key := "fields" @@ -288,7 +288,7 @@ test_organization_has_required_fields_present { print_if(result, key, false, output) } -test_organization_has_valid_key { +test_organization_has_valid_key if { output := data.integrations.deny with input as {"organizations": {"/organizations/sty.ra/": {"link": "https://styra.com/", "title": "Styra"}}} key := "key" @@ -302,7 +302,7 @@ test_organization_has_valid_key { print_if(result, key, message, output) } -test_organization_has_one_or_more_integrations_none { +test_organization_has_one_or_more_integrations_none if { output := data.integrations.deny with input as {"organizations": {"/organizations/foobar/": {}}, "integrations": {}} key := "orphaned_org" @@ -315,7 +315,7 @@ test_organization_has_one_or_more_integrations_none { print_if(result, key, message, output) } -test_organization_has_one_or_more_integrations_one { +test_organization_has_one_or_more_integrations_one if { output := data.integrations.deny with input as {"organizations": {"/organizations/foobaz/": {}}, 
"integrations": {"/integrations/foobar/": {"inventors": ["foobaz"]}}} key := "orphaned_org" @@ -326,7 +326,7 @@ test_organization_has_one_or_more_integrations_one { print_if(result, key, false, output) } -test_organization_has_one_or_more_integrations_speaker { +test_organization_has_one_or_more_integrations_speaker if { output := data.integrations.deny with input as {"organizations": {"foobaz": {}}, "integrations": {"foobar": {"videos": [{"speakers": [{"organization": "foobaz"}]}]}}} key := "orphaned_org" @@ -337,7 +337,7 @@ test_organization_has_one_or_more_integrations_speaker { print_if(result, key, false, output) } -test_software_has_one_or_more_integrations_none { +test_software_has_one_or_more_integrations_none if { output := data.integrations.deny with input as {"softwares": {"/softwares/foobar/": {}}, "integrations": {}} key := "orphaned_software" @@ -350,7 +350,7 @@ test_software_has_one_or_more_integrations_none { print_if(result, key, message, output) } -test_software_has_one_or_more_integrations_one { +test_software_has_one_or_more_integrations_one if { output := data.integrations.deny with input as {"softwares": {"foobaz": {}}, "integrations": {"foobar": {"software": ["foobaz"]}}} key := "orphaned_software" @@ -361,7 +361,7 @@ test_software_has_one_or_more_integrations_one { print_if(result, key, false, output) } -test_software_has_valid_key { +test_software_has_valid_key if { output := data.integrations.deny with input as {"softwares": {"/softwares/in.valid/": {"link": "https://example.com/", "title": "Example"}}} key := "key" diff --git a/builtin_metadata.json b/builtin_metadata.json index 4e35f13087..dc1236cb32 100644 --- a/builtin_metadata.json +++ b/builtin_metadata.json @@ -362,6 +362,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the number without its sign.", @@ -488,6 +489,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "deprecated": true, @@ -619,6 +621,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the intersection of two sets.", @@ -746,6 +749,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "deprecated": true, @@ -877,6 +881,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Concatenates two arrays.", @@ -955,6 +960,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the reverse of a given array.", @@ -1093,6 +1099,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a slice of a given array. 
If `start` is greater or equal than `stop`, `slice` is `[]`.", @@ -1222,6 +1229,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "infix": ":=", @@ -1348,6 +1356,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Deserializes the base64 encoded input string.", @@ -1476,6 +1485,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Serializes the input string into base64 encoding.", @@ -1583,6 +1593,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies the input string is base64 encoded.", @@ -1711,6 +1722,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Deserializes the base64url encoded input string.", @@ -1839,6 +1851,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Serializes the input string into base64url encoding.", @@ -1944,6 +1957,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Serializes the input string into base64url encoding without padding.", @@ -2073,6 +2087,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the bitwise \"AND\" of two integers.", @@ -2202,6 +2217,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a new integer with its bits shifted `s` bits to the left.", @@ -2326,6 +2342,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the bitwise negation (flip) of an integer.", @@ -2455,6 +2472,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the bitwise \"OR\" of two integers.", @@ -2584,6 +2602,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a new integer with its bits shifted `s` bits to the right.", @@ -2713,6 +2732,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the bitwise \"XOR\" (exclusive-or) of two integers.", @@ -2839,6 +2859,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "deprecated": true, @@ -2963,6 +2984,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "deprecated": true, @@ -3087,6 +3109,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "deprecated": true, @@ -3211,6 +3234,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "deprecated": true, @@ -3335,6 +3359,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "deprecated": true, @@ -3459,6 +3484,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "deprecated": true, @@ -3556,6 +3582,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Rounds the number _up_ to the nearest integer.", @@ -3689,6 +3716,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Joins a set or array of strings with a delimiter.", @@ -3822,6 +3850,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns `true` if the search string is included in the base string", @@ -3950,6 +3979,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": " Count takes a collection or string and returns the number of elements (or characters) in it.", @@ -3999,6 +4029,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a boolean representing the result of comparing two MACs for equality without leaking timing information.", @@ -4082,6 +4113,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a string representing the MD5 HMAC of the input message using the input key.", @@ 
-4165,6 +4197,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a string representing the SHA1 HMAC of the input message using the input key.", @@ -4248,6 +4281,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a string representing the SHA256 HMAC of the input message using the input key.", @@ -4331,6 +4365,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a string representing the SHA512 HMAC of the input message using the input key.", @@ -4459,6 +4494,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a string representing the input string hashed with the MD5 function", @@ -4499,6 +4535,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns zero or more private keys from the given encoded string containing DER certificate data.\n\nIf the input is empty, the function will return null. The input string should be a list of one or more concatenated PEM blocks. The whole input of concatenated PEM blocks can optionally be Base64 encoded.", @@ -4627,6 +4664,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a string representing the input string hashed with the SHA1 function", @@ -4755,6 +4793,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a string representing the input string hashed with the SHA256 function", @@ -4842,6 +4881,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns one or more certificates from the given string containing PEM\nor base64 encoded DER certificates after verifying the supplied certificates form a complete\ncertificate chain back to a trusted root.\n\nThe first certificate is treated as the root and the last is treated as the leaf,\nwith all others being treated as intermediates.", @@ -4877,6 +4917,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns one or more certificates from the given string containing PEM\nor base64 encoded DER certificates after verifying the supplied certificates form a complete\ncertificate chain back to a trusted root. A config option passed as the second argument can\nbe used to configure the validation options used.\n\nThe first certificate is treated as the root and the last is treated as the leaf,\nwith all others being treated as intermediates.", @@ -4990,6 +5031,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a PKCS #10 certificate signing request from the given PEM-encoded PKCS#10 certificate signing request.", @@ -5118,6 +5160,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns zero or more certificates from the given encoded string containing\nDER certificate data.\n\nIf the input is empty, the function will return null. The input string should be a list of one or more\nconcatenated PEM blocks. 
The whole input of concatenated PEM blocks can optionally be Base64 encoded.", @@ -5166,6 +5209,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a valid key pair", @@ -5250,6 +5294,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a JWK for signing a JWT from the given PEM-encoded RSA private key.", @@ -5383,6 +5428,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Divides the first number by the second number.", @@ -5517,6 +5563,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns true if the search string ends with the base string.", @@ -5646,6 +5693,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "infix": "=", @@ -5775,6 +5823,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "infix": "==", @@ -5874,6 +5923,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Rounds the number _down_ to the nearest integer.", @@ -6007,6 +6057,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the string representation of the number in the given base after rounding it down to an integer value.", @@ -6145,6 +6196,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Parses and matches strings against the glob notation. Not to be confused with `regex.globs_match`.", @@ -6273,6 +6325,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a string which represents a version of the pattern where all asterisks have been escaped.", @@ -6397,6 +6450,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Computes the set of reachable nodes in the graph from a set of starting nodes.", @@ -6478,6 +6532,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Computes the set of reachable paths in the graph from a set of starting nodes.", @@ -6552,6 +6607,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Checks that a GraphQL query is valid against a given schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", @@ -6626,6 +6682,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns AST objects for a given GraphQL query and schema after validating the query against the schema. Returns undefined if errors were encountered during parsing or validation. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", @@ -6700,6 +6757,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a boolean indicating success or failure alongside the parsed ASTs for a given GraphQL query and schema after validating the query against the schema. The query and/or schema can be either GraphQL strings or AST objects from the other GraphQL builtin functions.", @@ -6769,6 +6827,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns an AST object for a GraphQL query.", @@ -6838,6 +6897,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns an AST object for a GraphQL schema.", @@ -6899,6 +6959,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Checks that the input is a valid GraphQL schema. 
The schema can be either a GraphQL string or an AST object from the other GraphQL builtin functions.", @@ -7030,6 +7091,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "infix": "\u003e", @@ -7161,6 +7223,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "infix": "\u003e=", @@ -7266,6 +7329,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Deserializes the hex-encoded input string.", @@ -7371,6 +7435,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Serializes the input string using hex-encoding.", @@ -7499,6 +7564,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a HTTP response to the given HTTP request.", @@ -7632,6 +7698,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the index of a substring contained inside a string.", @@ -7713,6 +7780,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a list of all the indexes of a substring contained inside a string.", @@ -7796,6 +7864,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "infix": "in", @@ -7880,6 +7949,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "infix": "in", @@ -7958,6 +8028,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "introduced": "v0.34.0", @@ -8081,6 +8152,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the intersection of the given input sets.", @@ -8209,6 +8281,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Decodes a JSON Web Token and outputs it as an object.", @@ -8342,6 +8415,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies a JWT signature under parameterized constraints and decodes the claims if it is valid.\nSupports the following algorithms: HS256, HS384, HS512, RS256, RS384, RS512, ES256, ES384, ES512, PS256, PS384 and PS512.", @@ -8480,6 +8554,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Encodes and optionally signs a JSON Web Token. 
Inputs are taken as objects, not encoded strings (see `io.jwt.encode_sign_raw`).", @@ -8618,6 +8693,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Encodes and optionally signs a JSON Web Token.", @@ -8751,6 +8827,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies if a ES256 JWT signature is valid.", @@ -8875,6 +8952,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies if a ES384 JWT signature is valid.", @@ -8999,6 +9077,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies if a ES512 JWT signature is valid.", @@ -9132,6 +9211,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies if a HS256 (secret) JWT signature is valid.", @@ -9256,6 +9336,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies if a HS384 (secret) JWT signature is valid.", @@ -9380,6 +9461,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies if a HS512 (secret) JWT signature is valid.", @@ -9513,6 +9595,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies if a PS256 JWT signature is valid.", @@ -9637,6 +9720,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies if a PS384 JWT signature is valid.", @@ -9761,6 +9845,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies if a PS512 JWT signature is valid.", @@ -9894,6 +9979,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies if a RS256 JWT signature is valid.", @@ -10018,6 +10104,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies if a RS384 JWT signature is valid.", @@ -10142,6 +10229,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies if a RS512 JWT signature is valid.", @@ -10270,6 +10358,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns `true` if the input value is an array.", @@ -10398,6 +10487,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns `true` if the input value is a boolean.", @@ -10526,6 +10616,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns `true` if the input value is null.", @@ -10654,6 +10745,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns `true` if the input value is a number.", @@ -10782,6 +10874,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns true if the input value is an object", @@ -10910,6 +11003,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns `true` if the input value is a set.", @@ -11038,6 +11132,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns `true` if the input value is a string.", @@ -11171,6 +11266,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Filters the object. For example: `json.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"b\": \"x\"}}`). 
Paths are not filtered in-order and are deduplicated before being evaluated.", @@ -11277,6 +11373,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies the input string is a valid JSON document.", @@ -11405,6 +11502,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Serializes the input term to JSON.", @@ -11439,6 +11537,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Serializes the input term JSON, with additional formatting options via the `opts` parameter. `opts` accepts keys `pretty` (enable multi-line/formatted JSON), `prefix` (string to prefix lines with, default empty string) and `indent` (string to indent with, default `\\t`).", @@ -11492,6 +11591,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Checks that the document matches the JSON schema.", @@ -11599,6 +11699,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Patches an object according to RFC6902. For example: `json.patch({\"a\": {\"foo\": 1}}, [{\"op\": \"add\", \"path\": \"/a/bar\", \"value\": 2}])` results in `{\"a\": {\"foo\": 1, \"bar\": 2}`. The patches are applied atomically: if any of them fails, the result will be undefined. Additionally works on sets, where a value contained in the set is considered to be its path.", @@ -11728,6 +11829,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Removes paths from an object. For example: `json.remove({\"a\": {\"b\": \"x\", \"c\": \"y\"}}, [\"a/b\"])` will result in `{\"a\": {\"c\": \"y\"}}`. Paths are not removed in-order and are deduplicated before being evaluated.", @@ -11856,6 +11958,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Deserializes the input string.", @@ -11904,6 +12007,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Checks that the input is a valid JSON schema object. The schema can be either a JSON string or an JSON object.", @@ -12032,6 +12136,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the input string but with all characters in lower-case.", @@ -12163,6 +12268,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "infix": "\u003c", @@ -12294,6 +12400,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "infix": "\u003c=", @@ -12422,6 +12529,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the maximum value in a collection.", @@ -12550,6 +12658,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the minimum value in a collection.", @@ -12681,6 +12790,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Minus subtracts the second number from the first number or computes the difference between two sets.", @@ -12813,6 +12923,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Multiplies two numbers.", @@ -12945,6 +13056,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "infix": "!=", @@ -13078,6 +13190,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Checks if a CIDR or IP is contained within another CIDR. `output` is `true` if `cidr_or_ip` (e.g. `127.0.0.64/26` or `127.0.0.1`) is contained within `cidr` (e.g. `127.0.0.1/24`) and `false` otherwise. 
Supports both IPv4 and IPv6 notations.", @@ -13206,6 +13319,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Checks if collections of cidrs or ips are contained within another collection of cidrs and returns matches. This function is similar to `net.cidr_contains` except it allows callers to pass collections of CIDRs or IPs as arguments and returns the matches (as opposed to a boolean result indicating a match between two CIDRs/IPs).", @@ -13334,6 +13448,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Expands CIDR to set of hosts (e.g., `net.cidr_expand(\"192.168.0.0/30\")` generates 4 hosts: `{\"192.168.0.0\", \"192.168.0.1\", \"192.168.0.2\", \"192.168.0.3\"}`).", @@ -13467,6 +13582,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Checks if a CIDR intersects with another CIDR (e.g. `192.168.0.0/16` overlaps with `192.168.1.0/24`). Supports both IPv4 and IPv6 notations.", @@ -13528,6 +13644,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Parses an IPv4/IPv6 CIDR and returns a boolean indicating if the provided CIDR is valid.", @@ -13635,6 +13752,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Merges IP addresses and subnets into the smallest possible list of CIDRs (e.g., `net.cidr_merge([\"192.0.128.0/24\", \"192.0.129.0/24\"])` generates `{\"192.0.128.0/23\"}`.This function merges adjacent subnets where possible, those contained within others and also removes any duplicates.\nSupports both IPv4 and IPv6 notations. IPv6 inputs need a prefix length (e.g. \"/128\").", @@ -13764,6 +13882,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "deprecated": true, @@ -13841,6 +13960,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the set of IP addresses (both v4 and v6) that the passed-in `name` resolves to using the standard name resolution mechanisms available.", @@ -13957,6 +14077,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns an array of numbers in the given (inclusive) range. If `a==b`, then `range == [a]`; if `a \u003e b`, then `range` is in descending order.", @@ -14006,6 +14127,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns an array of numbers in the given (inclusive) range incremented by a positive step.\n\tIf \"a==b\", then \"range == [a]\"; if \"a \u003e b\", then \"range\" is in descending order.\n\tIf the provided \"step\" is less then 1, an error will be thrown.\n\tIf \"b\" is not in the range of the provided \"step\", \"b\" won't be included in the result.\n\t", @@ -14137,6 +14259,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Filters the object by keeping only specified keys. For example: `object.filter({\"a\": {\"b\": \"x\", \"c\": \"y\"}, \"d\": \"z\"}, [\"a\"])` will result in `{\"a\": {\"b\": \"x\", \"c\": \"y\"}}`).", @@ -14275,6 +14398,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns value of an object's key if present, otherwise a default. If the supplied `key` is an `array`, then `object.get` will search through a nested object or array using each key in turn. For example: `object.get({\"a\": [{ \"b\": true }]}, [\"a\", 0, \"b\"], false)` results in `true`.", @@ -14332,6 +14456,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a set of an object's keys. 
For example: `object.keys({\"a\": 1, \"b\": true, \"c\": \"d\")` results in `{\"a\", \"b\", \"c\"}`.", @@ -14463,6 +14588,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Removes specified keys from an object.", @@ -14536,6 +14662,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Determines if an object `sub` is a subset of another object `super`.Object `sub` is a subset of object `super` if and only if every key in `sub` is also in `super`, **and** for all keys which `sub` and `super` share, they have the same value. This function works with objects, sets, arrays and a set of array and set.If both arguments are objects, then the operation is recursive, e.g. `{\"c\": {\"x\": {10, 15, 20}}` is a subset of `{\"a\": \"b\", \"c\": {\"x\": {10, 15, 20, 25}, \"y\": \"z\"}`. If both arguments are sets, then this function checks if every element of `sub` is a member of `super`, but does not attempt to recurse. If both arguments are arrays, then this function checks if `sub` appears contiguously in order within `super`, and also does not attempt to recurse. If `super` is array and `sub` is set, then this function checks if `super` contains every element of `sub` with no consideration of ordering, and also does not attempt to recurse.", @@ -14667,6 +14794,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Creates a new object of the asymmetric union of two objects. For example: `object.union({\"a\": 1, \"b\": 2, \"c\": {\"d\": 3}}, {\"a\": 7, \"c\": {\"d\": 4, \"e\": 5}})` will result in `{\"a\": 7, \"b\": 2, \"c\": {\"d\": 4, \"e\": 5}}`.", @@ -14743,6 +14871,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Creates a new object that is the asymmetric union of all objects merged from left to right. For example: `object.union_n([{\"a\": 1}, {\"b\": 2}, {\"a\": 3}])` will result in `{\"b\": 2, \"a\": 3}`.", @@ -14865,6 +14994,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns an object that describes the runtime environment where OPA is deployed.", @@ -14996,6 +15126,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the union of two sets.", @@ -15128,6 +15259,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Plus adds two numbers together.", @@ -15205,6 +15337,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "introduced": "v0.34.0", @@ -15328,6 +15461,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Multiplies elements of an array or set of numbers", @@ -15395,6 +15529,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Signs an HTTP request object for Amazon Web Services. Currently implements [AWS Signature Version 4 request signing](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) by the `Authorization` header method.", @@ -15487,6 +15622,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a random integer between `0` and `n` (`n` exclusive). If `n` is `0`, then `y` is always `0`. 
For any given argument pair (`str`, `n`), the output will be consistent throughout a query evaluation.", @@ -15616,6 +15752,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "deprecated": true, @@ -15752,6 +15889,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns all successive matches of the expression.", @@ -15890,6 +16028,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the specified number of matches when matching the input against the pattern.", @@ -16023,6 +16162,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Checks if the intersection of two glob-style regular expressions matches a non-empty set of non-empty strings.\nThe set of regex symbols is limited for this builtin: only `.`, `*`, `+`, `[`, `-`, `]` and `\\` are treated as special symbols.", @@ -16133,6 +16273,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Checks if a string is a valid regular expression: the detailed syntax for patterns is defined by https://github.com/google/re2/wiki/Syntax.", @@ -16248,6 +16389,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Matches a string against a regular expression.", @@ -16320,6 +16462,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Find and replaces the text using the regular expression pattern.", @@ -16453,6 +16596,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Splits the input string by the occurrences of the given pattern.", @@ -16596,6 +16740,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Matches a string against a pattern, where there pattern may be glob-like", @@ -16660,6 +16805,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the chain of metadata for the active rule.\nOrdered starting at the active rule, going outward to the most distant node in its package ancestry.\nA chain entry is a JSON document with two members: \"path\", an array representing the path of the node; and \"annotations\", a JSON document containing the annotations declared for the node.\nThe first entry in the chain always points to the active rule, even if it has no declared annotations (in which case the \"annotations\" member is not present).", @@ -16724,6 +16870,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns annotations declared for the active rule and using the _rule_ scope.", @@ -16857,6 +17004,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Parses the input Rego string and returns an object representation of the AST.", @@ -16988,6 +17136,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the remainder for of `x` divided by `y`, for `y != 0`.", @@ -17127,6 +17276,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Replace replaces all instances of a sub-string.", @@ -17255,6 +17405,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Rounds the number to the nearest integer.", @@ -17371,6 +17522,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Compares valid SemVer formatted version strings.", @@ -17482,6 +17634,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Validates that the input is a valid SemVer string.", @@ -17611,6 +17764,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "deprecated": true, @@ -17737,6 
+17891,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a sorted array.", @@ -17870,6 +18025,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Split returns an array containing elements of the input string split on a delimiter.", @@ -18003,6 +18159,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the given string, formatted.", @@ -18136,6 +18293,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns true if the search string begins with the base string.", @@ -18204,6 +18362,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns true if any of the search strings begins with any of the base strings.", @@ -18272,6 +18431,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns true if any of the search strings ends with any of the base strings.", @@ -18302,6 +18462,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the number of non-overlapping instances of a substring in a string.", @@ -18342,6 +18503,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Renders a templated string with given template variables injected. For a given templated string and key/value mapping, values will be injected into the template where they are referenced by key.\n\tFor examples of templating syntax, see https://pkg.go.dev/text/template", @@ -18475,6 +18637,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Replaces a string from a list of old, new string pairs.\nReplacements are performed in the order they appear in the target string, without overlapping matches.\nThe old string comparisons are done in argument order.", @@ -18553,6 +18716,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Reverses a given string.", @@ -18691,6 +18855,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the portion of a string for a given `offset` and a `length`. If `length \u003c 0`, `output` is the remainder of the string.", @@ -18819,6 +18984,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Sums elements of an array or set of numbers.", @@ -18956,6 +19122,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the nanoseconds since epoch after adding years, months and days to nanoseconds. Month \u0026 day values outside their usual ranges after the operation and will be normalized - for example, October 32 would become November 1. 
`undefined` if the result would be outside the valid time range that can fit within an `int64`.", @@ -19084,6 +19251,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the `[hour, minute, second]` of the day for the nanoseconds since epoch.", @@ -19212,6 +19380,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the `[year, month, day]` for the nanoseconds since epoch.", @@ -19313,6 +19482,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the difference between two unix timestamps in nanoseconds (with optional timezone strings).", @@ -19365,6 +19535,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the formatted timestamp for the nanoseconds since epoch.", @@ -19487,6 +19658,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the current time since epoch in nanoseconds.", @@ -19615,6 +19787,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the duration in nanoseconds represented by a string.", @@ -19748,6 +19921,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the time in nanoseconds parsed from the string in the given format. `undefined` if the result would be outside the valid time range that can fit within an `int64`.", @@ -19876,6 +20050,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the time in nanoseconds parsed from the string in RFC3339 format. `undefined` if the result would be outside the valid time range that can fit within an `int64`.", @@ -20004,6 +20179,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the day of the week (Monday, Tuesday, ...) for the nanoseconds since epoch.", @@ -20132,6 +20308,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Converts a string, bool, or number value to a number: Strings are converted to numbers using `strconv.Atoi`, Boolean `false` is converted to 0 and `true` is converted to 1.", @@ -20260,6 +20437,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Emits `note` as a `Note` event in the query explanation. Query explanations show the exact expressions evaluated by OPA during policy execution. For example, `trace(\"Hello There!\")` includes `Note \"Hello There!\"` in the query explanation. To include variables in the message, use `sprintf`. For example, `person := \"Bob\"; trace(sprintf(\"Hello There! %v\", [person]))` will emit `Note \"Hello There! Bob\"` inside of the explanation.", @@ -20393,6 +20571,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns `value` with all leading or trailing instances of the `cutset` characters removed.", @@ -20526,6 +20705,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns `value` with all leading instances of the `cutset` characters removed.", @@ -20659,6 +20839,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns `value` without the prefix. 
If `value` doesn't start with `prefix`, it is returned unchanged.", @@ -20792,6 +20973,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns `value` with all trailing instances of the `cutset` characters removed.", @@ -20920,6 +21102,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Return the given string with all leading and trailing white space removed.", @@ -21053,6 +21236,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns `value` without the suffix. If `value` doesn't end with `suffix`, it is returned unchanged.", @@ -21181,6 +21365,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the type of its input value.", @@ -21309,6 +21494,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the union of the given input sets.", @@ -21378,6 +21564,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Converts strings like \"10G\", \"5K\", \"4M\", \"1500m\", and the like into a number.\nThis number can be a non-integer, such as 1.5, 0.22, etc. Scientific notation is supported,\nallowing values such as \"1e-3K\" (1) or \"2.5e6M\" (2.5 million M).\n\nSupports standard metric decimal and binary SI units (e.g., K, Ki, M, Mi, G, Gi, etc.) where\nm, K, M, G, T, P, and E are treated as decimal units and Ki, Mi, Gi, Ti, Pi, and Ei are treated as\nbinary units.\n\nNote that 'm' and 'M' are case-sensitive to allow distinguishing between \"milli\" and \"mega\" units\nrespectively. Other units are case-insensitive.", @@ -21506,6 +21693,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Converts strings like \"10GB\", \"5K\", \"4mb\", or \"1e6KB\" into an integer number of bytes.\n\nSupports standard byte units (e.g., KB, KiB, etc.) where KB, MB, GB, and TB are treated as decimal\nunits, and KiB, MiB, GiB, and TiB are treated as binary units. Scientific notation is supported,\nenabling values like \"1.5e3MB\" (1500MB) or \"2e6GiB\" (2 million GiB).\n\nThe bytes symbol (b/B) in the unit is optional; omitting it will yield the same result (e.g., \"Mi\"\nand \"MiB\" are equivalent).", @@ -21634,6 +21822,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns the input string but with all characters in upper-case.", @@ -21762,6 +21951,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Decodes a URL-encoded input string.", @@ -21869,6 +22059,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Decodes the given URL query string into an object.", @@ -21997,6 +22188,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Encodes the input string into a URL-encoded string.", @@ -22125,6 +22317,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Encodes the given object into a URL encoded query string.", @@ -22163,6 +22356,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Parses the string value as an UUID and returns an object with the well-defined fields of the UUID if valid.", @@ -22282,6 +22476,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Returns a new UUIDv4.", @@ -22410,6 +22605,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Generates `[path, value]` tuples for all nested documents of `x` (recursively). 
Queries can use `walk` to traverse documents nested under `x`.", @@ -22517,6 +22713,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Verifies the input string is a valid YAML document.", @@ -22645,6 +22842,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Serializes the input term to YAML.", @@ -22773,6 +22971,7 @@ "v0.68.0", "v0.69.0", "v0.70.0", + "v1.0.0", "edge" ], "description": "Deserializes the input string.", diff --git a/capabilities.json b/capabilities.json index 862a4555f9..48a87b0c35 100644 --- a/capabilities.json +++ b/capabilities.json @@ -4819,12 +4819,6 @@ } } ], - "future_keywords": [ - "contains", - "every", - "if", - "in" - ], "wasm_abi_versions": [ { "version": 1, @@ -4836,8 +4830,6 @@ } ], "features": [ - "rule_head_ref_string_prefixes", - "rule_head_refs", - "rego_v1_import" + "rego_v1" ] } diff --git a/cmd/bench.go b/cmd/bench.go index a2e484ec44..2ec1e9a125 100644 --- a/cmd/bench.go +++ b/cmd/bench.go @@ -62,8 +62,9 @@ func newBenchmarkEvalParams() benchmarkCommandParams { evalPrettyOutput, benchmarkGoBenchOutput, }), - target: util.NewEnumFlag(compile.TargetRego, []string{compile.TargetRego, compile.TargetWasm}), - schema: &schemaFlags{}, + target: util.NewEnumFlag(compile.TargetRego, []string{compile.TargetRego, compile.TargetWasm}), + schema: &schemaFlags{}, + capabilities: newcapabilitiesFlag(), }, gracefulShutdownPeriod: 10, } diff --git a/cmd/build.go b/cmd/build.go index 1e3c6944cb..f6655c37ab 100644 --- a/cmd/build.go +++ b/cmd/build.go @@ -57,6 +57,17 @@ func newBuildParams() buildParams { } } +func (p *buildParams) regoVersion() ast.RegoVersion { + if p.v0Compatible { + // v0 takes precedence over v1 + return ast.RegoV0 + } + if p.v1Compatible { + return ast.RegoV1 + } + return ast.DefaultRegoVersion +} + func init() { buildParams := newBuildParams() @@ -292,7 +303,7 @@ func dobuild(params buildParams, args []string) error { if params.capabilities.C != nil { capabilities = params.capabilities.C } else { - capabilities = ast.CapabilitiesForThisVersion() + capabilities = ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(params.regoVersion())) } compiler := compile.New(). @@ -311,14 +322,7 @@ func dobuild(params buildParams, args []string) error { WithPartialNamespace(params.ns). 
WithFollowSymlinks(params.followSymlinks) - regoVersion := ast.DefaultRegoVersion - if params.v0Compatible { - // v0 takes precedence over v1 - regoVersion = ast.RegoV0 - } else if params.v1Compatible { - regoVersion = ast.RegoV1 - } - compiler = compiler.WithRegoVersion(regoVersion) + compiler = compiler.WithRegoVersion(params.regoVersion()) if params.revision.isSet { compiler = compiler.WithRevision(*params.revision.v) diff --git a/cmd/build_test.go b/cmd/build_test.go index f6361a1f95..8f99ba21ef 100644 --- a/cmd/build_test.go +++ b/cmd/build_test.go @@ -108,6 +108,7 @@ p { is_foo("bar") }`, caps: func() string { c := ast.CapabilitiesForThisVersion() c.FutureKeywords = []string{"in"} + c.Features = []string{} j, err := json.Marshal(c) if err != nil { panic(err) @@ -120,6 +121,23 @@ import future.keywords.in p if "opa" in input.tools`, err: "rego_parse_error: unexpected keyword, must be one of [in]", }, + { + note: "future kw NOT defined in caps, rego-v1 feature", + caps: func() string { + c := ast.CapabilitiesForThisVersion() + c.FutureKeywords = []string{"in"} + c.Features = []string{ast.FeatureRegoV1} + j, err := json.Marshal(c) + if err != nil { + panic(err) + } + return string(j) + }(), + policy: `package test +import future.keywords.if +import future.keywords.in +p if "opa" in input.tools`, + }, { note: "future kw are defined in caps", caps: func() string { @@ -136,6 +154,34 @@ import future.keywords.if import future.keywords.in p if "opa" in input.tools`, }, + { + note: "rego.v1 imported AND defined in capabilities", + caps: func() string { + c := ast.CapabilitiesForThisVersion() + c.Features = []string{ast.FeatureRegoV1Import} + j, err := json.Marshal(c) + if err != nil { + panic(err) + } + return string(j) + }(), + policy: `package test +import rego.v1`, + }, + { + note: "rego.v1 imported AND rego-v1 in capabilities", + caps: func() string { + c := ast.CapabilitiesForThisVersion() + c.Features = []string{ast.FeatureRegoV1} + j, err := json.Marshal(c) + if err != nil { + panic(err) + } + return string(j) + }(), + policy: `package test +import rego.v1`, + }, } // add same tests for bundle-mode == true: @@ -1098,7 +1144,48 @@ p contains 2 if { }, }, { - note: "multiple bundles with different rego-versions (v0-compatible)", + note: "multiple bundles with different rego-versions, v0-compatible", + v0Compatible: true, + roots: []string{"bundle1", "bundle2"}, + files: map[string]string{ + "bundle1/.manifest": `{ + "roots": ["test1"], + "rego_version": 0, + "file_rego_versions": { + "*/test2.rego": 1 + } +}`, + "bundle1/test1.rego": `package test1 +p[1] { + input.x == 1 +}`, + "bundle1/test2.rego": `package test1 +p contains 2 if { + input.x == 1 +}`, + "bundle2/.manifest": `{ + "roots": ["test2"], + "rego_version": 1, + "file_rego_versions": { + "*/test4.rego": 0 + } +}`, + "bundle2/test3.rego": `package test2 +p contains 3 if { + input.x == 1 +}`, + "bundle2/test4.rego": `package test2 +p[4] { + input.x == 1 +}`, + }, + expErrs: []string{ + // capabilities inferred from --v0-compatible doesn't include rego_v1 feature, which must be respected + "rego_parse_error: illegal capabilities: rego_v1 feature required for parsing v1 Rego", + }, + }, + { + note: "multiple bundles with different rego-versions, v0-compatible, rego_v1 capabilities feature", v0Compatible: true, roots: []string{"bundle1", "bundle2"}, files: map[string]string{ @@ -1132,11 +1219,20 @@ p contains 3 if { p[4] { input.x == 1 }`, + "capabilities.json": func() string { + caps := 
ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)) + caps.Features = append(caps.Features, ast.FeatureRegoV1) + bs, err := json.Marshal(caps) + if err != nil { + t.Fatal(err) + } + return string(bs) + }(), }, expManifest: `{"revision":"","roots":["test1","test2"],"rego_version":0,"file_rego_versions":{"%ROOT%/bundle1/test2.rego":1,"%ROOT%/bundle2/test3.rego":1}}`, }, { - note: "multiple bundles with different rego-versions (v1-compatible)", + note: "multiple bundles with different rego-versions, v1-compatible", v1Compatible: true, roots: []string{"bundle1", "bundle2"}, files: map[string]string{ @@ -1184,6 +1280,10 @@ p[4] { params.v0Compatible = tc.v0Compatible params.v1Compatible = tc.v1Compatible + if _, ok := tc.files["capabilities.json"]; ok { + _ = params.capabilities.Set(path.Join(root, "capabilities.json")) + } + var roots []string if len(tc.roots) == 0 { roots = []string{root} @@ -1259,6 +1359,7 @@ func TestBuildBundleFromOtherBundles(t *testing.T) { note string v0Compatible bool v1Compatible bool + capabilities *ast.Capabilities bundles map[string]bundleInfo expBundle bundleInfo expErrs []string @@ -1520,6 +1621,37 @@ p { "policy_1.rego": `package test q contains 1 if { input.x == 1 +}`, + }, + }, + expErrs: []string{ + // capabilities inferred from --v0-compatible doesn't include rego_v1 feature, which must be respected + "rego_parse_error: illegal capabilities: rego_v1 feature required for parsing v1 Rego", + }, + }, + { + note: "single v1 bundle, v0 per-file override, --v0-compatible, rego_v1 capabilities feature", + v0Compatible: true, + capabilities: func() *ast.Capabilities { + caps := ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)) + caps.Features = append(caps.Features, ast.FeatureRegoV1) + return caps + }(), + bundles: map[string]bundleInfo{ + "bundle.tar.gz": { + ".manifest": `{ + "rego_version": 1, + "file_rego_versions": { + "/policy_0.rego": 0 + } +}`, + "policy_0.rego": `package test +p { + input.x == 1 +}`, + "policy_1.rego": `package test +q contains 1 if { + input.x == 1 }`, }, }, @@ -1558,6 +1690,35 @@ p { "policy.rego": `package test2 q contains 1 if { input.x == 1 +}`, + }, + }, + expErrs: []string{ + // capabilities inferred from --v0-compatible doesn't include rego_v1 feature, which must be respected + "rego_parse_error: illegal capabilities: rego_v1 feature required for parsing v1 Rego", + }, + }, + { + note: "v0 bundle + v1 bundle, --v0-compatible, rego_v1 capabilities feature", + v0Compatible: true, + capabilities: func() *ast.Capabilities { + caps := ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)) + caps.Features = append(caps.Features, ast.FeatureRegoV1) + return caps + }(), + bundles: map[string]bundleInfo{ + "bundle_v0.tar.gz": { + ".manifest": `{"roots": ["test1"], "rego_version": 0}`, + "policy.rego": `package test1 +p { + input.x == 1 +}`, + }, + "bundle_v1.tar.gz": { + ".manifest": `{"roots": ["test2"], "rego_version": 1}`, + "policy.rego": `package test2 +q contains 1 if { + input.x == 1 }`, }, }, @@ -1649,6 +1810,10 @@ q contains 1 if { params.v0Compatible = tc.v0Compatible params.v1Compatible = tc.v1Compatible + if tc.capabilities != nil { + params.capabilities.C = tc.capabilities + } + err := dobuild(params, roots) if tc.expErrs != nil { if err == nil { @@ -1837,6 +2002,298 @@ p contains x if { } } +func TestBuildWithRegoV1Capability(t *testing.T) { + tests := []struct { + note string + v0Compatible bool + capabilities *ast.Capabilities + files map[string]string + expFiles 
map[string]string + expErrs []string + }{ + { + note: "v0 module, v0-compatible, no capabilities", + v0Compatible: true, + files: map[string]string{ + "test.rego": `package test + p[x] { + x := 42 + }`, + }, + expFiles: map[string]string{ + ".manifest": `{"revision":"","roots":[""],"rego_version":0} +`, + "test.rego": `package test + +p[x] { + x := 42 +} +`, + }, + }, + { + note: "v0 module, v0-compatible, v0 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + files: map[string]string{ + "test.rego": `package test + p[x] { + x := 42 + }`, + }, + expFiles: map[string]string{ + ".manifest": `{"revision":"","roots":[""],"rego_version":0} +`, + "test.rego": `package test + +p[x] { + x := 42 +} +`, + }, + }, + { + note: "v0 module, v0-compatible, v1 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + files: map[string]string{ + "test.rego": `package test + p[x] { + x := 42 + }`, + }, + expFiles: map[string]string{ + ".manifest": `{"revision":"","roots":[""],"rego_version":0} +`, + "test.rego": `package test + +p[x] { + x := 42 +} +`, + }, + }, + + { + note: "v0 module, not v0-compatible, no capabilities", + files: map[string]string{ + "test.rego": `package test + p[x] { + x := 42 + }`, + }, + expErrs: []string{ + "test.rego:2: rego_parse_error: `if` keyword is required before rule body", + "test.rego:2: rego_parse_error: `contains` keyword is required for partial set rules", + }, + }, + { + note: "v0 module, not v0-compatible, v0 capabilities", + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + files: map[string]string{ + "test.rego": `package test + p[x] { + x := 42 + }`, + }, + expErrs: []string{ + "rego_parse_error: illegal capabilities: rego_v1 feature required for parsing v1 Rego", + }, + }, + { + note: "v0 module, not v0-compatible, v1 capabilities", + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + files: map[string]string{ + "test.rego": `package test + p[x] { + x := 42 + }`, + }, + expErrs: []string{ + "test.rego:2: rego_parse_error: `if` keyword is required before rule body", + "test.rego:2: rego_parse_error: `contains` keyword is required for partial set rules", + }, + }, + + { + note: "v1 module, v0-compatible, no capabilities", + v0Compatible: true, + files: map[string]string{ + "test.rego": `package test + + p contains x if { + x := 42 + }`, + }, + expErrs: []string{ + "test.rego:3: rego_parse_error: var cannot be used for rule name", + }, + }, + { + note: "v1 module, v0-compatible, v0 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + files: map[string]string{ + "test.rego": `package test + + p contains x if { + x := 42 + }`, + }, + expErrs: []string{ + "test.rego:3: rego_parse_error: var cannot be used for rule name", + }, + }, + { + note: "v1 module, v0-compatible, v1 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + files: map[string]string{ + "test.rego": `package test + + p contains x if { + x := 42 + }`, + }, + expErrs: []string{ + "test.rego:3: rego_parse_error: var cannot be used for rule name", + }, + }, + + { + note: "v1 module, not v0-compatible, no capabilities", + files: map[string]string{ + "test.rego": `package test + + p contains x if { + x := 42 + }`, + }, + expFiles: 
map[string]string{ + ".manifest": `{"revision":"","roots":[""],"rego_version":1} +`, + "test.rego": `package test + +p contains x if { + x := 42 +} +`, + }, + }, + { + note: "v1 module, not v0-compatible, v0 capabilities", + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + files: map[string]string{ + "test.rego": `package test + + p contains x if { + x := 42 + }`, + }, + expErrs: []string{ + "rego_parse_error: illegal capabilities: rego_v1 feature required for parsing v1 Rego", + }, + }, + { + note: "v1 module, not v0-compatible, v1 capabilities", + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + files: map[string]string{ + "test.rego": `package test + + p contains x if { + x := 42 + }`, + }, + expFiles: map[string]string{ + ".manifest": `{"revision":"","roots":[""],"rego_version":1} +`, + "test.rego": `package test + +p contains x if { + x := 42 +} +`, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.note, func(t *testing.T) { + test.WithTempFS(tc.files, func(root string) { + params := newBuildParams() + params.outputFile = path.Join(root, "bundle.tar.gz") + params.v0Compatible = tc.v0Compatible + params.capabilities.C = tc.capabilities + + err := dobuild(params, []string{root}) + + if len(tc.expErrs) > 0 { + if err == nil { + t.Fatal("expected error but got nil") + } + for _, expErr := range tc.expErrs { + if !strings.Contains(err.Error(), expErr) { + t.Fatalf("expected error:\n\n%v\n\ngot:\n\n%v", expErr, err) + } + } + } else { + if err != nil { + t.Fatal(err) + } + + fl := loader.NewFileLoader() + _, err = fl.AsBundle(params.outputFile) + if err != nil { + t.Fatal(err) + } + + // Check that manifest is not written given no input manifest and no other flags + f, err := os.Open(params.outputFile) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + gr, err := gzip.NewReader(f) + if err != nil { + t.Fatal(err) + } + + tr := tar.NewReader(gr) + + foundFiles := map[string]struct{}{} + for { + f, err := tr.Next() + if err == io.EOF { + break + } else if err != nil { + t.Fatal(err) + } + foundFiles[path.Base(f.Name)] = struct{}{} + expectedFile := tc.expFiles[path.Base(f.Name)] + if expectedFile != "" { + data, err := io.ReadAll(tr) + if err != nil { + t.Fatal(err) + } + actualFile := string(data) + if actualFile != expectedFile { + t.Fatalf("expected file %s to be:\n\n%v\n\ngot:\n\n%v", f.Name, expectedFile, actualFile) + } + } + } + + for expectedFile := range tc.expFiles { + if _, ok := foundFiles[expectedFile]; !ok { + t.Fatalf("expected file %s not found in bundle, got: %v", expectedFile, foundFiles) + } + } + } + }) + }) + } +} + func TestBuildWithCompatibleFlags(t *testing.T) { tests := []struct { note string diff --git a/cmd/capabilities.go b/cmd/capabilities.go index bbcf4fc6ce..e031671a1c 100644 --- a/cmd/capabilities.go +++ b/cmd/capabilities.go @@ -15,9 +15,17 @@ import ( ) type capabilitiesParams struct { - showCurrent bool - version string - file string + showCurrent bool + version string + file string + v0Compatible bool +} + +func (p *capabilitiesParams) regoVersion() ast.RegoVersion { + if p.v0Compatible { + return ast.RegoV0 + } + return ast.DefaultRegoVersion } func init() { @@ -84,7 +92,8 @@ Print the capabilities of a capabilities file } capabilitiesCommand.Flags().BoolVar(&capabilitiesParams.showCurrent, "current", false, "print current capabilities") capabilitiesCommand.Flags().StringVar(&capabilitiesParams.version, "version", "", "print capabilities of a specific version") - 
capabilitiesCommand.Flags().StringVar(&capabilitiesParams.file, "file", "", "print current capabilities") + capabilitiesCommand.Flags().StringVar(&capabilitiesParams.file, "file", "", "print capabilities defined by a file") + addV0CompatibleFlag(capabilitiesCommand.Flags(), &capabilitiesParams.v0Compatible, false) RootCommand.AddCommand(capabilitiesCommand) } @@ -100,7 +109,7 @@ func doCapabilities(params capabilitiesParams) (string, error) { } else if len(params.file) > 0 { c, err = ast.LoadCapabilitiesFile(params.file) } else if params.showCurrent { - c = ast.CapabilitiesForThisVersion() + c = ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(params.regoVersion())) } else { return showVersions() } diff --git a/cmd/capabilities_test.go b/cmd/capabilities_test.go index 39f8a4a1f0..18162abdfc 100644 --- a/cmd/capabilities_test.go +++ b/cmd/capabilities_test.go @@ -5,9 +5,13 @@ package cmd import ( + "bytes" "path" + "reflect" + "sort" "testing" + "github.com/open-policy-agent/opa/v1/ast" "github.com/open-policy-agent/opa/v1/util/test" ) @@ -74,3 +78,64 @@ func TestCapabilitiesFile(t *testing.T) { }) } + +func TestCapabilitiesCurrent(t *testing.T) { + tests := []struct { + note string + v0Compatible bool + expFeatures []string + expFutureKeywords []string + }{ + { + note: "current", + expFeatures: []string{ + ast.FeatureRegoV1, + }, + }, + { + note: "current --v0-compatible", + v0Compatible: true, + expFeatures: []string{ + ast.FeatureRefHeadStringPrefixes, + ast.FeatureRefHeads, + ast.FeatureRegoV1Import, + }, + expFutureKeywords: []string{ + "in", + "every", + "contains", + "if", + }, + }, + } + + for _, tc := range tests { + t.Run(tc.note, func(t *testing.T) { + // These are sorted in the output + sort.Strings(tc.expFutureKeywords) + sort.Strings(tc.expFeatures) + + params := capabilitiesParams{ + showCurrent: true, + v0Compatible: tc.v0Compatible, + } + capsStr, err := doCapabilities(params) + if err != nil { + t.Fatal("expected success", err) + } + + caps, err := ast.LoadCapabilitiesJSON(bytes.NewReader([]byte(capsStr))) + if err != nil { + t.Fatal("expected success", err) + } + + if !reflect.DeepEqual(caps.Features, tc.expFeatures) { + t.Errorf("expected features:\n\n%v\n\nbut got:\n\n%v", tc.expFeatures, caps.Features) + } + + if !reflect.DeepEqual(caps.FutureKeywords, tc.expFutureKeywords) { + t.Errorf("expected future keywords:\n\n%v\n\nbut got:\n\n%v", tc.expFutureKeywords, caps.FutureKeywords) + } + }) + } +} diff --git a/cmd/check.go b/cmd/check.go index ebd2b5e768..2db3233999 100644 --- a/cmd/check.go +++ b/cmd/check.go @@ -73,7 +73,7 @@ func checkModules(params checkParams, args []string) error { if params.capabilities.C != nil { capabilities = params.capabilities.C } else { - capabilities = ast.CapabilitiesForThisVersion() + capabilities = ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(params.regoVersion())) } ss, err := loader.Schemas(params.schema.path) @@ -194,6 +194,7 @@ func init() { addCapabilitiesFlag(checkCommand.Flags(), checkParams.capabilities) addSchemaFlags(checkCommand.Flags(), checkParams.schema) addStrictFlag(checkCommand.Flags(), &checkParams.strict, false) + // FIXME: Rename or add new flag with same effect? '--rego-v1' will become even more confusing in 1.0, as what it actually means is check that module is compatible with BOTH v0 and v1. 
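Stepping back from the FIXME: the `--v0-compatible` flag added to `opa capabilities` above has a concrete, testable effect, and the expectations encoded in TestCapabilitiesCurrent boil down to the following sketch. This is illustrative only; the inline comments restate that test's expected values rather than additional guarantees.

    // Default capabilities now advertise the consolidated rego_v1 feature,
    // with no future keywords left to enumerate.
    v1caps := ast.CapabilitiesForThisVersion()
    // v1caps.Features:       ["rego_v1"]
    // v1caps.FutureKeywords: empty; in v1 the keywords are part of the language

    // --v0-compatible reproduces the pre-1.0 capabilities surface.
    v0caps := ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0))
    // v0caps.Features:       ["rego_v1_import", "rule_head_ref_string_prefixes", "rule_head_refs"]
    // v0caps.FutureKeywords: ["contains", "every", "if", "in"]
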
addRegoV1FlagWithDescription(checkCommand.Flags(), &checkParams.regoV1, false, "check for Rego v1 compatibility (policies must also be compatible with current OPA version)") addV0CompatibleFlag(checkCommand.Flags(), &checkParams.v0Compatible, false) diff --git a/cmd/check_test.go b/cmd/check_test.go index d851e93240..64f5b7e48d 100644 --- a/cmd/check_test.go +++ b/cmd/check_test.go @@ -54,6 +54,7 @@ p { is_foo("bar") }`, caps: func() string { c := ast.CapabilitiesForThisVersion() c.FutureKeywords = []string{"in"} + c.Features = []string{} j, err := json.Marshal(c) if err != nil { panic(err) @@ -66,6 +67,23 @@ import future.keywords.in p if "opa" in input.tools`, err: "rego_parse_error: unexpected keyword, must be one of [in]", }, + { + note: "future kw NOT defined in caps, rego-v1 feature", + caps: func() string { + c := ast.CapabilitiesForThisVersion() + c.FutureKeywords = []string{"in"} + c.Features = []string{ast.FeatureRegoV1} + j, err := json.Marshal(c) + if err != nil { + panic(err) + } + return string(j) + }(), + policy: `package test +import future.keywords.if +import future.keywords.in +p if "opa" in input.tools`, + }, { note: "future kw are defined in caps", caps: func() string { @@ -109,6 +127,20 @@ import rego.v1`, return string(j) }(), policy: `package test +import rego.v1`, + }, + { + note: "rego.v1 imported AND rego-v1 in capabilities", + caps: func() string { + c := ast.CapabilitiesForThisVersion() + c.Features = []string{ast.FeatureRegoV1} + j, err := json.Marshal(c) + if err != nil { + panic(err) + } + return string(j) + }(), + policy: `package test import rego.v1`, }, } @@ -461,6 +493,170 @@ a contains x if { } } +func TestCheckWithRegoV1Capability(t *testing.T) { + cases := []struct { + note string + v0Compatible bool + capabilities *ast.Capabilities + policy string + expErrs []string + }{ + { + note: "v0 module, v0-compatible, no capabilities", + v0Compatible: true, + policy: `package test +a[x] { + x := 42 +}`, + }, + { + note: "v0 module, v0-compatible, v0 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + policy: `package test +a[x] { + x := 42 +}`, + }, + { + note: "v0 module, v0-compatible, v1 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + policy: `package test +a[x] { + x := 42 +}`, + }, + + { + note: "v0 module, not v0-compatible, no capabilities", + policy: `package test +a[x] { + x := 42 +}`, + expErrs: []string{ + "test.rego:2: rego_parse_error: `if` keyword is required before rule body", + "test.rego:2: rego_parse_error: `contains` keyword is required for partial set rules", + }, + }, + { + note: "v0 module, not v0-compatible, v0 capabilities", + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + policy: `package test +a[x] { + x := 42 +}`, + expErrs: []string{ + "rego_parse_error: illegal capabilities: rego_v1 feature required for parsing v1 Rego", + }, + }, + { + note: "v0 module, not v0-compatible, v1 capabilities", + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + policy: `package test +a[x] { + x := 42 +}`, + expErrs: []string{ + "test.rego:2: rego_parse_error: `if` keyword is required before rule body", + "test.rego:2: rego_parse_error: `contains` keyword is required for partial set rules", + }, + }, + + { + note: "v1 module, v0-compatible, no capabilities", + v0Compatible: true, + policy: `package test +a contains x if 
{ + x := 42 +}`, + expErrs: []string{ + "test.rego:2: rego_parse_error: var cannot be used for rule name", + }, + }, + { + note: "v1 module, v0-compatible, v0 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + policy: `package test +a contains x if { + x := 42 +}`, + expErrs: []string{ + "test.rego:2: rego_parse_error: var cannot be used for rule name", + }, + }, + { + note: "v1 module, v0-compatible, v1 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + policy: `package test +a contains x if { + x := 42 +}`, + expErrs: []string{ + "test.rego:2: rego_parse_error: var cannot be used for rule name", + }, + }, + + { + note: "v1 module, not v0-compatible, no capabilities", + policy: `package test +a contains x if { + x := 42 +}`, + }, + { + note: "v1 module, not v0-compatible, v0 capabilities", + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + policy: `package test +a contains x if { + x := 42 +}`, + expErrs: []string{ + "rego_parse_error: illegal capabilities: rego_v1 feature required for parsing v1 Rego", + }, + }, + { + note: "v1 module, not v0-compatible, v1 capabilities", + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + policy: `package test +a contains x if { + x := 42 +}`, + }, + } + + for _, tc := range cases { + t.Run(tc.note, func(t *testing.T) { + files := map[string]string{ + "test.rego": tc.policy, + } + + test.WithTempFS(files, func(root string) { + params := newCheckParams() + params.v0Compatible = tc.v0Compatible + params.capabilities.C = tc.capabilities + + err := checkModules(params, []string{root}) + switch { + case err != nil && len(tc.expErrs) > 0: + for _, expErr := range tc.expErrs { + if !strings.Contains(err.Error(), expErr) { + t.Fatalf("expected err:\n\n%v\n\ngot:\n\n%v", expErr, err) + } + } + return // don't read back bundle below + case err != nil && len(tc.expErrs) == 0: + t.Fatalf("unexpected error: %v", err) + case err == nil && len(tc.expErrs) > 0: + t.Fatalf("expected error:\n\n%v\n\ngot: none", tc.expErrs) + } + }) + }) + } +} + func TestCheckCompatibleFlags(t *testing.T) { cases := []struct { note string diff --git a/cmd/eval.go b/cmd/eval.go index d6ddff6045..be9261b2c6 100644 --- a/cmd/eval.go +++ b/cmd/eval.go @@ -705,8 +705,10 @@ func setupEval(args []string, params evalCommandParams) (*evalContext, error) { regoArgs = append(regoArgs, rego.BuiltinErrorList(&builtInErrors)) } - if params.capabilities != nil { + if params.capabilities.C != nil { regoArgs = append(regoArgs, rego.Capabilities(params.capabilities.C)) + } else { + regoArgs = append(regoArgs, rego.Capabilities(ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(params.regoVersion())))) } if params.strict { @@ -878,7 +880,7 @@ func generateOptimizedBundle(params evalCommandParams, asBundle bool, filter loa if params.capabilities.C != nil { capabilities = params.capabilities.C } else { - capabilities = ast.CapabilitiesForThisVersion() + capabilities = ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(params.regoVersion())) } compiler := compile.New(). 
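The eval.go hunk above does two things: the old guard tested the flag wrapper (`params.capabilities != nil`) rather than its contents (`params.capabilities.C`), and there was no version-aware fallback when no capabilities file was supplied. The shape now shared by the bench, build, check, and eval commands is roughly the following sketch; `resolveCapabilities` is a hypothetical name for illustration, not a helper added by this patch.

    // Explicit --capabilities content wins; otherwise derive capabilities
    // from the effective Rego version (--v0-compatible selects ast.RegoV0).
    func resolveCapabilities(explicit *ast.Capabilities, v0Compatible bool) *ast.Capabilities {
        if explicit != nil {
            return explicit
        }
        rv := ast.DefaultRegoVersion
        if v0Compatible {
            rv = ast.RegoV0
        }
        return ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(rv))
    }
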
diff --git a/cmd/eval_test.go b/cmd/eval_test.go index 86826fb3cc..21cbfb02a9 100755 --- a/cmd/eval_test.go +++ b/cmd/eval_test.go @@ -2907,6 +2907,224 @@ func TestEvalPolicyWithCompatibleFlags(t *testing.T) { } } +func TestEvalPolicyWithRegoV1Capability(t *testing.T) { + tests := []struct { + note string + v0Compatible bool + capabilities *ast.Capabilities + modules map[string]string + expErrs []string + }{ + { + note: "v0 module, v0-compatible, no capabilities", + v0Compatible: true, + modules: map[string]string{ + "test.rego": `package test + allow { + 1 < 2 + }`, + }, + }, + { + note: "v0 module, v0-compatible, v0 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + modules: map[string]string{ + "test.rego": `package test + allow { + 1 < 2 + }`, + }, + }, + { + note: "v0 module, v0-compatible, v1 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + modules: map[string]string{ + "test.rego": `package test + allow { + 1 < 2 + }`, + }, + }, + { + note: "v0 module, not v0-compatible, no capabilities", + v0Compatible: false, + modules: map[string]string{ + "test.rego": `package test + allow { + 1 < 2 + }`, + }, + expErrs: []string{ + "test.rego:2: rego_parse_error: `if` keyword is required before rule body", + }, + }, + { + note: "v0 module, not v0-compatible, v0 capabilities", + v0Compatible: false, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + modules: map[string]string{ + "test.rego": `package test + allow { + 1 < 2 + }`, + }, + expErrs: []string{ + "rego_parse_error: illegal capabilities: rego_v1 feature required for parsing v1 Rego", + }, + }, + { + note: "v0 module, not v0-compatible, v1 capabilities", + v0Compatible: false, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + modules: map[string]string{ + "test.rego": `package test + allow { + 1 < 2 + }`, + }, + expErrs: []string{ + "test.rego:2: rego_parse_error: `if` keyword is required before rule body", + }, + }, + + { + note: "v1 module, v0-compatible, no capabilities", + v0Compatible: true, + modules: map[string]string{ + "test.rego": `package test + allow if { + 1 < 2 + }`, + }, + expErrs: []string{ + "test.rego:2: rego_parse_error: var cannot be used for rule name", + }, + }, + { + note: "v1 module, v0-compatible, v0 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + modules: map[string]string{ + "test.rego": `package test + allow if { + 1 < 2 + }`, + }, + expErrs: []string{ + "test.rego:2: rego_parse_error: var cannot be used for rule name", + }, + }, + { + note: "v1 module, v0-compatible, v1 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + modules: map[string]string{ + "test.rego": `package test + allow if { + 1 < 2 + }`, + }, + expErrs: []string{ + "test.rego:2: rego_parse_error: var cannot be used for rule name", + }, + }, + { + note: "v1 module, not v0-compatible, no capabilities", + v0Compatible: false, + modules: map[string]string{ + "test.rego": `package test + allow if { + 1 < 2 + }`, + }, + }, + { + note: "v1 module, not v0-compatible, v0 capabilities", + v0Compatible: false, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + modules: map[string]string{ + "test.rego": `package test + allow if { 
+ 1 < 2 + }`, + }, + expErrs: []string{ + "rego_parse_error: illegal capabilities: rego_v1 feature required for parsing v1 Rego", + }, + }, + { + note: "v1 module, not v0-compatible, v1 capabilities", + v0Compatible: false, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + modules: map[string]string{ + "test.rego": `package test + allow if { + 1 < 2 + }`, + }, + }, + } + + setup := []struct { + name string + commandParams func(params *evalCommandParams, path string) + }{ + { + name: "Files", + commandParams: func(params *evalCommandParams, path string) { + params.dataPaths = newrepeatedStringFlag([]string{path}) + }, + }, + { + name: "Bundle", + commandParams: func(params *evalCommandParams, path string) { + if err := params.bundlePaths.Set(path); err != nil { + t.Fatal(err) + } + }, + }, + } + + for _, s := range setup { + for _, tc := range tests { + t.Run(fmt.Sprintf("%s: %s", s.name, tc.note), func(t *testing.T) { + test.WithTempFS(tc.modules, func(path string) { + params := newEvalCommandParams() + s.commandParams(¶ms, path) + _ = params.outputFormat.Set(evalPrettyOutput) + params.v0Compatible = tc.v0Compatible + params.capabilities.C = tc.capabilities + + var buf bytes.Buffer + + defined, err := eval([]string{"data.test.allow"}, params, &buf) + + if len(tc.expErrs) > 0 { + if err == nil { + t.Fatal("expected error, got none") + } + + actual := buf.String() + for _, expErr := range tc.expErrs { + if !strings.Contains(actual, expErr) { + t.Fatalf("expected error:\n\n%v\n\ngot\n\n%v", expErr, actual) + } + } + } else { + if err != nil { + t.Fatalf("Unexpected error: %v, buf: %s", err, buf.String()) + } else if !defined { + t.Fatal("expected result to be defined") + } + } + }) + }) + } + } +} + func TestEvalPolicyWithBundleRegoVersion(t *testing.T) { tests := []struct { note string diff --git a/cmd/inspect_test.go b/cmd/inspect_test.go index 4c23f0d5a9..1601acc299 100644 --- a/cmd/inspect_test.go +++ b/cmd/inspect_test.go @@ -54,7 +54,7 @@ func TestDoInspect(t *testing.T) { } res := `{ - "capabilities": {}, + "capabilities": {"features": ["rego_v1"]}, "manifest": {"revision": "rev", "roots": ["foo", "bar", "fuz", "baz", "a", "x"]}, "signatures_config": {}, "namespaces": {"data": ["/data.json"], "data.foo": ["/example/foo.rego"]} @@ -63,7 +63,7 @@ func TestDoInspect(t *testing.T) { exp := util.MustUnmarshalJSON([]byte(res)) result := util.MustUnmarshalJSON(out.Bytes()) if !reflect.DeepEqual(exp, result) { - t.Fatalf("expected inspect output to be %v, got %v", exp, result) + t.Fatalf("expected inspect output to be:\n\n%v\n\ngot:\n\n%v", exp, result) } }) } @@ -1075,6 +1075,9 @@ p if { "type": "function" } } + ], + "features": [ + "rego_v1" ] } }`, @@ -1084,7 +1087,6 @@ p if { note: "known ref replaced inside 'with' stmt", files: [][2]string{ {"/policy.rego", `package test -import rego.v1 foo.bar(_) := false @@ -1113,7 +1115,7 @@ test_p if { }, "capabilities": { "features": [ - "rego_v1_import" + "rego_v1" ] } }`, @@ -1122,7 +1124,6 @@ test_p if { note: "unknown ref replaced inside 'with' stmt", files: [][2]string{ {"/policy.rego", `package test -import rego.v1 p if { data.foo.bar(42) @@ -1149,7 +1150,7 @@ test_p if { }, "capabilities": { "features": [ - "rego_v1_import" + "rego_v1" ] } }`, @@ -1158,7 +1159,6 @@ test_p if { note: "unknown built-in (var) replaced inside 'with' stmt", files: [][2]string{ {"/policy.rego", `package test -import rego.v1 p if { foo(42) @@ -1185,7 +1185,7 @@ test_p if { }, "capabilities": { "features": [ - "rego_v1_import" 
+ "rego_v1" ] } }`, @@ -1194,7 +1194,6 @@ test_p if { note: "unknown built-in (ref) replaced inside 'with' stmt", files: [][2]string{ {"/policy.rego", `package test -import rego.v1 p if { foo.bar(42) @@ -1221,7 +1220,7 @@ test_p if { }, "capabilities": { "features": [ - "rego_v1_import" + "rego_v1" ] } }`, @@ -1230,7 +1229,6 @@ test_p if { note: "call replaced by unknown data ref inside 'with' stmt", files: [][2]string{ {"/policy.rego", `package test -import rego.v1 p if { foo(42) @@ -1277,7 +1275,7 @@ test_p if { } ], "features": [ - "rego_v1_import" + "rego_v1" ] } }`, @@ -1286,7 +1284,6 @@ test_p if { note: "call replaced by unknown built-in (var) inside 'with' stmt", files: [][2]string{ {"/policy.rego", `package test -import rego.v1 p if { foo(42) @@ -1314,7 +1311,7 @@ test_p if { }, "capabilities": { "features": [ - "rego_v1_import" + "rego_v1" ] } }`, @@ -1323,7 +1320,6 @@ test_p if { note: "call replaced by unknown built-in (ref) inside 'with' stmt", files: [][2]string{ {"/policy.rego", `package test -import rego.v1 p if { foo(42) @@ -1371,7 +1367,7 @@ test_p if { } ], "features": [ - "rego_v1_import" + "rego_v1" ] } }`, @@ -1509,6 +1505,9 @@ p if { }, "infix": "==" } + ], + "features": [ + "rego_v1" ] } }`) diff --git a/cmd/test.go b/cmd/test.go index c7db7eae6e..f271395c6c 100644 --- a/cmd/test.go +++ b/cmd/test.go @@ -116,11 +116,17 @@ func opaTest(args []string, testParams testCommandParams) (int, error) { var bundles map[string]*bundle.Bundle var store storage.Store + popts := ast.ParserOptions{ + RegoVersion: testParams.RegoVersion(), + Capabilities: testParams.capabilities.C, + ProcessAnnotation: true, + } + if testParams.bundleMode { - bundles, err = tester.LoadBundlesWithRegoVersion(args, filter.Apply, testParams.RegoVersion()) + bundles, err = tester.LoadBundlesWithParserOptions(args, filter.Apply, popts) store = inmem.NewWithOpts(inmem.OptRoundTripOnWrite(false)) } else { - modules, store, err = tester.LoadWithRegoVersion(args, filter.Apply, testParams.RegoVersion()) + modules, store, err = tester.LoadWithParserOptions(args, filter.Apply, popts) } if err != nil { diff --git a/cmd/test_test.go b/cmd/test_test.go index ead9b48521..b8f438f796 100644 --- a/cmd/test_test.go +++ b/cmd/test_test.go @@ -2578,6 +2578,318 @@ test_l if { } } +func TestRunWithRegoV1Capability(t *testing.T) { + tests := []struct { + note string + v0Compatible bool + capabilities *ast.Capabilities + files map[string]string + expErrs []string + }{ + { + note: "v0 module, v0-compatible, no capabilities", + v0Compatible: true, + files: map[string]string{ + "/test.rego": `package test + +l1 := {1, 3, 5} +l2[v] { + v := l1[_] +} + +test_l { + l1 == l2 +}`, + }, + }, + { + note: "v0 module, v0-compatible, v0 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + files: map[string]string{ + "/test.rego": `package test + +l1 := {1, 3, 5} +l2[v] { + v := l1[_] +} + +test_l { + l1 == l2 +}`, + }, + }, + { + note: "v0 module, v0-compatible, v1 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + files: map[string]string{ + "/test.rego": `package test + +l1 := {1, 3, 5} +l2[v] { + v := l1[_] +} + +test_l { + l1 == l2 +}`, + }, + }, + + { + note: "v0 module, not v0-compatible, no capabilities", + files: map[string]string{ + "/test.rego": `package test + +l1 := {1, 3, 5} +l2[v] { + v := l1[_] +} + +test_l { + l1 == l2 +}`, + }, + expErrs: []string{ + "test.rego:4: 
rego_parse_error: `if` keyword is required before rule body", + "test.rego:4: rego_parse_error: `contains` keyword is required for partial set rules", + "test.rego:8: rego_parse_error: `if` keyword is required before rule body", + }, + }, + { + note: "v0 module, not v0-compatible, v0 capabilities", + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + files: map[string]string{ + "/test.rego": `package test + +l1 := {1, 3, 5} +l2[v] { + v := l1[_] +} + +test_l { + l1 == l2 +}`, + }, + expErrs: []string{ + "rego_parse_error: illegal capabilities: rego_v1 feature required for parsing v1 Rego", + }, + }, + { + note: "v0 module, not v0-compatible, v1 capabilities", + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + files: map[string]string{ + "/test.rego": `package test + +l1 := {1, 3, 5} +l2[v] { + v := l1[_] +} + +test_l { + l1 == l2 +}`, + }, + expErrs: []string{ + "test.rego:4: rego_parse_error: `if` keyword is required before rule body", + "test.rego:4: rego_parse_error: `contains` keyword is required for partial set rules", + "test.rego:8: rego_parse_error: `if` keyword is required before rule body", + }, + }, + + { + note: "v1 module, v0-compatible, no capabilities", + v0Compatible: true, + files: map[string]string{ + "/test.rego": `package test + +l1 := {1, 3, 5} +l2 contains v if { + v := l1[_] +} + +test_l if { + l1 == l2 +}`, + }, + expErrs: []string{ + "test.rego:4: rego_parse_error: var cannot be used for rule name", + }, + }, + { + note: "v1 module, v0-compatible, v0 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + files: map[string]string{ + "/test.rego": `package test + +l1 := {1, 3, 5} +l2 contains v if { + v := l1[_] +} + +test_l if { + l1 == l2 +}`, + }, + expErrs: []string{ + "test.rego:4: rego_parse_error: var cannot be used for rule name", + }, + }, + { + note: "v1 module, v0-compatible, v1 capabilities", + v0Compatible: true, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + files: map[string]string{ + "/test.rego": `package test + +l1 := {1, 3, 5} +l2 contains v if { + v := l1[_] +} + +test_l if { + l1 == l2 +}`, + }, + expErrs: []string{ + "test.rego:4: rego_parse_error: var cannot be used for rule name", + }, + }, + + { + note: "v1 module, not v0-compatible, no capabilities", + files: map[string]string{ + "/test.rego": `package test + +l1 := {1, 3, 5} +l2 contains v if { + v := l1[_] +} + +test_l if { + l1 == l2 +}`, + }, + }, + { + note: "v1 module, not v0-compatible, v0 capabilities", + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + files: map[string]string{ + "/test.rego": `package test + +l1 := {1, 3, 5} +l2 contains v if { + v := l1[_] +} + +test_l if { + l1 == l2 +}`, + }, + expErrs: []string{ + "rego_parse_error: illegal capabilities: rego_v1 feature required for parsing v1 Rego", + }, + }, + { + note: "v1 module, not v0-compatible, v1 capabilities", + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + files: map[string]string{ + "/test.rego": `package test + +l1 := {1, 3, 5} +l2 contains v if { + v := l1[_] +} + +test_l if { + l1 == l2 +}`, + }, + }, + } + + loadTypes := []loadType{loadFile, loadBundle, loadTarball} + + for _, tc := range tests { + for _, loadType := range loadTypes { + t.Run(fmt.Sprintf("%s (%s)", tc.note, loadType), func(t *testing.T) { + var files map[string]string + if 
loadType != loadTarball { + files = tc.files + } + test.WithTempFS(files, func(root string) { + if loadType == loadTarball { + f, err := os.Create(filepath.Join(root, "bundle.tar.gz")) + if err != nil { + t.Fatal(err) + } + + testBundle := bundle.Bundle{ + Data: map[string]interface{}{}, + } + for k, v := range tc.files { + testBundle.Modules = append(testBundle.Modules, bundle.ModuleFile{ + Path: k, + Raw: []byte(v), + }) + } + + if err := bundle.Write(f, testBundle); err != nil { + t.Fatal(err) + } + } + + var buf bytes.Buffer + var errBuf bytes.Buffer + + testParams := newTestCommandParams() + testParams.bundleMode = loadType == loadBundle + testParams.count = 1 + testParams.output = &buf + testParams.errOutput = &errBuf + testParams.v0Compatible = tc.v0Compatible + testParams.capabilities.C = tc.capabilities + + var paths []string + if loadType == loadTarball { + paths = []string{filepath.Join(root, "bundle.tar.gz")} + } else { + paths = []string{root} + } + + exitCode, _ := opaTest(paths, testParams) + if len(tc.expErrs) > 0 { + if exitCode == 0 { + t.Fatalf("expected non-zero exit code") + } + + for _, expErr := range tc.expErrs { + if actual := errBuf.String(); !strings.Contains(actual, expErr) { + t.Fatalf("expected error output to contain:\n\n%q\n\nbut got:\n\n%q", expErr, actual) + } + } + } else { + if exitCode != 0 { + t.Fatalf("unexpected exit code: %d", exitCode) + } + + if errBuf.Len() > 0 { + t.Fatalf("expected no error output but got:\n\n%q", buf.String()) + } + + expected := "PASS: 1/1" + if actual := buf.String(); !strings.Contains(actual, expected) { + t.Fatalf("expected output to contain:\n\n%s\n\nbut got:\n\n%q", expected, actual) + } + } + }) + }) + } + } +} + func TestRun_CompatibleFlags(t *testing.T) { tests := []struct { note string diff --git a/v1/ast/capabilities.go b/v1/ast/capabilities.go index b1491d7df4..e7d561d9e8 100644 --- a/v1/ast/capabilities.go +++ b/v1/ast/capabilities.go @@ -52,6 +52,7 @@ var minVersionIndex = func() VersionIndex { // heads, they wouldn't be able to parse them. const FeatureRefHeadStringPrefixes = "rule_head_ref_string_prefixes" const FeatureRefHeads = "rule_head_refs" +const FeatureRegoV1 = "rego_v1" const FeatureRegoV1Import = "rego_v1_import" // Capabilities defines a structure containing data that describes the capabilities @@ -83,8 +84,30 @@ type WasmABIVersion struct { Minor int `json:"minor_version"` } +type CapabilitiesOptions struct { + regoVersion RegoVersion +} + +func newCapabilitiesOptions(opts []CapabilitiesOption) CapabilitiesOptions { + co := CapabilitiesOptions{} + for _, opt := range opts { + opt(&co) + } + return co +} + +type CapabilitiesOption func(*CapabilitiesOptions) + +func CapabilitiesRegoVersion(regoVersion RegoVersion) CapabilitiesOption { + return func(o *CapabilitiesOptions) { + o.regoVersion = regoVersion + } +} + // CapabilitiesForThisVersion returns the capabilities of this version of OPA. 
-func CapabilitiesForThisVersion() *Capabilities { +func CapabilitiesForThisVersion(opts ...CapabilitiesOption) *Capabilities { + co := newCapabilitiesOptions(opts) + f := &Capabilities{} for _, vers := range capabilities.ABIVersions() { @@ -97,17 +120,29 @@ func CapabilitiesForThisVersion() *Capabilities { return f.Builtins[i].Name < f.Builtins[j].Name }) - for kw := range futureKeywords { - f.FutureKeywords = append(f.FutureKeywords, kw) - } - sort.Strings(f.FutureKeywords) + if co.regoVersion == RegoV0 || co.regoVersion == RegoV0CompatV1 { + for kw := range allFutureKeywords { + f.FutureKeywords = append(f.FutureKeywords, kw) + } - f.Features = []string{ - FeatureRefHeadStringPrefixes, - FeatureRefHeads, - FeatureRegoV1Import, + f.Features = []string{ + FeatureRefHeadStringPrefixes, + FeatureRefHeads, + FeatureRegoV1Import, + } + } else { + for kw := range futureKeywords { + f.FutureKeywords = append(f.FutureKeywords, kw) + } + + f.Features = []string{ + FeatureRegoV1, + } } + sort.Strings(f.FutureKeywords) + sort.Strings(f.Features) + return f } diff --git a/v1/ast/capabilities_test.go b/v1/ast/capabilities_test.go index 4ac499fa0a..29fab8df9f 100644 --- a/v1/ast/capabilities_test.go +++ b/v1/ast/capabilities_test.go @@ -9,25 +9,40 @@ import ( func TestParserCatchesIllegalCapabilities(t *testing.T) { tests := []struct { - note string - regoVersion RegoVersion + note string + regoVersion RegoVersion + capabilities Capabilities + expErr string }{ { - note: "v0", + note: "v0, bad future keyword", regoVersion: RegoV0, + capabilities: Capabilities{ + FutureKeywords: []string{"deadbeef"}, + }, + expErr: "illegal capabilities: unknown keyword: deadbeef", }, { - note: "v1", + note: "v1, bad future keyword", regoVersion: RegoV1, + capabilities: Capabilities{ + Features: []string{FeatureRegoV1}, + FutureKeywords: []string{"deadbeef"}, + }, + expErr: "illegal capabilities: unknown keyword: deadbeef", + }, + { + note: "v1, no rego_v1 feature", + regoVersion: RegoV1, + capabilities: Capabilities{}, + expErr: "illegal capabilities: rego_v1 feature required for parsing v1 Rego", }, } for _, tc := range tests { t.Run(tc.note, func(t *testing.T) { var opts ParserOptions - opts.Capabilities = &Capabilities{ - FutureKeywords: []string{"deadbeef"}, - } + opts.Capabilities = &tc.capabilities opts.RegoVersion = tc.regoVersion @@ -36,7 +51,7 @@ func TestParserCatchesIllegalCapabilities(t *testing.T) { t.Fatal("expected error") } else if errs, ok := err.(Errors); !ok || len(errs) != 1 { t.Fatal("expected exactly one error but got:", err) - } else if errs[0].Code != ParseErr || errs[0].Message != "illegal capabilities: unknown keyword: deadbeef" { + } else if errs[0].Code != ParseErr || errs[0].Message != tc.expErr { t.Fatal("unexpected error:", err) } }) diff --git a/v1/ast/compile.go b/v1/ast/compile.go index 91876a55e3..5f78b0da1f 100644 --- a/v1/ast/compile.go +++ b/v1/ast/compile.go @@ -117,7 +117,7 @@ type Compiler struct { // with the key being the generated name and value being the original. RewrittenVars map[Var]Var - // Capabliities required by the modules that were compiled. + // Capabilities required by the modules that were compiled. 
 	Required    *Capabilities
 	localvargen *localVarGenerator
@@ -332,7 +332,7 @@ func NewCompiler() *Compiler {
 		{"InitLocalVarGen", "compile_stage_init_local_var_gen", c.initLocalVarGen},
 		{"RewriteRuleHeadRefs", "compile_stage_rewrite_rule_head_refs", c.rewriteRuleHeadRefs},
 		{"CheckKeywordOverrides", "compile_stage_check_keyword_overrides", c.checkKeywordOverrides},
-		{"CheckDuplicateImports", "compile_stage_check_duplicate_imports", c.checkDuplicateImports},
+		{"CheckDuplicateImports", "compile_stage_check_imports", c.checkImports},
 		{"RemoveImports", "compile_stage_remove_imports", c.removeImports},
 		{"SetModuleTree", "compile_stage_set_module_tree", c.setModuleTree},
 		{"SetRuleTree", "compile_stage_set_rule_tree", c.setRuleTree}, // depends on RewriteRuleHeadRefs
@@ -971,21 +971,46 @@ func (c *Compiler) buildRequiredCapabilities() {
 	features := map[string]struct{}{}
 
 	// extract required keywords from modules
+	keywords := map[string]struct{}{}
 	futureKeywordsPrefix := Ref{FutureRootDocument, StringTerm("keywords")}
 	for _, name := range c.sorted {
 		for _, imp := range c.imports[name] {
+			mod := c.Modules[name]
 			path := imp.Path.Value.(Ref)
 			switch {
 			case path.Equal(RegoV1CompatibleRef):
-				features[FeatureRegoV1Import] = struct{}{}
+				if !c.moduleIsRegoV1(mod) {
+					features[FeatureRegoV1Import] = struct{}{}
+				}
 			case path.HasPrefix(futureKeywordsPrefix):
 				if len(path) == 2 {
-					for kw := range futureKeywords {
-						keywords[kw] = struct{}{}
+					if c.moduleIsRegoV1(mod) {
+						for kw := range futureKeywords {
+							keywords[kw] = struct{}{}
+						}
+					} else {
+						for kw := range allFutureKeywords {
+							keywords[kw] = struct{}{}
+						}
 					}
 				} else {
-					keywords[string(path[2].Value.(String))] = struct{}{}
+					kw := string(path[2].Value.(String))
+					if c.moduleIsRegoV1(mod) {
+						for allowedKw := range futureKeywords {
+							if kw == allowedKw {
+								keywords[kw] = struct{}{}
+								break
+							}
+						}
+					} else {
+						for allowedKw := range allFutureKeywords {
+							if kw == allowedKw {
+								keywords[kw] = struct{}{}
+								break
+							}
+						}
+					}
 				}
 			}
 		}
 	}
@@ -996,13 +1021,19 @@ func (c *Compiler) buildRequiredCapabilities() {
 
 	// extract required features from modules
 	for _, name := range c.sorted {
-		for _, rule := range c.Modules[name].Rules {
-			refLen := len(rule.Head.Reference)
-			if refLen >= 3 {
-				if refLen > len(rule.Head.Reference.ConstantPrefix()) {
-					features[FeatureRefHeads] = struct{}{}
-				} else {
-					features[FeatureRefHeadStringPrefixes] = struct{}{}
+		mod := c.Modules[name]
+
+		if c.moduleIsRegoV1(mod) {
+			features[FeatureRegoV1] = struct{}{}
+		} else {
+			for _, rule := range mod.Rules {
+				refLen := len(rule.Head.Reference)
+				if refLen >= 3 {
+					if refLen > len(rule.Head.Reference.ConstantPrefix()) {
+						features[FeatureRefHeads] = struct{}{}
+					} else {
+						features[FeatureRefHeadStringPrefixes] = struct{}{}
+					}
 				}
 			}
 		}
@@ -1725,12 +1756,22 @@ func (c *Compiler) GetAnnotationSet() *AnnotationSet {
 	return c.annotationSet
 }
 
-func (c *Compiler) checkDuplicateImports() {
+func (c *Compiler) checkImports() {
 	modules := make([]*Module, 0, len(c.Modules))
 
+	supportsRegoV1Import := c.capabilities.ContainsFeature(FeatureRegoV1Import) ||
+		c.capabilities.ContainsFeature(FeatureRegoV1)
+
 	for _, name := range c.sorted {
 		mod := c.Modules[name]
-		if c.strict || c.moduleIsRegoV1(mod) {
+
+		for _, imp := range mod.Imports {
+			if !supportsRegoV1Import && Compare(imp.Path, RegoV1CompatibleRef) == 0 {
+				c.err(NewError(CompileErr, imp.Loc(), "rego.v1 import is not supported"))
+			}
+		}
+
+		if c.strict || c.moduleIsRegoV1Compatible(mod) {
			modules = append(modules, mod)
 		}
 	}
@@ -1744,7 +1785,7 @@ func (c *Compiler) checkDuplicateImports() {
 
 func (c *Compiler) checkKeywordOverrides() {
 	for _, name := range c.sorted {
 		mod := c.Modules[name]
-		if c.strict || c.moduleIsRegoV1(mod) {
+		if c.strict || c.moduleIsRegoV1Compatible(mod) {
 			errs := checkRootDocumentOverrides(mod)
 			for _, err := range errs {
 				c.err(err)
@@ -1756,6 +1797,23 @@ func (c *Compiler) checkKeywordOverrides() {
 
 func (c *Compiler) moduleIsRegoV1(mod *Module) bool {
 	if mod.regoVersion == RegoUndefined {
 		switch c.defaultRegoVersion {
+		case RegoUndefined:
+			c.err(NewError(CompileErr, mod.Package.Loc(), "cannot determine rego version for module"))
+			return false
+		case RegoV1:
+			return true
+		}
+		return false
+	}
+	return mod.regoVersion == RegoV1
+}
+
+func (c *Compiler) moduleIsRegoV1Compatible(mod *Module) bool {
+	if mod.regoVersion == RegoUndefined {
+		switch c.defaultRegoVersion {
+		case RegoUndefined:
+			c.err(NewError(CompileErr, mod.Package.Loc(), "cannot determine rego version for module"))
+			return false
 		case RegoV1, RegoV0CompatV1:
 			return true
 		}
@@ -1896,6 +1954,9 @@ func (c *Compiler) rewriteRuleHeadRefs() {
 			cannotSpeakStringPrefixRefs = false
 		case FeatureRefHeads:
 			cannotSpeakGeneralRefs = false
+		case FeatureRegoV1:
+			cannotSpeakStringPrefixRefs = false
+			cannotSpeakGeneralRefs = false
 		}
 	}
 
@@ -5800,7 +5861,7 @@ func safetyErrorSlice(unsafe unsafeVars, rewritten map[Var]Var) (result Errors)
 			v = w
 		}
 		if !v.IsGenerated() {
-			if _, ok := futureKeywords[string(v)]; ok {
+			if _, ok := allFutureKeywords[string(v)]; ok {
 				result = append(result, NewError(UnsafeVarErr, pair.Loc,
 					"var %[1]v is unsafe (hint: `import future.keywords.%[1]v` to import a future keyword)", v))
 				continue
diff --git a/v1/ast/compile_test.go b/v1/ast/compile_test.go
index a76bd277d9..1d52df8d8c 100644
--- a/v1/ast/compile_test.go
+++ b/v1/ast/compile_test.go
@@ -9463,9 +9463,10 @@ func TestCompilerBuildRequiredCapabilities(t *testing.T) {
 			`,
 			opts:     CompileOpts{ParserOptions: ParserOptions{RegoVersion: RegoV1}},
 			builtins: []string{"eq", "gt"},
+			features: []string{"rego_v1"},
 		},
 		{
-			note: "rego.v1 import",
+			note: "rego.v1 import, v0 module",
 			module: `
 				package x
 
 				import rego.v1
 
 				p if { true }
 			`,
+			opts:     CompileOpts{ParserOptions: ParserOptions{RegoVersion: RegoV0}},
 			features: []string{"rego_v1_import"},
 		},
 		{
-			note: "future.keywords wildcard",
+			note: "rego.v1 import, v1 module",
+			module: `
+				package x
+
+				import rego.v1
+
+				p if { true }
+			`,
+			opts:     CompileOpts{ParserOptions: ParserOptions{RegoVersion: RegoV1}},
+			features: []string{"rego_v1"},
+		},
+		{
+			note: "rego.v1 import, default rego-version module (v1)",
+			module: `
+				package x
+
+				import rego.v1
+
+				p if { true }
+			`,
+			features: []string{"rego_v1"},
+		},
+		{
+			note: "future.keywords wildcard, v0 module",
 			module: `
 				package x
 
 				import future.keywords
 			`,
+			opts:     CompileOpts{ParserOptions: ParserOptions{RegoVersion: RegoV0}},
 			keywords: []string{"contains", "every", "if", "in"},
 		},
 		{
-			note: "future.keywords specific",
+			note: "future.keywords wildcard, v1 module",
+			module: `
+				package x
+
+				import future.keywords
+			`,
+			opts:     CompileOpts{ParserOptions: ParserOptions{RegoVersion: RegoV1}},
+			features: []string{"rego_v1"},
+		},
+		{
+			note: "future.keywords wildcard, default rego-version module (v1)",
+			module: `
+				package x
+
+				import future.keywords
+			`,
+			features: []string{"rego_v1"},
+		},
+		{
+			note: "future.keywords specific, v0 module",
 			module: `
 				package x
@@ -9494,8 +9540,34 @@ func TestCompilerBuildRequiredCapabilities(t *testing.T) {
 				import future.keywords.contains
 				import future.keywords.every
 			`,
+			opts:     CompileOpts{ParserOptions: ParserOptions{RegoVersion: RegoV0}},
 			keywords: []string{"contains", "every", "if", "in"},
 		},
+		{
+			note: "future.keywords specific, v1 module",
+			module: `
+				package x
+
+				import future.keywords.in
+				import future.keywords.if
+				import future.keywords.contains
+				import future.keywords.every
+			`,
+			opts:     CompileOpts{ParserOptions: ParserOptions{RegoVersion: RegoV1}},
+			features: []string{"rego_v1"},
+		},
+		{
+			note: "future.keywords specific, default rego-version module (v1)",
+			module: `
+				package x
+
+				import future.keywords.in
+				import future.keywords.if
+				import future.keywords.contains
+				import future.keywords.every
+			`,
+			features: []string{"rego_v1"},
+		},
 		{
 			note: "rewriting erases assignment",
 			module: `
@@ -9505,6 +9577,7 @@ func TestCompilerBuildRequiredCapabilities(t *testing.T) {
 			`,
 			opts:     CompileOpts{ParserOptions: ParserOptions{RegoVersion: RegoV1}},
 			builtins: []string{"assign", "eq"},
+			features: []string{"rego_v1"},
 		},
 		{
 			note: "rewriting erases equals",
@@ -9515,6 +9588,7 @@ func TestCompilerBuildRequiredCapabilities(t *testing.T) {
 			`,
 			opts:     CompileOpts{ParserOptions: ParserOptions{RegoVersion: RegoV1}},
 			builtins: []string{"eq", "equal"},
+			features: []string{"rego_v1"},
 		},
 		{
 			note: "rewriting erases print",
@@ -9525,6 +9599,7 @@ func TestCompilerBuildRequiredCapabilities(t *testing.T) {
 			`,
 			opts:     CompileOpts{EnablePrintStatements: true, ParserOptions: ParserOptions{RegoVersion: RegoV1}},
 			builtins: []string{"eq", "internal.print", "print"},
+			features: []string{"rego_v1"},
 		},
 		{
@@ -9536,16 +9611,69 @@ func TestCompilerBuildRequiredCapabilities(t *testing.T) {
 			`,
 			opts:     CompileOpts{EnablePrintStatements: false, ParserOptions: ParserOptions{RegoVersion: RegoV1}},
 			builtins: []string{"print"}, // only print required because compiler will replace with true
+			features: []string{"rego_v1"},
 		},
 		{
-			note: "dots in the head",
+			note: "dots in the head, v0 module",
 			module: `
 				package x
 
 				a.b.c := 7
 			`,
+			opts:     CompileOpts{ParserOptions: ParserOptions{RegoVersion: RegoV0}},
 			features: []string{"rule_head_ref_string_prefixes"},
 		},
+		{
+			note: "dots in the head, v1 module",
+			module: `
+				package x
+
+				a.b.c := 7
+			`,
+			opts:     CompileOpts{ParserOptions: ParserOptions{RegoVersion: RegoV1}},
+			features: []string{"rego_v1"}, // rego_v1 includes rule_head_ref_string_prefixes
+		},
+		{
+			note: "dots in the head, default rego-version module (v1)",
+			module: `
+				package x
+
+				a.b.c := 7
+			`,
+			features: []string{"rego_v1"}, // rego_v1 includes rule_head_ref_string_prefixes
+		},
+		{
+			note: "dynamic dots in the head, v0 module",
+			module: `
+				package x
+
+				a[x].c[y] := z { x := "b"; y := "c"; z := "d" }
+			`,
+			opts:     CompileOpts{ParserOptions: ParserOptions{RegoVersion: RegoV0}},
+			builtins: []string{"assign", "eq"},
+			features: []string{"rule_head_refs"},
+		},
+		{
+			note: "dynamic dots in the head, v1 module",
+			module: `
+				package x
+
+				a[x].c[y] := z if { x := "b"; y := "c"; z := "d" }
+			`,
+			opts:     CompileOpts{ParserOptions: ParserOptions{RegoVersion: RegoV1}},
+			builtins: []string{"assign", "eq"},
+			features: []string{"rego_v1"}, // rego_v1 includes rule_head_refs
+		},
+		{
+			note: "dynamic dots in the head, default rego-version module (v1)",
+			module: `
+				package x
+
+				a[x].c[y] := z if { x := "b"; y := "c"; z := "d" }
+			`,
+			builtins: []string{"assign", "eq"},
+			features: []string{"rego_v1"}, // rego_v1 includes rule_head_refs
+		},
 	}
 
 	for _, tc := range tests {
@@ -10273,6 +10401,14 @@ func TestCompilerCapabilitiesFeatures(t *testing.T) {
 			module: `package test
 				p.q.r := 42`,
 		},
+		{
+			note: "rego-v1 feature, ref-head rule",
+			features: []string{
+				FeatureRegoV1,
+			},
+			module: `package test
+				p.q.r := 42`,
+		},
 		{
 			note: "string-prefix-ref-head feature, general-ref-head rule",
 			features: []string{
@@ -10290,6 +10426,14 @@ func TestCompilerCapabilitiesFeatures(t *testing.T) {
 			module: `package test
 				p[q].r[s] := 42 if { q := "foo"; s := "bar" }`,
 		},
+		{
+			note: "rego-v1 feature, general-ref-head rule",
+			features: []string{
+				FeatureRegoV1,
+			},
+			module: `package test
+				p[q].r[s] := 42 if { q := "foo"; s := "bar" }`,
+		},
 		{
 			note: "string-prefix-ref-head & ref-head features, general-ref-head rule",
 			features: []string{
@@ -10299,6 +10443,16 @@ func TestCompilerCapabilitiesFeatures(t *testing.T) {
 			module: `package test
 				p[q].r[s] := 42 if { q := "foo"; s := "bar" }`,
 		},
+		{
+			note: "string-prefix-ref-head & ref-head & rego-v1 features, general-ref-head rule",
+			features: []string{
+				FeatureRefHeadStringPrefixes,
+				FeatureRefHeads,
+				FeatureRegoV1,
+			},
+			module: `package test
+				p[q].r[s] := 42 if { q := "foo"; s := "bar" }`,
+		},
 		{
 			note: "string-prefix-ref-head & ref-head features, ref-head rule",
 			features: []string{
@@ -10308,6 +10462,16 @@ func TestCompilerCapabilitiesFeatures(t *testing.T) {
 			module: `package test
 				p.q.r := 42`,
 		},
+		{
+			note: "string-prefix-ref-head & ref-head & rego-v1 features, ref-head rule",
+			features: []string{
+				FeatureRefHeadStringPrefixes,
+				FeatureRefHeads,
+				FeatureRegoV1,
+			},
+			module: `package test
+				p.q.r := 42`,
+		},
 		{
 			note:     "no features, string-prefix-ref-head with contains kw",
 			features: []string{},
@@ -10334,6 +10498,15 @@ func TestCompilerCapabilitiesFeatures(t *testing.T) {
 			module: `package test
 				import future.keywords.contains
 				p.x contains 1`,
 		},
+		{
+			note: "rego-v1 feature, string-prefix-ref-head with contains kw",
+			features: []string{
+				FeatureRegoV1,
+			},
+			module: `package test
+				import future.keywords.contains
+				p.x contains 1`,
+		},
 		{
 			note: "no features, general-ref-head with contains kw",
@@ -10362,6 +10535,41 @@ func TestCompilerCapabilitiesFeatures(t *testing.T) {
 			module: `package test
 				import future.keywords
 				p[x] contains 1 if x = "foo"`,
 		},
+		{
+			note: "rego-v1 feature, general-ref-head with contains kw",
+			features: []string{
+				FeatureRegoV1,
+			},
+			module: `package test
+				import future.keywords
+				p[x] contains 1 if x = "foo"`,
+		},
+
+		{
+			note: "no features, rego.v1 import",
+			module: `package test
+				import rego.v1
+				p if { true }`,
+			expectedErr: "rego_compile_error: rego.v1 import is not supported",
+		},
+		{
+			note: "rego-v1-import feature, rego.v1 import",
+			module: `package test
+				import rego.v1
+				p if { true }`,
+			features: []string{
+				FeatureRegoV1Import,
+			},
+		},
+		{
+			note: "rego-v1 feature, rego.v1 import",
+			module: `package test
+				import rego.v1
+				p if { true }`,
+			features: []string{
+				FeatureRegoV1,
+			},
+		},
 	}
 
 	for _, tc := range cases {
@@ -10369,8 +10577,11 @@ func TestCompilerCapabilitiesFeatures(t *testing.T) {
 			capabilities := CapabilitiesForThisVersion()
 			capabilities.Features = tc.features
 
+			// Modules are parsed with full set of capabilities
+			mod := module(tc.module)
+
 			compiler := NewCompiler().WithCapabilities(capabilities)
-			compiler.Compile(map[string]*Module{"test": module(tc.module)})
+			compiler.Compile(map[string]*Module{"test": mod})
 			if tc.expectedErr != "" {
 				if !compiler.Failed() {
 					t.Fatal("expected error but got success")
 				}
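Taken together, the compile.go and compile_test.go changes above make the required capabilities depend on each module's rego-version. A minimal sketch of the resulting behavior, not part of the patch: it assumes the post-1.0 import path github.com/open-policy-agent/opa/v1/ast and uses the exported Compiler.Required field and parser options shown in the diffs.

    package main

    import (
    	"fmt"

    	"github.com/open-policy-agent/opa/v1/ast"
    )

    func main() {
    	src := `package x

    import rego.v1

    p if { true }`

    	// Parse the same source as v0 and as v1 Rego.
    	for _, rv := range []ast.RegoVersion{ast.RegoV0, ast.RegoV1} {
    		mod, err := ast.ParseModuleWithOpts("test.rego", src, ast.ParserOptions{RegoVersion: rv})
    		if err != nil {
    			panic(err)
    		}

    		c := ast.NewCompiler()
    		c.Compile(map[string]*ast.Module{"test.rego": mod})
    		if c.Failed() {
    			panic(c.Errors)
    		}

    		// c.Required is populated by buildRequiredCapabilities: per the
    		// test table above, the v0 module reports the rego_v1_import
    		// feature, while the v1 module reports rego_v1.
    		fmt.Println(c.Required.Features)
    	}
    }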
diff --git a/v1/ast/parser.go b/v1/ast/parser.go
index a537d8b67d..6639ca990b 100644
--- a/v1/ast/parser.go
+++ b/v1/ast/parser.go
@@ -283,7 +283,7 @@ func (p *Parser) parsedTermCachePush(t *Term, s0 *state) {
 func (p *Parser) futureParser() *Parser {
 	q := *p
 	q.s = p.save()
-	q.s.s = p.s.s.WithKeywords(futureKeywords)
+	q.s.s = p.s.s.WithKeywords(allFutureKeywords)
 	q.cache = parsedTermCache{}
 	return &q
 }
@@ -301,7 +301,7 @@ func (p *Parser) presentParser() (*Parser, map[string]tokens.Token) {
 	var cpy map[string]tokens.Token
 	q := *p
 	q.s = p.save()
-	q.s.s, cpy = p.s.s.WithoutKeywords(futureKeywords)
+	q.s.s, cpy = p.s.s.WithoutKeywords(allFutureKeywords)
 	q.cache = parsedTermCache{}
 	return &q, cpy
 }
@@ -312,30 +312,45 @@ func (p *Parser) presentParser() (*Parser, map[string]tokens.Token) {
 func (p *Parser) Parse() ([]Statement, []*Comment, Errors) {
 
 	if p.po.Capabilities == nil {
-		p.po.Capabilities = CapabilitiesForThisVersion()
+		p.po.Capabilities = CapabilitiesForThisVersion(CapabilitiesRegoVersion(p.po.RegoVersion))
 	}
 
 	allowedFutureKeywords := map[string]tokens.Token{}
 
 	if p.po.EffectiveRegoVersion() == RegoV1 {
-		// RegoV1 includes all future keywords in the default language definition
-		for k, v := range futureKeywords {
+		if !p.po.Capabilities.ContainsFeature(FeatureRegoV1) {
+			return nil, nil, Errors{
+				&Error{
+					Code:     ParseErr,
+					Message:  "illegal capabilities: rego_v1 feature required for parsing v1 Rego",
+					Location: nil,
+				},
+			}
+		}
+
+		// rego-v1 includes all v0 future keywords in the default language definition
+		for k, v := range futureKeywordsV0 {
 			allowedFutureKeywords[k] = v
 		}
 
-		// For sake of error reporting, we still need to check that keywords in capabilities are known,
 		for _, kw := range p.po.Capabilities.FutureKeywords {
-			if _, ok := futureKeywords[kw]; !ok {
-				return nil, nil, Errors{
-					&Error{
-						Code:     ParseErr,
-						Message:  fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw),
-						Location: nil,
-					},
+			if tok, ok := futureKeywords[kw]; ok {
+				allowedFutureKeywords[kw] = tok
+			} else {
+				// For sake of error reporting, we still need to check that keywords in capabilities are known in v0
+				if _, ok := futureKeywordsV0[kw]; !ok {
+					return nil, nil, Errors{
+						&Error{
+							Code:     ParseErr,
+							Message:  fmt.Sprintf("illegal capabilities: unknown keyword: %v", kw),
+							Location: nil,
+						},
+					}
 				}
 			}
 		}
-		// and that explicitly requested future keywords are known.
+
+		// Check that explicitly requested future keywords are known.
 		for _, kw := range p.po.FutureKeywords {
 			if _, ok := allowedFutureKeywords[kw]; !ok {
 				return nil, nil, Errors{
 					&Error{
@@ -350,7 +365,7 @@ func (p *Parser) Parse() ([]Statement, []*Comment, Errors) {
 	} else {
 		for _, kw := range p.po.Capabilities.FutureKeywords {
 			var ok bool
-			allowedFutureKeywords[kw], ok = futureKeywords[kw]
+			allowedFutureKeywords[kw], ok = allFutureKeywords[kw]
 			if !ok {
 				return nil, nil, Errors{
 					&Error{
@@ -361,6 +376,13 @@ func (p *Parser) Parse() ([]Statement, []*Comment, Errors) {
 				}
 			}
 		}
+
+		if p.po.Capabilities.ContainsFeature(FeatureRegoV1) {
+			// rego-v1 includes all v0 future keywords in the default language definition
+			for k, v := range futureKeywordsV0 {
+				allowedFutureKeywords[k] = v
+			}
+		}
 	}
 
 	var err error
@@ -2145,8 +2167,7 @@ func (p *Parser) illegal(note string, a ...interface{}) {
 	tokType := "token"
 	if tokens.IsKeyword(p.s.tok) {
 		tokType = "keyword"
-	}
-	if _, ok := futureKeywords[p.s.tok.String()]; ok {
+	} else if _, ok := allFutureKeywords[p.s.tok.String()]; ok {
 		tokType = "keyword"
 	}
 
@@ -2641,16 +2662,34 @@ func convertYAMLMapKeyTypes(x any, path []string) (any, error) {
 
 // futureKeywords is the source of truth for future keywords that will
 // eventually become standard keywords inside of Rego.
-var futureKeywords = map[string]tokens.Token{
+var futureKeywords = map[string]tokens.Token{}
+
+// futureKeywordsV0 is the source of truth for future keywords that were
+// not yet a standard part of Rego in v0, and required importing.
+var futureKeywordsV0 = map[string]tokens.Token{
 	"in":       tokens.In,
 	"every":    tokens.Every,
 	"contains": tokens.Contains,
 	"if":       tokens.If,
 }
 
+var allFutureKeywords map[string]tokens.Token
+
 func IsFutureKeyword(s string) bool {
-	_, ok := futureKeywords[s]
-	return ok
+	return IsFutureKeywordForRegoVersion(s, RegoV1)
+}
+
+func IsFutureKeywordForRegoVersion(s string, v RegoVersion) bool {
+	var yes bool
+
+	switch v {
+	case RegoV0, RegoV0CompatV1:
+		_, yes = futureKeywordsV0[s]
+	case RegoV1:
+		_, yes = futureKeywords[s]
+	}
+
+	return yes
 }
 
 func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]tokens.Token) {
@@ -2666,11 +2705,6 @@ func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]toke
 		return
 	}
 
-	if p.s.s.RegoV1Compatible() {
-		p.errorf(imp.Path.Location, "the `%s` import implies `future.keywords`, these are therefore mutually exclusive", RegoV1CompatibleRef)
-		return
-	}
-
 	kwds := make([]string, 0, len(allowedFutureKeywords))
 	for k := range allowedFutureKeywords {
 		kwds = append(kwds, k)
 	}
@@ -2700,7 +2734,7 @@ func (p *Parser) futureImport(imp *Import, allowedFutureKeywords map[string]toke
 }
 
 func (p *Parser) regoV1Import(imp *Import) {
-	if !p.po.Capabilities.ContainsFeature(FeatureRegoV1Import) {
+	if !p.po.Capabilities.ContainsFeature(FeatureRegoV1Import) && !p.po.Capabilities.ContainsFeature(FeatureRegoV1) {
 		p.errorf(imp.Path.Location, "invalid import, `%s` is not supported by current capabilities", RegoV1CompatibleRef)
 		return
 	}
@@ -2724,19 +2758,23 @@ func (p *Parser) regoV1Import(imp *Import) {
 	}
 
 	// import all future keywords with the rego.v1 import
-	kwds := make([]string, 0, len(futureKeywords))
-	for k := range futureKeywords {
+	kwds := make([]string, 0, len(futureKeywordsV0))
+	for k := range futureKeywordsV0 {
 		kwds = append(kwds, k)
 	}
 
-	if p.s.s.HasKeyword(futureKeywords) && !p.s.s.RegoV1Compatible() {
-		// We have imported future keywords, but they didn't come from another `rego.v1` import.
-		p.errorf(imp.Path.Location, "the `%s` import implies `future.keywords`, these are therefore mutually exclusive", RegoV1CompatibleRef)
-		return
-	}
-
 	p.s.s.SetRegoV1Compatible()
 	for _, kw := range kwds {
-		p.s.s.AddKeyword(kw, futureKeywords[kw])
+		p.s.s.AddKeyword(kw, futureKeywordsV0[kw])
+	}
+}
+
+func init() {
+	allFutureKeywords = map[string]tokens.Token{}
+	for k, v := range futureKeywords {
+		allFutureKeywords[k] = v
+	}
+	for k, v := range futureKeywordsV0 {
+		allFutureKeywords[k] = v
+	}
 }
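The keyword tables above split what used to be a single futureKeywords map: futureKeywordsV0 keeps the four v0 future keywords, futureKeywords is now empty (in v1 Rego, if/contains/every/in are ordinary keywords), and init() unions the two into allFutureKeywords. A small usage sketch of the new exported helper, not part of the patch, under the same assumed import path as above:

    package main

    import (
    	"fmt"

    	"github.com/open-policy-agent/opa/v1/ast"
    )

    func main() {
    	// "if" must be imported in v0 Rego, so there it is a future keyword...
    	fmt.Println(ast.IsFutureKeywordForRegoVersion("if", ast.RegoV0)) // true

    	// ...but in v1 it is part of the language proper.
    	fmt.Println(ast.IsFutureKeywordForRegoVersion("if", ast.RegoV1)) // false

    	// IsFutureKeyword now delegates to the v1 answer.
    	fmt.Println(ast.IsFutureKeyword("if")) // false
    }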
diff --git a/v1/ast/parser_test.go b/v1/ast/parser_test.go
index ad5e67d7a9..00d74f9f9c 100644
--- a/v1/ast/parser_test.go
+++ b/v1/ast/parser_test.go
@@ -1266,26 +1266,6 @@ func TestFutureImports(t *testing.T) {
 	}
 	assertParseModule(t, "multiple imports, all kw in options", mod, &parsed, ParserOptions{AllFutureKeywords: true})
 	assertParseModule(t, "multiple imports, single in options", mod, &parsed, ParserOptions{FutureKeywords: []string{"in"}})
-
-	mod = `
-	package p
-	import rego.v1
-	import future.keywords.in
-	`
-	// Only applies to v0, as the 'rego.v1' import is a no-op in v1
-	assertParseModuleErrorMatch(t, "rego.v1 and future.keywords.in imported", mod,
-		"rego_parse_error: the `rego.v1` import implies `future.keywords`, these are therefore mutually exclusive",
-		ParserOptions{RegoVersion: RegoV0})
-
-	mod = `
-	package p
-	import future.keywords
-	import rego.v1
-	`
-	// Only applies to v0, as the 'rego.v1' import is a no-op in v1
-	assertParseModuleErrorMatch(t, "rego.v1 and future.keywords imported", mod,
-		"rego_parse_error: the `rego.v1` import implies `future.keywords`, these are therefore mutually exclusive",
-		ParserOptions{RegoVersion: RegoV0})
 }
 
 func TestFutureAndRegoV1ImportsExtraction(t *testing.T) {
@@ -1388,7 +1368,6 @@ import future.keywords
 p contains 1 if {
 	input.x == 1
 }`,
-			expectedErrors: []string{"rego_parse_error: the `rego.v1` import implies `future.keywords`, these are therefore mutually exclusive"},
 		},
 		{
 			note: "`if` keyword used on rule",
@@ -6612,23 +6591,6 @@ func assertParseModuleError(t *testing.T, msg, input string) {
 	}
 }
 
-func assertParseModuleErrorMatch(t *testing.T, msg, input string, expected string, opts ...ParserOptions) {
-	t.Helper()
-
-	opt := ParserOptions{}
-	if len(opts) == 1 {
-		opt = opts[0]
-	}
-
-	m, err := ParseModuleWithOpts("", input, opt)
-	if err == nil {
-		t.Errorf("Error on test \"%s\": expected parse error: %v (parsed)", msg, m)
-	}
-	if !strings.Contains(err.Error(), expected) {
-		t.Errorf("Error on test \"%s\"; expected:\n\n%v\n\ngot:\n\n%v", msg, expected, err)
-	}
-}
-
 func assertParsePackage(t *testing.T, msg string, input string, correct *Package) {
 	assertParseOne(t, msg, input, func(parsed interface{}) {
 		pkg := parsed.(*Package)
diff --git a/v1/ast/version_index.json b/v1/ast/version_index.json
index 718df220f9..b888b3e028 100644
--- a/v1/ast/version_index.json
+++ b/v1/ast/version_index.json
@@ -1395,6 +1395,13 @@
     }
   },
   "features": {
+    "rego_v1": {
+      "Major": 1,
+      "Minor": 0,
+      "Patch": 0,
+      "PreRelease": "",
+      "Metadata": ""
+    },
     "rego_v1_import": {
       "Major": 0,
       "Minor": 59,
diff --git a/v1/bundle/bundle.go b/v1/bundle/bundle.go
index e4f42b8ded..be320f6a73 100644
--- a/v1/bundle/bundle.go
+++ b/v1/bundle/bundle.go
@@ -1082,9 +1082,9 @@ func hashBundleFiles(hash SignatureHasher, b *Bundle) ([]FileInfo, error) {
 }
 
 // FormatModules formats Rego modules
-// Modules will be formatted to comply with rego-v0, but Rego compatibility of individual parsed modules will be respected (e.g. if 'rego.v1' is imported).
+// Modules will be formatted to comply with [ast.DefaultRegoVersion], but Rego compatibility of individual parsed modules will be respected (e.g. if 'rego.v1' is imported). func (b *Bundle) FormatModules(useModulePath bool) error { - return b.FormatModulesForRegoVersion(ast.RegoV0, true, useModulePath) + return b.FormatModulesForRegoVersion(ast.DefaultRegoVersion, true, useModulePath) } // FormatModulesForRegoVersion formats Rego modules to comply with a given Rego version diff --git a/v1/capabilities/v1.0.0.json b/v1/capabilities/v1.0.0.json new file mode 100644 index 0000000000..c1b4b8a006 --- /dev/null +++ b/v1/capabilities/v1.0.0.json @@ -0,0 +1,4844 @@ +{ + "builtins": [ + { + "name": "abs", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "all", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "and", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "\u0026" + }, + { + "name": "any", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "array.concat", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.reverse", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "array.slice", + "decl": { + "args": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "assign", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": ":=" + }, + { + "name": "base64.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "base64url.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "base64url.encode_no_pad", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" 
+ }, + "type": "function" + } + }, + { + "name": "bits.and", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.lsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.negate", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.or", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.rsh", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "bits.xor", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "cast_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "cast_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "cast_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "null" + }, + "type": "function" + } + }, + { + "name": "cast_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "cast_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "cast_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "ceil", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "concat", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "count", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.equal", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.md5", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + 
"name": "crypto.hmac.sha1", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.hmac.sha512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.md5", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.parse_private_keys", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.sha1", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.sha256", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_and_verify_certificates_with_options", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificate_request", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_certificates", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_keypair", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "crypto.x509.parse_rsa_private_key", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "div", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "/" + }, + { + "name": "endswith", + "decl": { + "args": [ 
+ { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "eq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "=" + }, + { + "name": "equal", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "==" + }, + { + "name": "floor", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "format_int", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "glob.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "of": [ + { + "type": "null" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + } + ], + "type": "any" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "glob.quote_meta", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "graph.reachable", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graph.reachable_paths", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "graphql.is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "graphql.parse", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": 
"any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_and_verify", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_query", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.parse_schema", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "graphql.schema_is_valid", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "gt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e" + }, + { + "name": "gte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003e=" + }, + { + "name": "hex.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "hex.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "http.send", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "indexof", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "indexof_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "internal.member_2", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "in" + }, + { + "name": "internal.member_3", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": 
"in" + }, + { + "name": "internal.print", + "decl": { + "args": [ + { + "dynamic": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "array" + } + ], + "type": "function" + } + }, + { + "name": "intersection", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "static": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "io.jwt.decode_verify", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "array" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.encode_sign_raw", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "io.jwt.verify_es256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_es512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_hs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": 
"io.jwt.verify_ps384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_ps512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs256", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs384", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "io.jwt.verify_rs512", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_array", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_boolean", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_null", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_number", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_object", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_set", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "is_string", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "json.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.marshal_with_options", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "indent", + "value": { + "type": "string" + } + }, + { + "key": "prefix", + "value": { + "type": "string" + } + }, + { + "key": "pretty", + "value": { + "type": "boolean" + } + } + ], + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "json.match_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": 
"any" + } + }, + "type": "object" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "dynamic": { + "static": [ + { + "key": "desc", + "value": { + "type": "string" + } + }, + { + "key": "error", + "value": { + "type": "string" + } + }, + { + "key": "field", + "value": { + "type": "string" + } + }, + { + "key": "type", + "value": { + "type": "string" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "json.patch", + "decl": { + "args": [ + { + "type": "any" + }, + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "static": [ + { + "key": "op", + "value": { + "type": "string" + } + }, + { + "key": "path", + "value": { + "type": "any" + } + } + ], + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "json.verify_schema", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "boolean" + }, + { + "of": [ + { + "type": "null" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "lower", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "lt", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c" + }, + { + "name": "lte", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "\u003c=" + }, + { + "name": "max", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "min", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "minus", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], 
+ "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": [ + { + "type": "number" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + "type": "function" + }, + "infix": "-" + }, + { + "name": "mul", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "*" + }, + { + "name": "neq", + "decl": { + "args": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + }, + "infix": "!=" + }, + { + "name": "net.cidr_contains", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_contains_matches", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "static": [ + { + "type": "any" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_expand", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_intersects", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.cidr_merge", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "of": [ + { + "type": "string" + } + ], + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "net.cidr_overlap", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "net.lookup_ip_addr", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + 
"of": { + "type": "string" + }, + "type": "set" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "numbers.range", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "numbers.range_step", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "object.filter", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.get", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "any" + }, + { + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.keys", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "object.remove", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.subset", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "object.union_n", + "decl": { + "args": [ + { + "dynamic": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "array" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "opa.runtime", + "decl": { + "result": { + "dynamic": { + "key": { + "type": "string" + }, + 
"value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "or", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + }, + "infix": "|" + }, + { + "name": "plus", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "+" + }, + { + "name": "print", + "decl": { + "type": "function", + "variadic": { + "type": "any" + } + } + }, + { + "name": "product", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "providers.aws.sign_req", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "key": { + "type": "any" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rand.intn", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "re_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.find_all_string_submatch_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.find_n", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "number" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.globs_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "regex.replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "regex.split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "regex.template_match", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": 
"boolean" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.chain", + "decl": { + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "rego.metadata.rule", + "decl": { + "result": { + "type": "any" + }, + "type": "function" + } + }, + { + "name": "rego.parse_module", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "rem", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + }, + "infix": "%" + }, + { + "name": "replace", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "round", + "decl": { + "args": [ + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.compare", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "semver.is_valid", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "set_diff", + "decl": { + "args": [ + { + "of": { + "type": "any" + }, + "type": "set" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "sort", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "of": { + "type": "any" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "split", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + "type": "function" + } + }, + { + "name": "sprintf", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "any" + }, + "type": "array" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "startswith", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_prefix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.any_suffix_match", + "decl": { + "args": [ + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + 
"type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "strings.count", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "strings.render_template", + "decl": { + "args": [ + { + "type": "string" + }, + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.replace_n", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "strings.reverse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "substring", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "sum", + "decl": { + "args": [ + { + "of": [ + { + "dynamic": { + "type": "number" + }, + "type": "array" + }, + { + "of": { + "type": "number" + }, + "type": "set" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.add_date", + "decl": { + "args": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.clock", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.date", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.diff", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + }, + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "static": [ + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + }, + { + "type": "number" + } + ], + "type": "array" + }, + "type": "function" + } + }, + { + "name": "time.format", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + 
"result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "time.now_ns", + "decl": { + "result": { + "type": "number" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "time.parse_duration_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_ns", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.parse_rfc3339_ns", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "time.weekday", + "decl": { + "args": [ + { + "of": [ + { + "type": "number" + }, + { + "static": [ + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "array" + } + ], + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "to_number", + "decl": { + "args": [ + { + "of": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + } + ], + "type": "any" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "trace", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "trim", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_left", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_prefix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_right", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_space", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "trim_suffix", + "decl": { + "args": [ + { + "type": "string" + }, + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "type_name", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "union", + "decl": { + "args": [ + { + "of": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "set" + } + ], + "result": { + "of": { + "type": "any" + }, + "type": "set" + }, + "type": "function" + } + }, + { + "name": "units.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "units.parse_bytes", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "number" + }, + "type": "function" + } + }, + { + "name": "upper", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.decode_object", + "decl": { + "args": [ + { + "type": 
"string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "dynamic": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "urlquery.encode_object", + "decl": { + "args": [ + { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "of": [ + { + "type": "string" + }, + { + "dynamic": { + "type": "string" + }, + "type": "array" + }, + { + "of": { + "type": "string" + }, + "type": "set" + } + ], + "type": "any" + } + }, + "type": "object" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "uuid.parse", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "dynamic": { + "key": { + "type": "string" + }, + "value": { + "type": "any" + } + }, + "type": "object" + }, + "type": "function" + } + }, + { + "name": "uuid.rfc4122", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "string" + }, + "type": "function" + }, + "nondeterministic": true + }, + { + "name": "walk", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "static": [ + { + "dynamic": { + "type": "any" + }, + "type": "array" + }, + { + "type": "any" + } + ], + "type": "array" + }, + "type": "function" + }, + "relation": true + }, + { + "name": "yaml.is_valid", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "boolean" + }, + "type": "function" + } + }, + { + "name": "yaml.marshal", + "decl": { + "args": [ + { + "type": "any" + } + ], + "result": { + "type": "string" + }, + "type": "function" + } + }, + { + "name": "yaml.unmarshal", + "decl": { + "args": [ + { + "type": "string" + } + ], + "result": { + "type": "any" + }, + "type": "function" + } + } + ], + "future_keywords": [ + "contains", + "every", + "if", + "in" + ], + "wasm_abi_versions": [ + { + "version": 1, + "minor_version": 1 + }, + { + "version": 1, + "minor_version": 2 + } + ], + "features": [ + "rule_head_ref_string_prefixes", + "rule_head_refs", + "rego_v1", + "rego_v1_import" + ] +} diff --git a/v1/compile/compile_test.go b/v1/compile/compile_test.go index dc6bcb0fc7..3454ad310d 100644 --- a/v1/compile/compile_test.go +++ b/v1/compile/compile_test.go @@ -1663,7 +1663,7 @@ update { for _, useMemoryFS := range []bool{false, true} { test.WithTestFS(tc.files, useMemoryFS, func(root string, fsys fs.FS) { - caps := ast.CapabilitiesForThisVersion() + caps := ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)) caps.Features = []string{ ast.FeatureRefHeadStringPrefixes, ast.FeatureRefHeads, @@ -1891,6 +1891,9 @@ p if { ast.FeatureRefHeadStringPrefixes, ast.FeatureRefHeads, } + if tc.modulesRegoVersion == ast.RegoV1 { + capabilities.Features = append(capabilities.Features, ast.FeatureRegoV1) + } if tc.regoV1ImportCapable { capabilities.Features = append(capabilities.Features, ast.FeatureRegoV1Import) } diff --git a/v1/loader/loader.go b/v1/loader/loader.go index c20500970b..8daf22458b 100644 --- a/v1/loader/loader.go +++ b/v1/loader/loader.go @@ -762,6 +762,7 @@ func loadBundleFile(path string, bs []byte, m metrics.Metrics, opts ast.ParserOp tl := bundle.NewTarballLoaderWithBaseURL(bytes.NewBuffer(bs), path) br := bundle.NewCustomReader(tl). WithRegoVersion(opts.RegoVersion). + WithCapabilities(opts.Capabilities). WithJSONOptions(opts.JSONOptions). 
WithProcessAnnotations(opts.ProcessAnnotation). WithMetrics(m). diff --git a/v1/plugins/plugins_test.go b/v1/plugins/plugins_test.go index e878022a19..ae587f078d 100644 --- a/v1/plugins/plugins_test.go +++ b/v1/plugins/plugins_test.go @@ -18,6 +18,7 @@ import ( internal_tracing "github.com/open-policy-agent/opa/internal/distributedtracing" "github.com/open-policy-agent/opa/internal/file/archive" "github.com/open-policy-agent/opa/internal/storage/mock" + "github.com/open-policy-agent/opa/v1/ast" "github.com/open-policy-agent/opa/v1/bundle" "github.com/open-policy-agent/opa/v1/logging" "github.com/open-policy-agent/opa/v1/logging/test" @@ -192,116 +193,143 @@ func TestPluginStatusUpdateOnStartAndStop(t *testing.T) { } func TestManagerWithOPATelemetryUpdateLoop(t *testing.T) { - // test server - mux := http.NewServeMux() - ts := httptest.NewServer(mux) - - versions := []string{} - mux.HandleFunc("/v1/version", func(w http.ResponseWriter, req *http.Request) { - var data map[string]string - - body, err := io.ReadAll(req.Body) - if err != nil { - t.Fatal(err) - } - - err = json.Unmarshal(body, &data) - if err != nil { - t.Fatal(err) - } - - versions = append(versions, data["min_compatible_version"]) - - w.WriteHeader(http.StatusOK) - bs, _ := json.Marshal(map[string]string{"foo": "bar"}) // dummy data - w.Header().Set("Content-Type", "application/json") - _, _ = w.Write(bs) // ignore error - }) - defer ts.Close() - - t.Setenv("OPA_TELEMETRY_SERVICE_URL", ts.URL) - - ctx := context.Background() - - m, err := New([]byte{}, "test", inmem.New(), WithEnableTelemetry(true)) - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } - - defaultUploadIntervalSec = int64(1) - - err = m.Start(context.Background()) - if err != nil { - t.Fatalf("Unexpected error: %s", err) - } - - // add a policy to the store to trigger a telemetry update (v0.36.0) - module := `package x - p := array.reverse([1,2,3])` - - err = storage.Txn(ctx, m.Store, storage.WriteParams, func(txn storage.Transaction) error { - return m.Store.UpsertPolicy(ctx, txn, "policy.rego", []byte(module)) - }) - if err != nil { - t.Fatalf("unexpected error: %v", err) + tests := []struct { + note string + regoVersion ast.RegoVersion + exp []string + }{ + { + note: "v0 manager", + regoVersion: ast.RegoV0, + exp: []string{"0.36.0", "0.46.0"}, + }, + { + note: "v1 manager", + regoVersion: ast.RegoV1, + exp: []string{"1.0.0", "1.0.0"}, + }, } - time.Sleep(2 * time.Second) + for _, tc := range tests { + t.Run(tc.note, func(t *testing.T) { + // test server + mux := http.NewServeMux() + ts := httptest.NewServer(mux) - // add data to the store and verify there is no trigger for a telemetry update - err = storage.Txn(ctx, m.Store, storage.WriteParams, func(txn storage.Transaction) error { - return m.Store.Write(ctx, txn, storage.AddOp, storage.MustParsePath("/a"), `[2,1,3]`) - }) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + versions := []string{} + mux.HandleFunc("/v1/version", func(w http.ResponseWriter, req *http.Request) { + var data map[string]string - // add a bundle with some policy to trigger a telemetry update (v0.46.0) - txn := storage.NewTransactionOrDie(ctx, m.Store, storage.WriteParams) - - var archiveFiles = map[string]string{ - "/a/b/c/data.json": "[1,2,3]", - "/policy.rego": "package foo\n import future.keywords.every", - "/roles/policy.rego": "package bar\n import future.keywords.if\n p.a.b.c.d if { true }", - } + body, err := io.ReadAll(req.Body) + if err != nil { + t.Fatal(err) + } - files := make([][2]string, 0, 
len(archiveFiles)) - for name, content := range archiveFiles { - files = append(files, [2]string{name, content}) - } + err = json.Unmarshal(body, &data) + if err != nil { + t.Fatal(err) + } - buf := archive.MustWriteTarGz(files) - b, err := bundle.NewReader(buf).WithLazyLoadingMode(true).Read() - if err != nil { - t.Fatal(err) - } + versions = append(versions, data["min_compatible_version"]) - iterator := bundle.NewIterator(b.Raw) + w.WriteHeader(http.StatusOK) + bs, _ := json.Marshal(map[string]string{"foo": "bar"}) // dummy data + w.Header().Set("Content-Type", "application/json") + _, _ = w.Write(bs) // ignore error + }) + defer ts.Close() - params := storage.WriteParams - params.BasePaths = []string{""} + t.Setenv("OPA_TELEMETRY_SERVICE_URL", ts.URL) - err = m.Store.Truncate(ctx, txn, params, iterator) - if err != nil { - t.Fatalf("Unexpected truncate error: %v", err) - } + ctx := context.Background() - if err := m.Store.Commit(ctx, txn); err != nil { - t.Fatalf("Unexpected commit error: %v", err) - } + m, err := New([]byte{}, "test", inmem.New(), + WithEnableTelemetry(true), + WithParserOptions(ast.ParserOptions{RegoVersion: tc.regoVersion})) + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } - time.Sleep(2 * time.Second) + defaultUploadIntervalSec = int64(1) - m.Stop(ctx) + err = m.Start(context.Background()) + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } - exp := 2 - if len(versions) != exp { - t.Fatalf("Expected number of server calls: %+v but got: %+v", exp, len(versions)) - } + // add a policy to the store to trigger a telemetry update + // (v0.36.0 with v0 manager) + // (v1.0.0 with v1 manager) + module := `package x + p := array.reverse([1,2,3])` - expVers := []string{"0.36.0", "0.46.0"} - if !reflect.DeepEqual(expVers, versions) { - t.Fatalf("Expected OPA versions: %+v but got: %+v", expVers, versions) + err = storage.Txn(ctx, m.Store, storage.WriteParams, func(txn storage.Transaction) error { + return m.Store.UpsertPolicy(ctx, txn, "policy.rego", []byte(module)) + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + time.Sleep(2 * time.Second) + + // add data to the store and verify there is no trigger for a telemetry update + err = storage.Txn(ctx, m.Store, storage.WriteParams, func(txn storage.Transaction) error { + return m.Store.Write(ctx, txn, storage.AddOp, storage.MustParsePath("/a"), `[2,1,3]`) + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // add a bundle with some policy to trigger a telemetry update + // (v0.46.0 with v0 manager) + // (v1.0.0 with v1 manager) + txn := storage.NewTransactionOrDie(ctx, m.Store, storage.WriteParams) + + var archiveFiles = map[string]string{ + ".manifest": `{"rego_version": 0}`, + "/a/b/c/data.json": "[1,2,3]", + "/policy.rego": "package foo\n import future.keywords.every", + "/roles/policy.rego": "package bar\n import future.keywords.if\n p.a.b.c.d if { true }", + } + + files := make([][2]string, 0, len(archiveFiles)) + for name, content := range archiveFiles { + files = append(files, [2]string{name, content}) + } + + buf := archive.MustWriteTarGz(files) + b, err := bundle.NewReader(buf).WithLazyLoadingMode(true).Read() + if err != nil { + t.Fatal(err) + } + + iterator := bundle.NewIterator(b.Raw) + + params := storage.WriteParams + params.BasePaths = []string{""} + + err = m.Store.Truncate(ctx, txn, params, iterator) + if err != nil { + t.Fatalf("Unexpected truncate error: %v", err) + } + + if err := m.Store.Commit(ctx, txn); err != nil { + t.Fatalf("Unexpected commit 
error: %v", err) + } + + time.Sleep(2 * time.Second) + + m.Stop(ctx) + + exp := 2 + if len(versions) != exp { + t.Fatalf("Expected number of server calls: %+v but got: %+v", exp, len(versions)) + } + + if !reflect.DeepEqual(tc.exp, versions) { + t.Fatalf("Expected OPA versions: %+v but got: %+v", tc.exp, versions) + } + }) } } diff --git a/v1/rego/rego.go b/v1/rego/rego.go index caa21dec56..9499a213ff 100644 --- a/v1/rego/rego.go +++ b/v1/rego/rego.go @@ -1913,6 +1913,7 @@ func (r *Rego) loadFiles(ctx context.Context, txn storage.Transaction, m metrics WithMetrics(m). WithProcessAnnotation(true). WithRegoVersion(r.regoVersion). + WithCapabilities(r.capabilities). Filtered(r.loadPaths.paths, r.loadPaths.filter) if err != nil { return err @@ -1944,6 +1945,7 @@ func (r *Rego) loadBundles(_ context.Context, _ storage.Transaction, m metrics.M WithProcessAnnotation(true). WithSkipBundleVerification(r.skipBundleVerification). WithRegoVersion(r.regoVersion). + WithCapabilities(r.capabilities). AsBundle(path) if err != nil { return fmt.Errorf("loading error: %s", err) @@ -2489,7 +2491,10 @@ func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, } // If the target rego-version is v0, and the rego.v1 import is available, then we attempt to apply it to support modules. - if r.regoVersion == ast.RegoV0 && (r.capabilities == nil || r.capabilities.ContainsFeature(ast.FeatureRegoV1Import)) { + if r.regoVersion == ast.RegoV0 && + (r.capabilities == nil || + r.capabilities.ContainsFeature(ast.FeatureRegoV1Import) || + r.capabilities.ContainsFeature(ast.FeatureRegoV1)) { for i, mod := range support { // We can't apply the RegoV0CompatV1 version to the support module if it contains rules or vars that @@ -2501,7 +2506,7 @@ func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, if name == "" && len(r.Head.Reference) > 0 { name = r.Head.Reference[0].Value.(ast.Var) } - if ast.IsFutureKeyword(name.String()) { + if ast.IsFutureKeywordForRegoVersion(name.String(), ast.RegoV0) { applyRegoVersion = false return true } @@ -2510,7 +2515,7 @@ func (r *Rego) partial(ctx context.Context, ectx *EvalContext) (*PartialQueries, if applyRegoVersion { ast.WalkVars(mod, func(v ast.Var) bool { - if ast.IsFutureKeyword(v.String()) { + if ast.IsFutureKeywordForRegoVersion(v.String(), ast.RegoV0) { applyRegoVersion = false return true } diff --git a/v1/rego/rego_test.go b/v1/rego/rego_test.go index 9bab2ed917..c798851b3f 100644 --- a/v1/rego/rego_test.go +++ b/v1/rego/rego_test.go @@ -137,6 +137,213 @@ p contains x if { } } +func TestRegoEval_Capabilities(t *testing.T) { + tests := []struct { + note string + regoVersion ast.RegoVersion + capabilities *ast.Capabilities + module string + expResult interface{} + expErrs []string + }{ + { + note: "v0 module, rego-v0, no capabilities", + regoVersion: ast.RegoV0, + module: `package test + +p[x] { + x = ["a", "b", "c"][_] +}`, + expResult: []string{"a", "b", "c"}, + }, + { + note: "v0 module, rego-v0, v0 capabilities", + regoVersion: ast.RegoV0, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + module: `package test + +p[x] { + x = ["a", "b", "c"][_] +}`, + expResult: []string{"a", "b", "c"}, + }, + { + note: "v0 module, rego-v0, v1 capabilities", + regoVersion: ast.RegoV0, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + module: `package test + +p[x] { + x = ["a", "b", "c"][_] +}`, + expResult: []string{"a", "b", "c"}, + }, + + { + note: "v0 module, 
rego-v1, no capabilities", + regoVersion: ast.RegoV1, + module: `package test + +p[x] { + x = ["a", "b", "c"][_] +}`, + expErrs: []string{ + "test.rego:3: rego_parse_error: `if` keyword is required before rule body", + "test.rego:3: rego_parse_error: `contains` keyword is required for partial set rules", + }, + }, + { + note: "v0 module, rego-v1, v0 capabilities", + regoVersion: ast.RegoV1, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + module: `package test + +p[x] { + x = ["a", "b", "c"][_] +}`, + expErrs: []string{ + "rego_parse_error: illegal capabilities: rego_v1 feature required for parsing v1 Rego", + }, + }, + { + note: "v0 module, rego-v1, v1 capabilities", + regoVersion: ast.RegoV1, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + module: `package test + +p[x] { + x = ["a", "b", "c"][_] +}`, + expErrs: []string{ + "test.rego:3: rego_parse_error: `if` keyword is required before rule body", + "test.rego:3: rego_parse_error: `contains` keyword is required for partial set rules", + }, + }, + + { + note: "v1 module, rego-v0, no capabilities", + regoVersion: ast.RegoV0, + module: `package test + +p contains x if { + some x in ["a", "b", "c"] +}`, + expErrs: []string{ + "test.rego:4: rego_parse_error: unexpected identifier token", + }, + }, + { + note: "v1 module, rego-v0, v0 capabilities", + regoVersion: ast.RegoV0, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + module: `package test + +p contains x if { + some x in ["a", "b", "c"] +}`, + expErrs: []string{ + "test.rego:4: rego_parse_error: unexpected identifier token", + }, + }, + { + note: "v1 module, rego-v0, v1 capabilities", + regoVersion: ast.RegoV0, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + module: `package test + +p contains x if { + some x in ["a", "b", "c"] +}`, + expErrs: []string{ + "test.rego:4: rego_parse_error: unexpected identifier token", + }, + }, + + { + note: "v1 module, rego-v1, no capabilities", + regoVersion: ast.RegoV1, + module: `package test + +p contains x if { + some x in ["a", "b", "c"] +}`, + expResult: []string{"a", "b", "c"}, + }, + { + note: "v1 module, rego-v1, v0 capabilities", + regoVersion: ast.RegoV1, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV0)), + module: `package test + +p contains x if { + some x in ["a", "b", "c"] +}`, + expErrs: []string{ + "rego_parse_error: illegal capabilities: rego_v1 feature required for parsing v1 Rego", + }, + }, + { + note: "v1 module, rego-v1, v1 capabilities", + regoVersion: ast.RegoV1, + capabilities: ast.CapabilitiesForThisVersion(ast.CapabilitiesRegoVersion(ast.RegoV1)), + module: `package test + +p contains x if { + some x in ["a", "b", "c"] +}`, + expResult: []string{"a", "b", "c"}, + }, + } + + for _, tc := range tests { + t.Run(tc.note, func(t *testing.T) { + files := map[string]string{ + "test.rego": tc.module, + } + + test.WithTempFS(files, func(root string) { + ctx := context.Background() + + pq, err := New( + SetRegoVersion(tc.regoVersion), + Capabilities(tc.capabilities), + Load([]string{root}, nil), + Query("data.test.p"), + ).PrepareForEval(ctx) + + if tc.expErrs != nil { + if err == nil { + t.Fatalf("Expected error but got nil") + } + + for _, expErr := range tc.expErrs { + if !strings.Contains(err.Error(), expErr) { + t.Fatalf("Expected error to contain:\n\n%q\n\nbut got:\n\n%v", expErr, err) + } + } + } else { + if err != nil { + 
t.Fatalf("Unexpected error: %v", err) + } + + rs, err := pq.Eval(ctx) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + if len(rs) != 1 { + t.Fatalf("Expected exactly one result but got:\n\n%v", rs) + } + + if reflect.DeepEqual(rs[0].Expressions[0].Value, tc.expResult) { + t.Fatalf("Expected %v but got: %v", tc.expResult, rs[0].Expressions[0].Value) + } + } + }) + }) + } +} + func assertEval(t *testing.T, r *Rego, expected string) { t.Helper() rs, err := r.Eval(context.Background()) diff --git a/v1/repl/repl.go b/v1/repl/repl.go index f4d8274b2d..4147a86281 100644 --- a/v1/repl/repl.go +++ b/v1/repl/repl.go @@ -929,12 +929,7 @@ func (r *REPL) parserOptions() (ast.ParserOptions, error) { if err == nil { for _, i := range r.modules[r.currentModuleID].Imports { if ast.Compare(i.Path.Value, ast.RegoV1CompatibleRef) == 0 { - opts.RegoVersion = ast.RegoV0CompatV1 - - // ast.RegoV0CompatV1 sets parsing requirements, but doesn't imply allowed future keywords - if r.capabilities != nil { - opts.FutureKeywords = r.capabilities.FutureKeywords - } + opts.RegoVersion = ast.RegoV1 } } } diff --git a/v1/tester/runner.go b/v1/tester/runner.go index 5df0a2d5ca..4522ffceff 100644 --- a/v1/tester/runner.go +++ b/v1/tester/runner.go @@ -622,6 +622,10 @@ func Load(args []string, filter loader.Filter) (map[string]*ast.Module, storage. // LoadWithRegoVersion returns modules and an in-memory store for running tests. // Modules are parsed in accordance with the given RegoVersion. func LoadWithRegoVersion(args []string, filter loader.Filter, regoVersion ast.RegoVersion) (map[string]*ast.Module, storage.Store, error) { + if regoVersion == ast.RegoUndefined { + regoVersion = ast.DefaultRegoVersion + } + loaded, err := loader.NewFileLoader(). WithRegoVersion(regoVersion). WithProcessAnnotation(true). @@ -649,6 +653,38 @@ func LoadWithRegoVersion(args []string, filter loader.Filter, regoVersion ast.Re return modules, store, err } +// LoadWithParserOptions returns modules and an in-memory store for running tests. +// Modules are parsed in accordance with the given [ast.ParserOptions]. +func LoadWithParserOptions(args []string, filter loader.Filter, popts ast.ParserOptions) (map[string]*ast.Module, storage.Store, error) { + loaded, err := loader.NewFileLoader(). + WithRegoVersion(popts.RegoVersion). + WithCapabilities(popts.Capabilities). + WithProcessAnnotation(popts.ProcessAnnotation). + WithJSONOptions(popts.JSONOptions). + Filtered(args, filter) + if err != nil { + return nil, nil, err + } + store := inmem.NewFromObject(loaded.Documents) + modules := map[string]*ast.Module{} + ctx := context.Background() + err = storage.Txn(ctx, store, storage.WriteParams, func(txn storage.Transaction) error { + for _, loadedModule := range loaded.Modules { + modules[loadedModule.Name] = loadedModule.Parsed + + // Add the policies to the store to ensure that any future bundle + // activations will preserve them and re-compile the module with + // the bundle modules. + err := store.UpsertPolicy(ctx, txn, loadedModule.Name, loadedModule.Raw) + if err != nil { + return err + } + } + return nil + }) + return modules, store, err +} + // LoadBundles will load the given args as bundles, either tarball or directory is OK. 
func LoadBundles(args []string, filter loader.Filter) (map[string]*bundle.Bundle, error) { return LoadBundlesWithRegoVersion(args, filter, ast.RegoV0) @@ -677,3 +713,29 @@ func LoadBundlesWithRegoVersion(args []string, filter loader.Filter, regoVersion return bundles, nil } + +// LoadBundlesWithParserOptions will load the given args as bundles, either tarball or directory is OK. +// Bundles are parsed in accordance with the given [ast.ParserOptions]. +func LoadBundlesWithParserOptions(args []string, filter loader.Filter, popts ast.ParserOptions) (map[string]*bundle.Bundle, error) { + if popts.RegoVersion == ast.RegoUndefined { + popts.RegoVersion = ast.DefaultRegoVersion + } + + bundles := map[string]*bundle.Bundle{} + for _, bundleDir := range args { + b, err := loader.NewFileLoader(). + WithRegoVersion(popts.RegoVersion). + WithCapabilities(popts.Capabilities). + WithProcessAnnotation(popts.ProcessAnnotation). + WithJSONOptions(popts.JSONOptions). + WithSkipBundleVerification(true). + WithFilter(filter). + AsBundle(bundleDir) + if err != nil { + return nil, fmt.Errorf("unable to load bundle %s: %s", bundleDir, err) + } + bundles[bundleDir] = b + } + + return bundles, nil +} From ba028b59b4bd65b1becdd5c0b7b678cfbb49deed Mon Sep 17 00:00:00 2001 From: cli-docs-updater Date: Tue, 17 Dec 2024 10:51:49 +0000 Subject: [PATCH 5/7] docs: Update generated CLI docs --- docs/content/cli.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/content/cli.md b/docs/content/cli.md index 33e1a269b8..0d512c6c68 100755 --- a/docs/content/cli.md +++ b/docs/content/cli.md @@ -322,8 +322,9 @@ opa capabilities [flags] ``` --current print current capabilities - --file string print current capabilities + --file string print capabilities defined by a file -h, --help help for capabilities + --v0-compatible opt-in to OPA features and behaviors prior to the OPA v1.0 release. Takes precedence over --v1-compatible --version string print capabilities of a specific version ``` From f88306274f5a792b9a84318ff98be5c7ccb50497 Mon Sep 17 00:00:00 2001 From: Johan Fylling Date: Tue, 17 Dec 2024 17:12:05 +0100 Subject: [PATCH 6/7] Updating formatter to not drop `rego.v1` and `future.keywords` imports (#7224) to maximize compatibility surface across OPA versions. Adding `--drop-v0-imports` flag to `opa fmt` for opting in to dropping redundant v0 imports. 
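For illustration, a minimal before/after sketch (hypothetical module; behavior as exercised by the cmd/fmt_test.go cases added below):

    # policy.rego
    package test
    import rego.v1
    p if { input.x == 1 }

    # `opa fmt policy.rego` now keeps the import:
    package test

    import rego.v1

    p if {
        input.x == 1
    }

    # `opa fmt --drop-v0-imports policy.rego` removes it:
    package test

    p if {
        input.x == 1
    }
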
Signed-off-by: Johan Fylling --- cmd/build_test.go | 33 ++++++- cmd/fmt.go | 21 +++-- cmd/fmt_test.go | 90 ++++++++++++++++++- v1/format/format.go | 19 +++- .../testfiles/v1/test_future_kw_import.rego | 21 +++++ .../v1/test_future_kw_import.rego.formatted | 21 +++++ v1/format/testfiles/v1/test_rego_v1.rego | 3 +- .../testfiles/v1/test_rego_v1.rego.formatted | 4 +- 8 files changed, 192 insertions(+), 20 deletions(-) create mode 100644 v1/format/testfiles/v1/test_future_kw_import.rego create mode 100644 v1/format/testfiles/v1/test_future_kw_import.rego.formatted diff --git a/cmd/build_test.go b/cmd/build_test.go index 8f99ba21ef..6852307f9b 100644 --- a/cmd/build_test.go +++ b/cmd/build_test.go @@ -2393,12 +2393,14 @@ allow if { 1 < 2 }`, }, - // the rego.v1 import is obsolete in rego-v1, and is removed + // the rego.v1 import is kept to maximize compatibility surface expectedFiles: map[string]string{ ".manifest": `{"revision":"","roots":[""],"rego_version":1} `, "test.rego": `package test +import rego.v1 + allow if { 1 < 2 } @@ -2415,12 +2417,39 @@ allow if { 1 < 2 }`, }, - // future.keywords imports are obsolete in rego-v1, and are removed + // future.keywords imports are kept to maximize compatibility surface expectedFiles: map[string]string{ ".manifest": `{"revision":"","roots":[""],"rego_version":1} `, "test.rego": `package test +import future.keywords.if + +allow if { + 1 < 2 +} +`, + }, + }, + { + note: "v1 compatibility: policy with rego.v1 and future.keywords imports", + v1Compatible: true, + files: map[string]string{ + "test.rego": `package test + import rego.v1 + import future.keywords.if + allow if { + 1 < 2 + }`, + }, + // future.keywords are dropped as these are covered by rego.v1 + expectedFiles: map[string]string{ + ".manifest": `{"revision":"","roots":[""],"rego_version":1} +`, + "test.rego": `package test + +import rego.v1 + allow if { 1 < 2 } diff --git a/cmd/fmt.go b/cmd/fmt.go index 712d5d1d2c..196ea074d8 100644 --- a/cmd/fmt.go +++ b/cmd/fmt.go @@ -21,14 +21,15 @@ import ( ) type fmtCommandParams struct { - overwrite bool - list bool - diff bool - fail bool - regoV1 bool - v0Compatible bool - v1Compatible bool - checkResult bool + overwrite bool + list bool + diff bool + fail bool + regoV1 bool + v0Compatible bool + v1Compatible bool + checkResult bool + dropV0Imports bool } var fmtParams = fmtCommandParams{} @@ -133,7 +134,8 @@ func formatFile(params *fmtCommandParams, out io.Writer, filename string, info o } opts := format.Opts{ - RegoVersion: params.regoVersion(), + RegoVersion: params.regoVersion(), + DropV0Imports: params.dropV0Imports, } if params.v0Compatible { @@ -254,6 +256,7 @@ func init() { addV0CompatibleFlag(formatCommand.Flags(), &fmtParams.v0Compatible, false) addV1CompatibleFlag(formatCommand.Flags(), &fmtParams.v1Compatible, false) formatCommand.Flags().BoolVar(&fmtParams.checkResult, "check-result", true, "assert that the formatted code is valid and can be successfully parsed (default true)") + formatCommand.Flags().BoolVar(&fmtParams.dropV0Imports, "drop-v0-imports", false, "drop v0 imports from the formatted code, such as 'rego.v1' and 'future.keywords'") RootCommand.AddCommand(formatCommand) } diff --git a/cmd/fmt_test.go b/cmd/fmt_test.go index f6b67682e0..32b893ff55 100644 --- a/cmd/fmt_test.go +++ b/cmd/fmt_test.go @@ -887,10 +887,11 @@ q := all([true, false]) func TestFmt_DefaultRegoVersion(t *testing.T) { tests := []struct { - note string - input string - expected string - expectedErrs []string + note string + dropV0Imports bool + input string 
+ expected string + expectedErrs []string }{ { note: "no keywords used", @@ -939,6 +940,86 @@ p if { input.x == 1 } +q contains "foo" if { + input.x == 2 +} +`, + // NOTE: We keep the future imports to create the broadest possible compatibility surface + expected: `package test + +import future.keywords + +p if { + input.x == 1 +} + +q contains "foo" if { + input.x == 2 +} +`, + }, + { + note: "future imports, drop v0 imports", + input: `package test +import future.keywords.if +import future.keywords.contains +p if { + input.x == 1 +} + +q contains "foo" if { + input.x == 2 +} +`, + expected: `package test + +import future.keywords.contains +import future.keywords.if + +p if { + input.x == 1 +} + +q contains "foo" if { + input.x == 2 +} +`, + }, + { + note: "rego.v1 import", + input: `package test +import rego.v1 +p if { + input.x == 1 +} + +q contains "foo" if { + input.x == 2 +} +`, + // NOTE: We keep the rego.v1 import to create the broadest possible compatibility surface + expected: `package test + +import rego.v1 + +p if { + input.x == 1 +} + +q contains "foo" if { + input.x == 2 +} +`, + }, + { + note: "rego.v1 import, drop v0 imports", + dropV0Imports: true, + input: `package test +import rego.v1 +p if { + input.x == 1 +} + q contains "foo" if { input.x == 2 } @@ -998,6 +1079,7 @@ q := all([true, false]) for _, tc := range tests { t.Run(tc.note, func(t *testing.T) { params := fmtCommandParams{} + params.dropV0Imports = tc.dropV0Imports files := map[string]string{ "policy.rego": tc.input, diff --git a/v1/format/format.go b/v1/format/format.go index c1727099a3..56c30171dd 100644 --- a/v1/format/format.go +++ b/v1/format/format.go @@ -31,6 +31,10 @@ type Opts struct { // ParserOptions is the parser options used when parsing the module to be formatted. ParserOptions *ast.ParserOptions + + // DropV0Imports instructs the formatter to drop all v0 imports from the module; i.e. 'rego.v1' and 'future.keywords' imports. + // Imports are only removed if [Opts.RegoVersion] makes them redundant. 
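+	// For example (illustrative; mirroring the cmd/fmt tests in this patch): with
+	// RegoVersion set to ast.RegoV1 and DropV0Imports set, both `import rego.v1`
+	// and `import future.keywords.if` are removed from the formatted module.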
+ DropV0Imports bool } func (o Opts) effectiveRegoVersion() ast.RegoVersion { @@ -140,6 +144,7 @@ type fmtOpts struct { refHeads bool regoV1 bool + regoV1Imported bool futureKeywords []string } @@ -200,6 +205,7 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) { switch { case isRegoV1Compatible(n): + o.regoV1Imported = true o.contains = true o.ifs = true case future.IsAllFutureKeywords(n): @@ -234,14 +240,21 @@ func AstWithOpts(x interface{}, opts Opts) ([]byte, error) { switch x := x.(type) { case *ast.Module: - if regoVersion == ast.RegoV1 { + if regoVersion == ast.RegoV1 && opts.DropV0Imports { x.Imports = filterRegoV1Import(x.Imports) } else if regoVersion == ast.RegoV0CompatV1 { x.Imports = ensureRegoV1Import(x.Imports) } - if regoVersion == ast.RegoV0CompatV1 || regoVersion == ast.RegoV1 || moduleIsRegoV1Compatible(x) { - x.Imports = future.FilterFutureImports(x.Imports) + regoV1Imported := moduleIsRegoV1Compatible(x) + if regoVersion == ast.RegoV0CompatV1 || regoVersion == ast.RegoV1 || regoV1Imported { + if !opts.DropV0Imports && !regoV1Imported { + for _, kw := range o.futureKeywords { + x.Imports = ensureFutureKeywordImport(x.Imports, kw) + } + } else { + x.Imports = future.FilterFutureImports(x.Imports) + } } else { for kw := range extraFutureKeywordImports { x.Imports = ensureFutureKeywordImport(x.Imports, kw) diff --git a/v1/format/testfiles/v1/test_future_kw_import.rego b/v1/format/testfiles/v1/test_future_kw_import.rego new file mode 100644 index 0000000000..9c85383106 --- /dev/null +++ b/v1/format/testfiles/v1/test_future_kw_import.rego @@ -0,0 +1,21 @@ +package p + +# existing future.keywords imports kept for broadest compatibility surface +import future.keywords.every +import future.keywords.if + +r if { + every x in [1,3,5] { + is_odd(x) + } + + every x in [1,3,5] { is_odd(x); true } + + every x in [1,3,5] { + is_odd(x) + true + x < 10 + } +} + +is_odd(x) = x % 2 == 0 \ No newline at end of file diff --git a/v1/format/testfiles/v1/test_future_kw_import.rego.formatted b/v1/format/testfiles/v1/test_future_kw_import.rego.formatted new file mode 100644 index 0000000000..16033bed7d --- /dev/null +++ b/v1/format/testfiles/v1/test_future_kw_import.rego.formatted @@ -0,0 +1,21 @@ +package p + +# existing future.keywords imports kept for broadest compatibility surface +import future.keywords.every +import future.keywords.if + +r if { + every x in [1, 3, 5] { + is_odd(x) + } + + every x in [1, 3, 5] { is_odd(x); true} + + every x in [1, 3, 5] { + is_odd(x) + true + x < 10 + } +} + +is_odd(x) := (x % 2) == 0 diff --git a/v1/format/testfiles/v1/test_rego_v1.rego b/v1/format/testfiles/v1/test_rego_v1.rego index 8ebb10058a..6dbfde0a6e 100644 --- a/v1/format/testfiles/v1/test_rego_v1.rego +++ b/v1/format/testfiles/v1/test_rego_v1.rego @@ -1,6 +1,7 @@ package example -import rego.v1 # import will be dropped +import rego.v1 # rego.v1 import kept for broadest compatibility surface +import future.keywords.if # future.keywords imports are dropped, as they're covered by rego.v1 # R1: constant a := 1 diff --git a/v1/format/testfiles/v1/test_rego_v1.rego.formatted b/v1/format/testfiles/v1/test_rego_v1.rego.formatted index 5583e0e662..d7558fd545 100644 --- a/v1/format/testfiles/v1/test_rego_v1.rego.formatted +++ b/v1/format/testfiles/v1/test_rego_v1.rego.formatted @@ -1,6 +1,8 @@ package example -# import will be dropped +import rego.v1 # rego.v1 import kept for broadest compatibility surface + +# future.keywords imports are dropped, as they're covered by rego.v1 # R1: constant a := 
1 From 67cfa136482e6266698be98e3bce5f72bc88a6c4 Mon Sep 17 00:00:00 2001 From: cli-docs-updater Date: Tue, 17 Dec 2024 16:14:21 +0000 Subject: [PATCH 7/7] docs: Update generated CLI docs --- docs/content/cli.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/docs/content/cli.md b/docs/content/cli.md index 0d512c6c68..6b070b2ae8 100755 --- a/docs/content/cli.md +++ b/docs/content/cli.md @@ -672,14 +672,15 @@ opa fmt [path [...]] [flags] ### Options ``` - --check-result assert that the formatted code is valid and can be successfully parsed (default true) (default true) - -d, --diff only display a diff of the changes - --fail non zero exit code on reformat - -h, --help help for fmt - -l, --list list all files who would change when formatted - --rego-v1 format module(s) to be compatible with both Rego v1 and current OPA version) - --v0-compatible opt-in to OPA features and behaviors prior to the OPA v1.0 release. Takes precedence over --v1-compatible - -w, --write overwrite the original source file + --check-result assert that the formatted code is valid and can be successfully parsed (default true) + -d, --diff only display a diff of the changes + --drop-v0-imports drop v0 imports from the formatted code, such as 'rego.v1' and 'future.keywords' + --fail non-zero exit code on reformat + -h, --help help for fmt + -l, --list list all files that would change when formatted + --rego-v1 format module(s) to be compatible with both Rego v1 and current OPA version + --v0-compatible opt-in to OPA features and behaviors prior to the OPA v1.0 release. Takes precedence over --v1-compatible + -w, --write overwrite the original source file ``` ____