From 91412e540ef3b8464d6e3dc4a5cb9d6f14d0bce3 Mon Sep 17 00:00:00 2001
From: Luiz Carvalho
Date: Tue, 7 Nov 2023 09:06:42 -0500
Subject: [PATCH] Add support for informative tests

Some tests are required to be executed, but their results do not have to be
successful. They are meant to inform the user that their results need some
attention, but they are non-blocking. An example of such a test is a code
scanner that may produce a high number of false positives. These tests are
referred to as informative.

This commit modifies the `test` package to more easily support these types
of tests.

Also, since `test.no_skipped_tests` has been modified to emit a violation
instead of a warning, an effective date one month from now has been added.
This should allow users to gradually address any issues before they become
blocking.

Ref: EC-224

Signed-off-by: Luiz Carvalho
---
 policy/lib/rule_data.rego     |  6 +--
 policy/release/test.rego      | 71 +++++++++++++++++++++----
 policy/release/test_test.rego | 99 +++++++++++++++++++++--------------
 3 files changed, 121 insertions(+), 55 deletions(-)

diff --git a/policy/lib/rule_data.rego b/policy/lib/rule_data.rego
index 5779ec21..7b59bedf 100644
--- a/policy/lib/rule_data.rego
+++ b/policy/lib/rule_data.rego
@@ -22,10 +22,8 @@ rule_data_defaults := {
 		"SKIPPED",
 		"WARNING",
 	],
-	"failed_tests_results": [
-		"FAILURE",
-		"ERROR",
-	],
+	"failed_tests_results": ["FAILURE"],
+	"erred_tests_results": ["ERROR"],
 	"skipped_tests_results": ["SKIPPED"],
 	"warned_tests_results": ["WARNING"],
 	#
diff --git a/policy/release/test.rego b/policy/release/test.rego
index ae5b74c7..c6ae61e4 100644
--- a/policy/release/test.rego
+++ b/policy/release/test.rego
@@ -91,18 +91,17 @@ deny contains result if {
 }
 
 # METADATA
-# title: All required tests passed
+# title: No tests failed
 # description: >-
-#   Confirm that all the tests in the test results
-#   have a successful result. A successful result is one that isn't a
-#   "FAILURE" or "ERROR". This will fail if any of the tests failed and
-#   the failure message will list the names of the failing tests.
+#   Produce a violation if any non-informative tests have their result set to "FAILURE".
+#   The result type is configurable by the "failed_tests_results" key, and the list
+#   of informative tests is configurable by the "informative_tests" key in the rule data.
 # custom:
-#   short_name: required_tests_passed
-#   failure_msg: "Test %q did not complete successfully"
+#   short_name: no_failed_tests
+#   failure_msg: "Test %q failed"
 #   solution: >-
-#     There is a required test that did not pass. Make sure that any task
-#     in the build pipeline with a result named 'TEST_OUTPUT' passes.
+#     There is a test that failed. Make sure that any task in the build pipeline
+#     with a result named 'TEST_OUTPUT' does not fail.
 #   collections:
 #   - redhat
 #   depends_on:
@@ -110,13 +109,61 @@ deny contains result if {
 #
 deny contains result if {
 	some test in resulted_in(lib.rule_data("failed_tests_results"))
+	not test in lib.rule_data("informative_tests")
+	result := lib.result_helper_with_term(rego.metadata.chain(), [test], test)
+}
+
+# METADATA
+# title: No informative tests failed
+# description: >-
+#   Produce a warning if any informative tests have their result set to "FAILURE".
+#   The result type is configurable by the "failed_tests_results" key, and the list
+#   of informative tests is configurable by the "informative_tests" key in the rule data.
+# custom:
+#   short_name: no_failed_informative_tests
+#   failure_msg: "Informative test %q failed"
+#   solution: >-
+#     There is an informative test that failed. Make sure that any task in the
+#     build pipeline with a result named 'TEST_OUTPUT' does not fail.
+#   collections:
+#   - redhat
+#   depends_on:
+#   - test.test_data_found
+#
+warn contains result if {
+	some test in resulted_in(lib.rule_data("failed_tests_results"))
+	test in lib.rule_data("informative_tests")
+	result := lib.result_helper_with_term(rego.metadata.chain(), [test], test)
+}
+
+# METADATA
+# title: No tests erred
+# description: >-
+#   Produce a violation if any tests have their result set to "ERROR".
+#   The result type is configurable by the "erred_tests_results" key in the rule data.
+# custom:
+#   short_name: no_erred_tests
+#   failure_msg: "Test %q erred"
+#   solution: >-
+#     There is a test that erred. Make sure that any task in the build pipeline
+#     with a result named 'TEST_OUTPUT' does not err.
+#   collections:
+#   - redhat
+#   depends_on:
+#   - test.test_data_found
+#
+deny contains result if {
+	some test in resulted_in(lib.rule_data("erred_tests_results"))
 	result := lib.result_helper_with_term(rego.metadata.chain(), [test], test)
 }
 
 # METADATA
 # title: No tests were skipped
 # description: >-
-#   Produce a warning if any tests have their result set to "SKIPPED".
+#   Produce a violation if any tests have their result set to "SKIPPED".
+#   A skipped result means a prerequisite for executing the test was not met, e.g. a
+#   license key for executing a scanner was not provided.
+#   The result type is configurable by the "skipped_tests_results" key in the rule data.
 # custom:
 #   short_name: no_skipped_tests
 #   failure_msg: "Test %q was skipped"
@@ -128,8 +175,9 @@ deny contains result if {
 #   - redhat
 #   depends_on:
 #   - test.test_data_found
+#   effective_on: 2023-12-08T00:00:00Z
 #
-warn contains result if {
+deny contains result if {
 	some test in resulted_in(lib.rule_data("skipped_tests_results"))
 	result := lib.result_helper_with_term(rego.metadata.chain(), [test], test)
 }
@@ -138,6 +186,7 @@ warn contains result if {
 # title: No tests produced warnings
 # description: >-
 #   Produce a warning if any tests have their result set to "WARNING".
+#   The result type is configurable by the "warned_tests_results" key in the rule data.
 # custom:
 #   short_name: no_test_warnings
 #   failure_msg: "Test %q returned a warning"
diff --git a/policy/release/test_test.rego b/policy/release/test_test.rego
index 10e25f35..b025d152 100644
--- a/policy/release/test_test.rego
+++ b/policy/release/test_test.rego
@@ -92,18 +92,37 @@ test_failure_data {
 		),
 		lib_test.mock_slsav1_attestation_with_tasks([slsav1_task]),
 	]
+
+	lib.assert_empty(test.warn) with input.attestations as attestations
 	lib.assert_equal_results(test.deny, {
 		{
-			"code": "test.required_tests_passed",
-			"msg": "Test \"failed_1\" did not complete successfully",
+			"code": "test.no_failed_tests",
+			"msg": "Test \"failed_1\" failed",
 			"term": "failed_1",
 		},
 		{
-			"code": "test.required_tests_passed",
-			"msg": "Test \"task1\" did not complete successfully",
+			"code": "test.no_failed_tests",
+			"msg": "Test \"task1\" failed",
 			"term": "task1",
 		},
 	}) with input.attestations as attestations
+
+	# Failed informative tests cause warnings, not violations
+	lib.assert_empty(test.deny) with input.attestations as attestations
+		with data.rule_data.informative_tests as ["task1", "failed_1"]
+	lib.assert_equal_results(test.warn, {
+		{
+			"code": "test.no_failed_informative_tests",
+			"msg": "Informative test \"failed_1\" failed",
+			"term": "failed_1",
+		},
+		{
+			"code": "test.no_failed_informative_tests",
+			"msg": "Informative test \"task1\" failed",
+			"term": "task1",
+		},
+	}) with input.attestations as attestations
+		with data.rule_data.informative_tests as ["task1", "failed_1"]
 }
 
 mock_an_errored_test := lib_test.att_mock_helper_ref(
 	lib.task_test_result_name,
@@ -126,13 +145,13 @@ test_error_data {
 	]
 	lib.assert_equal_results(test.deny, {
 		{
-			"code": "test.required_tests_passed",
-			"msg": "Test \"errored_1\" did not complete successfully",
+			"code": "test.no_erred_tests",
+			"msg": "Test \"errored_1\" erred",
 			"term": "errored_1",
 		},
 		{
-			"code": "test.required_tests_passed",
-			"msg": "Test \"errored_2\" did not complete successfully",
+			"code": "test.no_erred_tests",
+			"msg": "Test \"errored_2\" erred",
 			"term": "errored_2",
 		},
 	}) with input.attestations as attestations
@@ -157,29 +176,29 @@ test_mix_data {
 	]
 	lib.assert_equal_results(test.deny, {
 		{
-			"code": "test.required_tests_passed",
-			"msg": "Test \"failed_1\" did not complete successfully",
+			"code": "test.no_failed_tests",
+			"msg": "Test \"failed_1\" failed",
 			"term": "failed_1",
 		},
 		{
-			"code": "test.required_tests_passed",
-			"msg": "Test \"errored_1\" did not complete successfully",
+			"code": "test.no_erred_tests",
+			"msg": "Test \"errored_1\" erred",
 			"term": "errored_1",
 		},
 		{
-			"code": "test.required_tests_passed",
-			"msg": "Test \"failed_2\" did not complete successfully",
+			"code": "test.no_failed_tests",
+			"msg": "Test \"failed_2\" failed",
 			"term": "failed_2",
 		},
 		{
-			"code": "test.required_tests_passed",
-			"msg": "Test \"errored_2\" did not complete successfully",
+			"code": "test.no_erred_tests",
+			"msg": "Test \"errored_2\" erred",
 			"term": "errored_2",
 		},
 	}) with input.attestations as attestations
 }
 
-test_skipped_is_not_deny {
+test_skipped_is_not_warning {
 	attestations := [
 		lib_test.att_mock_helper_ref(
 			lib.task_test_result_name,
 			{"result": "SKIPPED"},
 			"skipped_1",
 			[],
 		),
 		lib_test.mock_slsav1_attestation_with_tasks([lib_test.slsav1_task_result_ref("task1", [{
 			"name": lib.task_test_result_name,
 			"type": "string",
 			"value": {"result": "SKIPPED"},
 		}])]),
 	]
-	lib.assert_empty(test.deny) with input.attestations as attestations
+	lib.assert_empty(test.warn) with input.attestations as attestations
 }
 
-test_skipped_is_warning {
+test_skipped_is_deny {
 	attestations := [
 		lib_test.att_mock_helper_ref(
 			lib.task_test_result_name,
 			{"result": "SKIPPED"},
 			"skipped_1",
 			[],
 		),
 		lib_test.mock_slsav1_attestation_with_tasks([lib_test.slsav1_task_result_ref("task1", [{
 			"name": lib.task_test_result_name,
 			"type": "string",
 			"value": {"result": "SKIPPED"},
 		}])]),
 	]
-	lib.assert_equal_results(test.warn, {
+	lib.assert_equal_results(test.deny, {
 		{
 			"code": "test.no_skipped_tests",
 			"msg": "Test \"skipped_1\" was skipped",
@@ -287,38 +306,35 @@ test_mixed_statuses {
 	lib.assert_equal_results(test.deny, {
 		{
-			"code": "test.required_tests_passed",
-			"msg": "Test \"error_1\" did not complete successfully",
+			"code": "test.no_erred_tests",
+			"msg": "Test \"error_1\" erred",
 			"term": "error_1",
 		},
 		{
-			"code": "test.required_tests_passed",
-			"msg": "Test \"error_2\" did not complete successfully",
+			"code": "test.no_erred_tests",
+			"msg": "Test \"error_2\" erred",
 			"term": "error_2",
 		},
 		{
-			"code": "test.required_tests_passed",
-			"msg": "Test \"failure_1\" did not complete successfully",
+			"code": "test.no_failed_tests",
+			"msg": "Test \"failure_1\" failed",
 			"term": "failure_1",
 		},
 		{
-			"code": "test.required_tests_passed",
-			"msg": "Test \"failure_2\" did not complete successfully",
+			"code": "test.no_failed_tests",
+			"msg": "Test \"failure_2\" failed",
 			"term": "failure_2",
 		},
 		{
-			"code": "test.required_tests_passed",
-			"msg": "Test \"failure_20\" did not complete successfully",
+			"code": "test.no_failed_tests",
+			"msg": "Test \"failure_20\" failed",
 			"term": "failure_20",
 		},
 		{
-			"code": "test.required_tests_passed",
-			"msg": "Test \"error_20\" did not complete successfully",
+			"code": "test.no_erred_tests",
+			"msg": "Test \"error_20\" erred",
 			"term": "error_20",
 		},
-	}) with input.attestations as test_results
-
-	lib.assert_equal_results(test.warn, {
 		{
 			"code": "test.no_skipped_tests",
 			"msg": "Test \"skipped_1\" was skipped",
 			"term": "skipped_1",
 		},
 		{
 			"code": "test.no_skipped_tests",
 			"msg": "Test \"skipped_2\" was skipped",
 			"term": "skipped_2",
 		},
@@ -329,6 +345,14 @@ test_mixed_statuses {
+		{
+			"code": "test.no_skipped_tests",
+			"msg": "Test \"skipped_20\" was skipped",
+			"term": "skipped_20",
+		},
+	}) with input.attestations as test_results
+
+	lib.assert_equal_results(test.warn, {
 		{
 			"code": "test.no_test_warnings",
 			"msg": "Test \"warning_1\" returned a warning",
 			"term": "warning_1",
 		},
 		{
 			"code": "test.no_test_warnings",
 			"msg": "Test \"warning_2\" returned a warning",
 			"term": "warning_2",
 		},
 		{
 			"code": "test.no_test_warnings",
 			"msg": "Test \"warning_20\" returned a warning",
 			"term": "warning_20",
 		},
-		{
-			"code": "test.no_skipped_tests",
-			"msg": "Test \"skipped_20\" was skipped",
-			"term": "skipped_20",
-		},
 	}) with input.attestations as test_results
 }
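
For reviewers who want to see the new behavior in isolation, here is a minimal, self-contained sketch of the deny/warn split this patch introduces. The package name and the test_results/rule_data shapes are illustrative assumptions, not the actual ec-policies structures; the real rules pull results from the attestations via resulted_in and read configuration via lib.rule_data.

package example

import rego.v1

# Stand-in for the attestation-derived test results.
test_results := [
	{"name": "unit-tests", "result": "FAILURE"},
	{"name": "clamav-scan", "result": "FAILURE"},
	{"name": "sast-scan", "result": "ERROR"},
]

# Stand-in for the configurable rule data.
rule_data := {
	"failed_tests_results": ["FAILURE"],
	"erred_tests_results": ["ERROR"],
	"informative_tests": ["clamav-scan"],
}

# Names of tests whose result is in the given list (the role played by
# resulted_in in the patch).
resulted_in(results) := {test.name |
	some test in test_results
	test.result in results
}

# Failures of non-informative tests are blocking violations.
deny contains msg if {
	some name in resulted_in(rule_data.failed_tests_results)
	not name in rule_data.informative_tests
	msg := sprintf("Test %q failed", [name])
}

# Failures of informative tests only produce warnings.
warn contains msg if {
	some name in resulted_in(rule_data.failed_tests_results)
	name in rule_data.informative_tests
	msg := sprintf("Informative test %q failed", [name])
}

# Erred tests are always blocking.
deny contains msg if {
	some name in resulted_in(rule_data.erred_tests_results)
	msg := sprintf("Test %q erred", [name])
}

Evaluating this module with, for example, `opa eval --data example.rego 'data.example'` puts "unit-tests" and "sast-scan" in deny while "clamav-scan" lands in warn, mirroring the no_failed_tests / no_failed_informative_tests / no_erred_tests split above.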
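The effective_on date on test.no_skipped_tests implements the gradual rollout described in the commit message: the violation only starts blocking once the date passes. That gating is handled by the tooling that evaluates these policies, not by the rule body itself; the snippet below is only a hedged illustration of the idea, using OPA's real time built-ins but an invented is_effective helper.

package example_effective

import rego.v1

# Illustrative only: a finding becomes blocking once its effective_on
# timestamp is in the past; before then a consumer could report the
# same finding as a warning instead of a violation.
is_effective(effective_on) if {
	time.parse_rfc3339_ns(effective_on) <= time.now_ns()
}

# Undefined before 2023-12-08, true afterwards.
blocking if is_effective("2023-12-08T00:00:00Z")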