Commit c4603b8

Merge pull request #793 from lcarva/EC-224

Add support for informative tests

lcarva authored Nov 8, 2023
2 parents f801730 + 91412e5 commit c4603b8
Showing 3 changed files with 121 additions and 55 deletions.
6 changes: 2 additions & 4 deletions policy/lib/rule_data.rego
@@ -22,10 +22,8 @@ rule_data_defaults := {
"SKIPPED",
"WARNING",
],
"failed_tests_results": [
"FAILURE",
"ERROR",
],
"failed_tests_results": ["FAILURE"],
"erred_tests_results": ["ERROR"],
"skipped_tests_results": ["SKIPPED"],
"warned_tests_results": ["WARNING"],
#
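The old combined list is split so that each result type gets its own rule data key. Below is a minimal sketch of how the new keys could be exercised, assuming lib.rule_data falls back to the defaults above when data.rule_data supplies no override (the package name, test names, and the "TIMEOUT" value are hypothetical):

package rule_data_split_example

import data.lib

# Assumption: with no override present, lib.rule_data returns the
# default declared in rule_data_defaults above.
test_erred_results_default {
	lib.rule_data("erred_tests_results") == ["ERROR"]
}

# Supplying rule data replaces the default for that key only.
# "TIMEOUT" is a made-up result type, used purely for illustration.
test_erred_results_override {
	results := lib.rule_data("erred_tests_results")
		with data.rule_data.erred_tests_results as ["ERROR", "TIMEOUT"]
	results == ["ERROR", "TIMEOUT"]
}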
71 changes: 60 additions & 11 deletions policy/release/test.rego
@@ -91,32 +91,79 @@ deny contains result if {
}

# METADATA
- # title: All required tests passed
+ # title: No tests failed
# description: >-
- # Confirm that all the tests in the test results
- # have a successful result. A successful result is one that isn't a
- # "FAILURE" or "ERROR". This will fail if any of the tests failed and
- # the failure message will list the names of the failing tests.
+ # Produce a violation if any non-informative tests have their result set to "FAILURE".
+ # The result type is configurable by the "failed_tests_results" key, and the list
+ # of informative tests is configurable by the "informative_tests" key in the rule data.
# custom:
- # short_name: required_tests_passed
- # failure_msg: "Test %q did not complete successfully"
+ # short_name: no_failed_tests
+ # failure_msg: "Test %q failed"
# solution: >-
- # There is a required test that did not pass. Make sure that any task
- # in the build pipeline with a result named 'TEST_OUTPUT' passes.
+ # There is a test that failed. Make sure that any task in the build pipeline
+ # with a result named 'TEST_OUTPUT' does not fail.
# collections:
# - redhat
# depends_on:
# - test.test_data_found
#
deny contains result if {
some test in resulted_in(lib.rule_data("failed_tests_results"))
+ not test in lib.rule_data("informative_tests")
result := lib.result_helper_with_term(rego.metadata.chain(), [test], test)
}

+ # METADATA
+ # title: No informative tests failed
+ # description: >-
+ # Produce a warning if any informative tests have their result set to "FAILURE".
+ # The result type is configurable by the "failed_tests_results" key, and the list
+ # of informative tests is configurable by the "informative_tests" key in the rule data.
+ # custom:
+ # short_name: no_failed_informative_tests
+ # failure_msg: "Informative test %q failed"
+ # solution: >-
+ # There is a test that failed. Make sure that any task in the build pipeline
+ # with a result named 'TEST_OUTPUT' does not fail.
+ # collections:
+ # - redhat
+ # depends_on:
+ # - test.test_data_found
+ #
+ warn contains result if {
+ some test in resulted_in(lib.rule_data("failed_tests_results"))
+ test in lib.rule_data("informative_tests")
+ result := lib.result_helper_with_term(rego.metadata.chain(), [test], test)
+ }
+
+ # METADATA
+ # title: No tests erred
+ # description: >-
+ # Produce a violation if any tests have their result set to "ERROR".
+ # The result type is configurable by the "erred_tests_results" key in the rule data.
+ # custom:
+ # short_name: no_erred_tests
+ # failure_msg: "Test %q erred"
+ # solution: >-
+ # There is a test that erred. Make sure that any task in the build pipeline
+ # with a result named 'TEST_OUTPUT' does not err.
+ # collections:
+ # - redhat
+ # depends_on:
+ # - test.test_data_found
+ #
+ deny contains result if {
+ some test in resulted_in(lib.rule_data("erred_tests_results"))
+ result := lib.result_helper_with_term(rego.metadata.chain(), [test], test)
+ }

# METADATA
# title: No tests were skipped
# description: >-
- # Produce a warning if any tests have their result set to "SKIPPED".
+ # Produce a violation if any tests have their result set to "SKIPPED".
+ # A skipped result means a prerequisite for executing the test was not met, e.g. a
+ # license key for executing a scanner was not provided.
+ # The result type is configurable by the "skipped_tests_results" key in the rule data.
# custom:
# short_name: no_skipped_tests
# failure_msg: "Test %q was skipped"
@@ -128,8 +175,9 @@ deny contains result if {
# - redhat
# depends_on:
# - test.test_data_found
+ # effective_on: 2023-12-08T00:00:00Z
#
- warn contains result if {
+ deny contains result if {
some test in resulted_in(lib.rule_data("skipped_tests_results"))
result := lib.result_helper_with_term(rego.metadata.chain(), [test], test)
}
@@ -138,6 +186,7 @@ warn contains result if {
# title: No tests produced warnings
# description: >-
# Produce a warning if any tests have their result set to "WARNING".
# The result type is configurable by the "warned_tests_results" key in the rule data.
# custom:
# short_name: no_test_warnings
# failure_msg: "Test %q returned a warning"
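Net effect of the rule changes above: a failed test is a violation unless it appears in the "informative_tests" rule data, in which case it only warns; erred tests now get a dedicated violation with its own message; and skipped tests are promoted from a warning to a violation, with the new effective_on date (2023-12-08) presumably giving existing pipelines a grace period before enforcement begins. A toy sketch of the failed/informative split (illustration only, not code from this repository; the test names are invented):

package informative_split_example

# Names of tests whose TEST_OUTPUT result matched "failed_tests_results".
failed := {"smoke-test", "unit-test"}

# Hypothetical "informative_tests" rule data.
informative := {"smoke-test"}

# Set difference: non-informative failures become violations (deny).
violations := failed - informative # {"unit-test"}

# Set intersection: informative failures only produce warnings (warn).
warnings := failed & informative # {"smoke-test"}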
99 changes: 59 additions & 40 deletions policy/release/test_test.rego
@@ -92,18 +92,37 @@ test_failure_data {
),
lib_test.mock_slsav1_attestation_with_tasks([slsav1_task]),
]

+ lib.assert_empty(test.warn) with input.attestations as attestations
lib.assert_equal_results(test.deny, {
{
"code": "test.required_tests_passed",
"msg": "Test \"failed_1\" did not complete successfully",
"code": "test.no_failed_tests",
"msg": "Test \"failed_1\" failed",
"term": "failed_1",
},
{
"code": "test.required_tests_passed",
"msg": "Test \"task1\" did not complete successfully",
"code": "test.no_failed_tests",
"msg": "Test \"task1\" failed",
"term": "task1",
},
}) with input.attestations as attestations

+ # Failed informative tests cause warnings, not violations
+ lib.assert_empty(test.deny) with input.attestations as attestations
+ with data.rule_data.informative_tests as ["task1", "failed_1"]
+ lib.assert_equal_results(test.warn, {
+ {
+ "code": "test.no_failed_informative_tests",
+ "msg": "Informative test \"failed_1\" failed",
+ "term": "failed_1",
+ },
+ {
+ "code": "test.no_failed_informative_tests",
+ "msg": "Informative test \"task1\" failed",
+ "term": "task1",
+ },
+ }) with input.attestations as attestations
+ with data.rule_data.informative_tests as ["task1", "failed_1"]
}

mock_an_errored_test := lib_test.att_mock_helper_ref(
@@ -126,13 +145,13 @@ test_error_data {
]
lib.assert_equal_results(test.deny, {
{
"code": "test.required_tests_passed",
"msg": "Test \"errored_1\" did not complete successfully",
"code": "test.no_erred_tests",
"msg": "Test \"errored_1\" erred",
"term": "errored_1",
},
{
"code": "test.required_tests_passed",
"msg": "Test \"errored_2\" did not complete successfully",
"code": "test.no_erred_tests",
"msg": "Test \"errored_2\" erred",
"term": "errored_2",
},
}) with input.attestations as attestations
@@ -157,29 +176,29 @@ test_mix_data {
]
lib.assert_equal_results(test.deny, {
{
"code": "test.required_tests_passed",
"msg": "Test \"failed_1\" did not complete successfully",
"code": "test.no_failed_tests",
"msg": "Test \"failed_1\" failed",
"term": "failed_1",
},
{
"code": "test.required_tests_passed",
"msg": "Test \"errored_1\" did not complete successfully",
"code": "test.no_erred_tests",
"msg": "Test \"errored_1\" erred",
"term": "errored_1",
},
{
"code": "test.required_tests_passed",
"msg": "Test \"failed_2\" did not complete successfully",
"code": "test.no_failed_tests",
"msg": "Test \"failed_2\" failed",
"term": "failed_2",
},
{
"code": "test.required_tests_passed",
"msg": "Test \"errored_2\" did not complete successfully",
"code": "test.no_erred_tests",
"msg": "Test \"errored_2\" erred",
"term": "errored_2",
},
}) with input.attestations as attestations
}

- test_skipped_is_not_deny {
+ test_skipped_is_not_warning {
attestations := [
lib_test.att_mock_helper_ref(
lib.task_test_result_name,
@@ -191,10 +210,10 @@ test_skipped_is_not_deny {
"value": {"result": "SKIPPED"},
}])]),
]
- lib.assert_empty(test.deny) with input.attestations as attestations
+ lib.assert_empty(test.warn) with input.attestations as attestations
}

- test_skipped_is_warning {
+ test_skipped_is_deny {
attestations := [
lib_test.att_mock_helper_ref(
lib.task_test_result_name,
@@ -206,7 +225,7 @@ test_skipped_is_warning {
"value": {"result": "SKIPPED"},
}])]),
]
- lib.assert_equal_results(test.warn, {
+ lib.assert_equal_results(test.deny, {
{
"code": "test.no_skipped_tests",
"msg": "Test \"skipped_1\" was skipped",
Expand Down Expand Up @@ -287,38 +306,35 @@ test_mixed_statuses {

lib.assert_equal_results(test.deny, {
{
"code": "test.required_tests_passed",
"msg": "Test \"error_1\" did not complete successfully",
"code": "test.no_erred_tests",
"msg": "Test \"error_1\" erred",
"term": "error_1",
},
{
"code": "test.required_tests_passed",
"msg": "Test \"error_2\" did not complete successfully",
"code": "test.no_erred_tests",
"msg": "Test \"error_2\" erred",
"term": "error_2",
},
{
"code": "test.required_tests_passed",
"msg": "Test \"failure_1\" did not complete successfully",
"code": "test.no_failed_tests",
"msg": "Test \"failure_1\" failed",
"term": "failure_1",
},
{
"code": "test.required_tests_passed",
"msg": "Test \"failure_2\" did not complete successfully",
"code": "test.no_failed_tests",
"msg": "Test \"failure_2\" failed",
"term": "failure_2",
},
{
"code": "test.required_tests_passed",
"msg": "Test \"failure_20\" did not complete successfully",
"code": "test.no_failed_tests",
"msg": "Test \"failure_20\" failed",
"term": "failure_20",
},
{
"code": "test.required_tests_passed",
"msg": "Test \"error_20\" did not complete successfully",
"code": "test.no_erred_tests",
"msg": "Test \"error_20\" erred",
"term": "error_20",
},
- }) with input.attestations as test_results
-
- lib.assert_equal_results(test.warn, {
{
"code": "test.no_skipped_tests",
"msg": "Test \"skipped_1\" was skipped",
@@ -329,6 +345,14 @@ test_mixed_statuses {
"msg": "Test \"skipped_2\" was skipped",
"term": "skipped_2",
},
+ {
+ "code": "test.no_skipped_tests",
+ "msg": "Test \"skipped_20\" was skipped",
+ "term": "skipped_20",
+ },
+ }) with input.attestations as test_results
+
+ lib.assert_equal_results(test.warn, {
{
"code": "test.no_test_warnings",
"msg": "Test \"warning_1\" returned a warning",
@@ -344,11 +368,6 @@ test_mixed_statuses {
"msg": "Test \"warning_20\" returned a warning",
"term": "warning_20",
},
- {
- "code": "test.no_skipped_tests",
- "msg": "Test \"skipped_20\" was skipped",
- "term": "skipped_20",
- },
}) with input.attestations as test_results
}

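Both the rules and the tests above lean on the resulted_in helper, which is defined elsewhere in test.rego and is not part of this diff. A plausible shape for it, assuming each parsed test result exposes a name and a result string (a guess at the interface for illustration, not the repository's actual implementation):

package resulted_in_sketch

import future.keywords.in

# Hypothetical input shape: [{"name": "smoke-test", "result": "FAILURE"}, ...]
all_test_results := input.tests

# Names of tests whose result is one of the given result types.
resulted_in(results) := {t.name |
	some t in all_test_results
	t.result in results
}

The suite itself runs under OPA's built-in test runner, for example "opa test" pointed at the policy directory.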
