
Commit

Merge branch 'master' into openapi_runner
ayajbara committed Apr 13, 2022
2 parents ebbbe01 + 4c72288 commit 7cdf941
Showing 37 changed files with 3,437 additions and 2,889 deletions.
2 changes: 1 addition & 1 deletion admissioncontroller/checkov-requirements.txt
@@ -1 +1 @@
checkov==2.0.1051
checkov==2.0.1058
2 changes: 1 addition & 1 deletion admissioncontroller/k8s/deployment.yaml
@@ -31,7 +31,7 @@ spec:
drop:
- ALL
- NET_RAW
image: bridgecrew/whorf@sha256:d3f459cba6a2cd325855de025b5be6e3b2efc79cc926da72ea8c31690d3044ec
image: bridgecrew/whorf@sha256:4eec51a5818b74e6d6a003e2b2c0f7a9e0d32b0f69e298427ed16bfb0245a657
resources:
limits:
cpu: "1"
3 changes: 3 additions & 0 deletions checkov/bicep/runner.py
@@ -72,6 +72,9 @@ def run(
if not self.context or not self.definitions:
file_paths = get_scannable_file_paths(root_folder=root_folder, files=files)

if not file_paths:
return report

self.definitions, self.definitions_raw, parsing_errors = Parser().get_files_definitions(file_paths)

report.add_parsing_errors(parsing_errors)
6 changes: 6 additions & 0 deletions checkov/common/output/record.py
@@ -10,11 +10,13 @@
from checkov.common.models.enums import CheckResult
from checkov.common.typing import _CheckResult
from checkov.common.util.file_utils import convert_to_unix_path
from checkov.common.util.type_forcers import force_int

init(autoreset=True)

DEFAULT_SEVERITY = "none" # equivalent to a score of 0.0 in the CVSS v3.0 Ratings

OUTPUT_CODE_LINE_LIMIT = force_int(os.getenv('CHECKOV_OUTPUT_CODE_LINE_LIMIT')) or 50

class Record:
check_id = ""
@@ -118,6 +120,10 @@ def _code_line_string(code_block: List[Tuple[int, str]], colorized: bool = True)
color_codes = (Fore.WHITE if colorized else "", Fore.YELLOW if colorized else "")
last_line_number_len = len(str(code_block[-1][0]))

if len(code_block) >= OUTPUT_CODE_LINE_LIMIT:
return f'\t\t{color_codes[1]}Code lines for this resource are too many. ' \
f'Please use IDE of your choice to review the file.'

for line_num, line in code_block:
spaces = " " * (last_line_number_len - len(str(line_num)))
if line.lstrip().startswith("#"):
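The new OUTPUT_CODE_LINE_LIMIT threshold is read from the CHECKOV_OUTPUT_CODE_LINE_LIMIT environment variable and falls back to 50. A minimal sketch of how the override resolves (illustration only, not part of this commit):

import os

from checkov.common.util.type_forcers import force_int

# Set before Checkov builds its output; unset or non-numeric values fall back to 50.
os.environ["CHECKOV_OUTPUT_CODE_LINE_LIMIT"] = "100"

limit = force_int(os.getenv("CHECKOV_OUTPUT_CODE_LINE_LIMIT")) or 50
print(limit)  # 100 -- code blocks with at least this many lines are replaced by a short notice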
18 changes: 9 additions & 9 deletions checkov/common/parsers/yaml/parser.py
@@ -10,7 +10,7 @@
logger = logging.getLogger(__name__)


def parse(filename: str) -> tuple[dict[str, Any] | list[dict[str, Any]], list[tuple[int, str]]] | tuple[None, None]:
def parse(filename: str) -> tuple[dict[str, Any] | list[dict[str, Any]], list[tuple[int, str]]] | None:
template = None
template_lines = None
try:
@@ -23,27 +23,27 @@ def parse(filename: str) -> tuple[dict[str, Any] | list[dict[str, Any]], list[tu
if t and (isinstance(t, dict) or isinstance(t, list)):
return t, template_lines
else:
return None, None
return None
else:
return None, None
return None
except IOError as e:
if e.errno == 2:
logger.error('Template file not found: %s', filename)
return None, None
return None
elif e.errno == 21:
logger.error('Template references a directory, not a file: %s',
filename)
return None, None
return None
elif e.errno == 13:
logger.error('Permission denied when accessing template file: %s',
filename)
return None, None
return None
except UnicodeDecodeError:
logger.error('Cannot read file contents: %s', filename)
return None, None
return None
except YAMLError:
if filename.endswith(".yaml") or filename.endswith(".yml"):
logger.debug('Cannot read file contents: %s - is it a yaml?', filename)
return None, None
return None

return None, None
return None
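Since parse() now returns a single None on failure instead of the old (None, None) pair, callers unpack the result only after a None check. A minimal caller-side sketch (hypothetical template path, not part of this commit):

from checkov.common.parsers.yaml.parser import parse

result = parse("deployment.yaml")  # hypothetical template path
if result is not None:
    definitions, definition_lines = result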
1 change: 1 addition & 0 deletions checkov/common/runners/base_runner.py
@@ -64,6 +64,7 @@ def set_external_data(
definitions: Optional[Dict[str, Dict[str, Any]]],
context: Optional[Dict[str, Dict[str, Any]]],
breadcrumbs: Optional[Dict[str, Dict[str, Any]]],
**kwargs
) -> None:
self.definitions = definitions
self.context = context
2 changes: 1 addition & 1 deletion checkov/common/runners/object_runner.py
@@ -32,7 +32,7 @@ def _load_files(
@abstractmethod
def _parse_file(
self, f: str
) -> tuple[dict[str, Any] | list[dict[str, Any]], list[tuple[int, str]]] | tuple[None, None]:
) -> tuple[dict[str, Any] | list[dict[str, Any]], list[tuple[int, str]]] | None:
raise Exception("parser should be imported by deriving class")

def run(
4 changes: 2 additions & 2 deletions checkov/example_runner/runner.py
@@ -44,15 +44,15 @@ def import_registry(self) -> BaseCheckRegistry:

def _parse_file(
self, f: str
) -> tuple[dict[str, Any] | list[dict[str, Any]], list[tuple[int, str]]] | tuple[None, None]:
) -> tuple[dict[str, Any] | list[dict[str, Any]], list[tuple[int, str]]] | None:
# EDIT: add a conditional here to ensure this file is something we should parse.
# Below is an example for GitHub Actions, as the file is always located in a predictable path.
# There should always be a conditional; otherwise you'll parse ALL files.
if ".github/workflows/" in os.path.abspath(f):
return super()._parse_file(f)

return None, None
return None


# An abstract function placeholder to determine the start and end lines.
2 changes: 2 additions & 0 deletions checkov/github/dal.py
@@ -74,6 +74,8 @@ def get_organization_security(self):
}
}
""", variables={'org': self.org})
if not data:
return None
if org_security_schema.validate(data):
self._organization_security = data
return self._organization_security
7 changes: 5 additions & 2 deletions checkov/json_doc/runner.py
@@ -16,8 +16,11 @@ def import_registry(self) -> BaseCheckRegistry:
from checkov.json_doc.registry import registry
return registry

def _parse_file(self, f: str) -> tuple[dict[str, Any] | list[dict[str, Any]], list[tuple[int, str]]] | tuple[None, None]:
content: tuple[dict[str, Any] | list[dict[str, Any]], list[tuple[int, str]]] | tuple[None, None] = parse(f)
def _parse_file(self, f: str) -> tuple[dict[str, Any] | list[dict[str, Any]], list[tuple[int, str]]] | None:
if not f.endswith(".json"):
return None

content: tuple[dict[str, Any] | list[dict[str, Any]], list[tuple[int, str]]] | None = parse(f)
return content

def get_start_end_lines(self, end: int, result_config: DictNode, start: int) -> tuple[int, int]:
83 changes: 83 additions & 0 deletions checkov/kubernetes/checks/resource/base_rbac_check.py
@@ -0,0 +1,83 @@
from checkov.kubernetes.checks.resource.base_spec_check import BaseK8Check
from checkov.common.models.enums import CheckCategories, CheckResult
from typing import Dict, Any, List


class RbacOperation():
"""
A collection of RBAC permissions that permit a certain operation within Kubernetes.
For example, the RbacOperation below denotes a write operation on admission webhooks.
control_webhooks = RbacOperation(
apigroups=["admissionregistration.k8s.io"],
verbs=["create", "update", "patch"],
resources=["mutatingwebhookconfigurations", "validatingwebhookconfigurations"])
Rules matching an apiGroup, verb and resource should be able to perform the operation.
"""
def __init__(self, apigroups: List[str], verbs: List[str],
resources: List[str]):
self.apigroups = apigroups
self.verbs = verbs
self.resources = resources


class BaseRbacK8sCheck(BaseK8Check):
"""
Base class for checks that evaluate RBAC permissions in Roles and ClusterRoles
"""
def __init__(self, name, id, supported_entities=None):
if supported_entities is None:
supported_entities = ["Role", "ClusterRole"]
categories = [CheckCategories.KUBERNETES]
super().__init__(name=name, id=id, categories=categories, supported_entities=supported_entities)
# A role that grants *ALL* of the RbacOperations in failing_operations fails this check
self.failing_operations: List[RbacOperation] = []

def scan_spec_conf(self, conf):
rules = conf.get("rules")
if rules and isinstance(rules, list):
for operation in self.failing_operations:
# if one operation can't be found, check passes
if not any(self.rule_can(rule, operation) for rule in rules):
return CheckResult.PASSED
# all operations were found, therefore the check fails
return CheckResult.FAILED

return CheckResult.PASSED

# Check if a rule has an apigroup, verb, and resource specified in @operation
def rule_can(self, rule: Dict[str, Any], operation: RbacOperation) -> bool:
return self.apigroup_or_wildcard(rule, operation.apigroups) and \
self.verb_or_wildcard(rule, operation.verbs) and \
self.resource_or_wildcard(rule, operation.resources)

def apigroup_or_wildcard(self, rule: Dict[str, Any], apigroups: List[str]) -> bool:
return self.value_or_wildcard(rule, "apiGroups", apigroups)

def verb_or_wildcard(self, rule: Dict[str, Any], verbs: List[str]) -> bool:
return self.value_or_wildcard(rule, "verbs", verbs)

def resource_or_wildcard(self, rule: Dict[str, Any], resources: List[str]) -> bool:
if "resources" in rule:
for granted_resource in rule["resources"]:
if self.is_wildcard(granted_resource):
return True
for failing_resource in resources:
if granted_resource == failing_resource:
return True
# Check for '*/subresource' syntax
if "/" in failing_resource and "/" in granted_resource:
if granted_resource == "*/" + failing_resource.split("/")[1]:
return True
return False

# Check if rule has a key with a wildcard or a value from @value_list
def value_or_wildcard(self, rule: Dict[str, Any], key: str, value_list: List[str]) -> bool:
if key in rule:
for value in rule[key]:
if self.is_wildcard(value) or value in value_list:
return True
return False

# Check if value is a K8s RBAC wildcard
def is_wildcard(self, value: str) -> bool:
return value == "*" or value == "*/*"
26 changes: 26 additions & 0 deletions checkov/kubernetes/checks/resource/k8s/RbacApproveCertificateSigningRequests.py
@@ -0,0 +1,26 @@
from checkov.kubernetes.checks.resource.base_rbac_check import BaseRbacK8sCheck, RbacOperation


class RbacApproveCertificateSigningRequests(BaseRbacK8sCheck):
def __init__(self):
name = "Minimize ClusterRoles that grant permissions to approve CertificateSigningRequests"
id = "CKV_K8S_156"
supported_entities = ["ClusterRole"]
super().__init__(name=name, id=id, supported_entities=supported_entities)

# See https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/
self.failing_operations = [
RbacOperation(
apigroups=["certificates.k8s.io"],
verbs=["update", "patch"],
resources=["certificatesigningrequests/approval"]
),
RbacOperation(
apigroups=["certificates.k8s.io"],
verbs=["approve"],
resources=["signers"]
),
]


check = RbacApproveCertificateSigningRequests()
20 changes: 20 additions & 0 deletions checkov/kubernetes/checks/resource/k8s/RbacControlWebhooks.py
@@ -0,0 +1,20 @@
from checkov.kubernetes.checks.resource.base_rbac_check import BaseRbacK8sCheck, RbacOperation


class RbacControlWebhooks(BaseRbacK8sCheck):
def __init__(self):
name = "Minimize ClusterRoles that grant control over validating or mutating admission webhook configurations"
id = "CKV_K8S_155"
supported_entities = ["ClusterRole"]
super().__init__(name=name, id=id, supported_entities=supported_entities)

self.failing_operations = [
RbacOperation(
apigroups=["admissionregistration.k8s.io"],
verbs=["create", "update", "patch"],
resources=["mutatingwebhookconfigurations", "validatingwebhookconfigurations"]
),
]


check = RbacControlWebhooks()
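A minimal sketch (not part of this commit) of how the new RBAC base class evaluates a rule set; the conf dict stands in for a parsed ClusterRole manifest as scan_spec_conf() receives it:

from checkov.common.models.enums import CheckResult
from checkov.kubernetes.checks.resource.k8s.RbacControlWebhooks import check

cluster_role_conf = {
    "apiVersion": "rbac.authorization.k8s.io/v1",
    "kind": "ClusterRole",
    "metadata": {"name": "webhook-admin"},  # hypothetical role name
    "rules": [
        {
            "apiGroups": ["admissionregistration.k8s.io"],
            "verbs": ["create", "update", "patch"],
            "resources": ["mutatingwebhookconfigurations"],
        }
    ],
}

# The rule grants the operation listed in failing_operations, so CKV_K8S_155 fails.
assert check.scan_spec_conf(cluster_role_conf) == CheckResult.FAILED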
19 changes: 10 additions & 9 deletions checkov/kubernetes/runner.py
@@ -36,8 +36,9 @@ def __init__(

self.graph_registry = get_graph_checks_registry(self.check_type)
self.definitions_raw = {}
self.report_mutator_data = None

def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(), collect_skip_comments=True, helmChart=None, reportMutatorData=None):
def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(), collect_skip_comments=True, helmChart=None):
report = Report(self.check_type)
if self.context is None or self.definitions is None:
if files or root_folder:
@@ -58,13 +59,13 @@ def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=R
self.graph_manager.save_graph(local_graph)
self.definitions = local_graph.definitions

report = self.check_definitions(root_folder, runner_filter, report, reportMutatorData=reportMutatorData, collect_skip_comments=collect_skip_comments, helmChart=helmChart)
graph_report = self.get_graph_checks_report(root_folder, runner_filter, helmChart=helmChart, reportMutatorData=reportMutatorData)
report = self.check_definitions(root_folder, runner_filter, report, collect_skip_comments=collect_skip_comments, helmChart=helmChart)
graph_report = self.get_graph_checks_report(root_folder, runner_filter, helmChart=helmChart)
merge_reports(report, graph_report)

return report

def check_definitions(self, root_folder, runner_filter, report, reportMutatorData, collect_skip_comments=True, helmChart=None,):
def check_definitions(self, root_folder, runner_filter, report, collect_skip_comments=True, helmChart=None,):
for k8_file in self.definitions.keys():
# There are a few cases here. If -f was used, there could be a leading / because it's an absolute path,
# or there will be no leading slash; root_folder will always be none.
@@ -88,17 +89,17 @@ def check_definitions(self, root_folder, runner_filter, report, reportMutatorDat
# TODO? - Variable Eval Message!
variable_evaluations = {}

report = self.mutateKubernetesResults(results, report, k8_file, k8_file_path, file_abs_path, entity_conf, variable_evaluations, reportMutatorData)
report = self.mutateKubernetesResults(results, report, k8_file, k8_file_path, file_abs_path, entity_conf, variable_evaluations)

return report

def get_graph_checks_report(self, root_folder: str, runner_filter: RunnerFilter, helmChart, reportMutatorData) -> Report:
def get_graph_checks_report(self, root_folder: str, runner_filter: RunnerFilter, helmChart) -> Report:
report = Report(self.check_type)
checks_results = self.run_graph_checks_results(runner_filter)
report = self.mutateKubernetesGraphResults(root_folder, runner_filter, report, checks_results, reportMutatorData=reportMutatorData)
report = self.mutateKubernetesGraphResults(root_folder, runner_filter, report, checks_results)
return report

def mutateKubernetesResults(self, results, report, k8_file=None, k8_file_path=None, file_abs_path=None, entity_conf=None, variable_evaluations=None, reportMutatorData=None):
def mutateKubernetesResults(self, results, report, k8_file=None, k8_file_path=None, file_abs_path=None, entity_conf=None, variable_evaluations=None):
# Moves report generation logic out of run() method in Runner class.
# Allows function overriding of a much smaller function than run() for other "child" frameworks such as Kustomize, Helm
# Where Kubernetes CHECKS are needed, but the specific file references are to another framework for the user output (or a mix of both).
@@ -117,7 +118,7 @@ def mutateKubernetesResults(self, results, report, k8_file=None, k8_file_path=No

return report

def mutateKubernetesGraphResults(self, root_folder: str, runner_filter: RunnerFilter, report: Report, checks_results, reportMutatorData=None) -> Report:
def mutateKubernetesGraphResults(self, root_folder: str, runner_filter: RunnerFilter, report: Report, checks_results) -> Report:
# Moves report generation logic out of run() method in Runner class.
# Allows function overriding of a much smaller function than run() for other "child" frameworks such as Kustomize, Helm
# Where Kubernetes CHECKS are needed, but the specific file references are to another framework for the user output (or a mix of both).
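The comments above describe why report mutation lives in mutateKubernetesResults rather than run(): child frameworks that reuse the Kubernetes checks can override just that step. A hypothetical child-runner sketch (illustration only; the subclass and framework name are assumptions, not part of this commit), assuming the Kubernetes runner class is importable as Runner:

from checkov.kubernetes.runner import Runner as K8sRunner

class MyFrameworkRunner(K8sRunner):  # hypothetical child framework (e.g. a Helm-like wrapper)
    check_type = "my_framework"  # hypothetical check type

    def mutateKubernetesResults(self, results, report, k8_file=None, k8_file_path=None,
                                file_abs_path=None, entity_conf=None, variable_evaluations=None):
        # Remap file references to the originating framework here, then delegate
        # to the Kubernetes implementation for the actual Record creation.
        return super().mutateKubernetesResults(results, report, k8_file, k8_file_path,
                                               file_abs_path, entity_conf, variable_evaluations)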