diff --git a/docs/content/releases/os_upgrading/2.58.md b/docs/content/releases/os_upgrading/2.58.md new file mode 100644 index 00000000000..b235594609b --- /dev/null +++ b/docs/content/releases/os_upgrading/2.58.md @@ -0,0 +1,25 @@ +--- +title: 'Upgrading to DefectDojo Version 2.58.x' +toc_hide: true +weight: -20260504 +description: 'Breaking change: parsers no longer set Finding.service directly' +--- + +## Breaking Change: Parsers No Longer Set `Finding.service` + +Starting with DefectDojo 2.58.x, parsers no longer set the `service` field directly on findings. + +### Why this is a breaking change + +Whenever parsers set the `service` field on findings, this breaks the `close_old_findings` functionality. + +The reason is that import and reimport differ: import uses the `service` field, whereas reimport does not include a `service` value. The `close_old_findings` feature only closes findings that match the service value provided in the request. As a result, findings with a non-empty parser-populated service value are not closed. + +Also, if the application name changes, findings in the reimport report are no longer matched against existing findings. + +### Required actions + +- If your integrations relied on the parser-populated `service` field, update your workflow to pass service explicitly at import/reimport time when needed. +- Review automation that depends on `close_old_findings` behavior and verify expected closure scope after upgrading. + +For more information, check the [Release Notes](https://github.com/DefectDojo/django-DefectDojo/releases/tag/2.58.0). 
diff --git a/dojo/db_migrations/0264_clear_service_for_affected_parsers.py b/dojo/db_migrations/0264_clear_service_for_affected_parsers.py new file mode 100644 index 00000000000..d1f5015c9c6 --- /dev/null +++ b/dojo/db_migrations/0264_clear_service_for_affected_parsers.py @@ -0,0 +1,95 @@ +import logging + +from django.db import migrations +from django.db.models import Q + +logger = logging.getLogger(__name__) + + +AFFECTED_PARSER_SCAN_TYPES = [ + "Trivy Scan", + "Trivy Operator Scan", + "Hydra Scan", + "JFrog Xray API Summary Artifact Scan", + "Orca Security Alerts", + "OpenReports", + "StackHawk HawkScan", +] + + +def clear_service_and_rehash_findings(apps, schema_editor): + """ + Clear parser-populated service values for affected parser scan types and + recompute hash_code. + + This migration only touches findings where: + - the finding belongs to an affected parser by test_type or scan_type + - service is set (not NULL and not empty) + """ + historical_finding = apps.get_model("dojo", "Finding") + + affected_ids = set() + for scan_type in AFFECTED_PARSER_SCAN_TYPES: + findings = ( + historical_finding.objects + .filter( + Q(test__test_type__name=scan_type) + | Q(test__scan_type=scan_type), + ) + .exclude(service__isnull=True) + .exclude(service="") + ) + count = findings.count() + if count: + logger.warning( + "Identified %d findings with parser-populated service for scan type '%s'", + count, + scan_type, + ) + affected_ids.update(findings.values_list("id", flat=True)) + + if not affected_ids: + logger.warning("No findings found for parser service cleanup migration") + return + + # Use live model here to access compute_hash_code() and save() behavior. 
+ from dojo.models import Finding # noqa: PLC0415 + + migrated = 0 + for finding in ( + Finding.objects + .filter(id__in=affected_ids) + .select_related("test", "test__test_type") + .iterator(chunk_size=200) + ): + finding.service = None + finding.hash_code = finding.compute_hash_code() + finding.save( + dedupe_option=False, + rules_option=False, + product_grading_option=False, + issue_updater_option=False, + push_to_jira=False, + ) + migrated += 1 + + logger.warning( + "Parser service cleanup migration updated %d findings (service cleared, hash_code recomputed)", + migrated, + ) + + +def noop_reverse(apps, schema_editor): + # Intentionally irreversible: previous parser-populated service values are not recoverable. + pass + + +class Migration(migrations.Migration): + + dependencies = [ + ("dojo", "0263_language_type_unique_language"), + ] + + operations = [ + migrations.RunPython(clear_service_and_rehash_findings, noop_reverse), + ] diff --git a/dojo/tools/hydra/parser.py b/dojo/tools/hydra/parser.py index 143394fef4f..188073db8e0 100644 --- a/dojo/tools/hydra/parser.py +++ b/dojo/tools/hydra/parser.py @@ -91,7 +91,7 @@ def __extract_finding( + password, static_finding=False, dynamic_finding=True, - service=metadata.service_type, + component_name=metadata.service_type, ) if settings.V3_FEATURE_LOCATIONS: finding.unsaved_locations = [LocationData.url(host=host, port=port)] diff --git a/dojo/tools/jfrog_xray_api_summary_artifact/parser.py b/dojo/tools/jfrog_xray_api_summary_artifact/parser.py index 2e8cba494a4..ca8eb988d7b 100644 --- a/dojo/tools/jfrog_xray_api_summary_artifact/parser.py +++ b/dojo/tools/jfrog_xray_api_summary_artifact/parser.py @@ -43,9 +43,9 @@ def get_items(self, tree, test): service = decode_service(artifact_general["name"]) item = get_item( node, - str(service), test, artifact.name, + str(service), artifact.version, artifact.sha256, ) @@ -56,9 +56,9 @@ def get_items(self, tree, test): # Retrieve the findings of the affected 1st level component 
(Artifact) def get_item( vulnerability, - service, test, artifact_name, + artifact_service, artifact_version, artifact_sha256, ): @@ -114,7 +114,6 @@ def get_item( finding = Finding( vuln_id_from_tool=vuln_id_from_tool, - service=service, title=vulnerability["summary"], cwe=cwe, cvssv3=cvssv3, @@ -126,7 +125,7 @@ def get_item( + vulnerability["description"], test=test, file_path=impact_paths[0], - component_name=artifact_name, + component_name=artifact_name or artifact_service, component_version=artifact_version, static_finding=True, dynamic_finding=False, diff --git a/dojo/tools/openreports/parser.py b/dojo/tools/openreports/parser.py index e222676fcea..3f1cd9f8ec7 100644 --- a/dojo/tools/openreports/parser.py +++ b/dojo/tools/openreports/parser.py @@ -19,6 +19,7 @@ DESCRIPTION_TEMPLATE = """{message} +**Service:** {service} **Category:** {category} **Policy:** {policy} **Result:** {result} @@ -218,6 +219,7 @@ def _create_finding_from_result(self, test, result, service_name, report_name, r # Create description description = DESCRIPTION_TEMPLATE.format( message=message, + service=service_name, category=category, policy=policy, result=result_status, @@ -250,7 +252,6 @@ def _create_finding_from_result(self, test, result, service_name, report_name, r mitigation=mitigation, component_name=pkg_name, component_version=installed_version, - service=service_name, active=active, verified=verified, static_finding=True, diff --git a/dojo/tools/orca_security/csv_parser.py b/dojo/tools/orca_security/csv_parser.py index 6ca3c5790e2..b62f74c78ec 100644 --- a/dojo/tools/orca_security/csv_parser.py +++ b/dojo/tools/orca_security/csv_parser.py @@ -84,7 +84,6 @@ def parse(self, content): severity_justification=build_severity_justification(orca_score_raw), static_finding=True, # CSPM scan data is static analysis dynamic_finding=False, - service=source or None, # Source identifies the cloud resource/service component_name=inventory_name or None, # Inventory is the specific resource 
date=parse_date(created_at), ) diff --git a/dojo/tools/orca_security/json_parser.py b/dojo/tools/orca_security/json_parser.py index 36b95362e9e..49a4202a405 100644 --- a/dojo/tools/orca_security/json_parser.py +++ b/dojo/tools/orca_security/json_parser.py @@ -89,7 +89,6 @@ def parse(self, content): severity_justification=build_severity_justification(orca_score), static_finding=True, # CSPM scan data is static analysis dynamic_finding=False, - service=source or None, # Source identifies the cloud resource/service component_name=inventory_name or None, # Inventory is the specific resource date=parse_date(created_at), ) diff --git a/dojo/tools/stackhawk/parser.py b/dojo/tools/stackhawk/parser.py index 04c52950612..f5a5d7c2ded 100644 --- a/dojo/tools/stackhawk/parser.py +++ b/dojo/tools/stackhawk/parser.py @@ -14,7 +14,6 @@ def __init__(self, completed_scan): self.component_version = completed_scan["scan"]["env"] self.static_finding = False self.dynamic_finding = True - self.service = completed_scan["scan"]["application"] class StackHawkParser: @@ -106,7 +105,6 @@ def __extract_finding( dynamic_finding=metadata.dynamic_finding, vuln_id_from_tool=raw_finding["pluginId"], nb_occurences=raw_finding["totalCount"], - service=metadata.service, false_p=are_all_endpoints_false_positive, risk_accepted=are_all_endpoints_risk_accepted, ) diff --git a/dojo/tools/trivy/parser.py b/dojo/tools/trivy/parser.py index fa41dd6c350..ac4daa60030 100644 --- a/dojo/tools/trivy/parser.py +++ b/dojo/tools/trivy/parser.py @@ -33,28 +33,34 @@ **Type:** {type} **Fixed version:** {fixed_version} -{description_text} +{service_text}{description_text} """ MISC_DESCRIPTION_TEMPLATE = """**Target:** {target} **Type:** {type} -{description} +{service_text}{description} {message} """ SECRET_DESCRIPTION_TEMPLATE = """{title} **Category:** {category} -**Match:** {match} +{service_text}**Match:** {match} """ # noqa: S105 LICENSE_DESCRIPTION_TEMPLATE = """{title} **Category:** {category} -**Package:** 
{package} +{service_text}**Package:** {package} """ class TrivyParser: + @staticmethod + def _service_text(service_name): + if service_name: + return f"**Service:** {service_name}\n" + return "" + def get_scan_types(self): return ["Trivy Scan"] @@ -319,6 +325,7 @@ def get_result_items(self, test, results, service_name=None, artifact_name=""): title=vuln.get("Title", ""), target=target, type=vul_type, + service_text=self._service_text(service_name), fixed_version=mitigation, description_text=vuln.get("Description", ""), ) @@ -341,7 +348,6 @@ def get_result_items(self, test, results, service_name=None, artifact_name=""): static_finding=True, dynamic_finding=False, fix_available=fix_available, - service=service_name, **status_fields, ) finding.unsaved_tags = [vul_type, target_class] @@ -377,6 +383,7 @@ def get_result_items(self, test, results, service_name=None, artifact_name=""): description = MISC_DESCRIPTION_TEMPLATE.format( target=target_target, type=misc_type, + service_text=self._service_text(service_name), description=misc_description, message=misc_message, ) @@ -400,7 +407,6 @@ def get_result_items(self, test, results, service_name=None, artifact_name=""): fix_available=True, static_finding=True, dynamic_finding=False, - service=service_name, ) if misc_avdid: finding.unsaved_vulnerability_ids = [] @@ -420,6 +426,7 @@ def get_result_items(self, test, results, service_name=None, artifact_name=""): description = SECRET_DESCRIPTION_TEMPLATE.format( title=secret_title, category=secret_category, + service_text=self._service_text(service_name), match=secret_match, ) severity = TRIVY_SEVERITIES[secret_severity] @@ -434,7 +441,6 @@ def get_result_items(self, test, results, service_name=None, artifact_name=""): static_finding=True, dynamic_finding=False, fix_available=True, - service=service_name, ) finding.unsaved_tags = [target_class] items.append(finding) @@ -453,6 +459,7 @@ def get_result_items(self, test, results, service_name=None, artifact_name=""): description = 
LICENSE_DESCRIPTION_TEMPLATE.format( title=license_name, category=license_category, + service_text=self._service_text(service_name), package=license_pkgname, ) severity = TRIVY_SEVERITIES[license_severity] @@ -468,7 +475,6 @@ def get_result_items(self, test, results, service_name=None, artifact_name=""): static_finding=True, dynamic_finding=False, fix_available=True, - service=service_name, ) finding.unsaved_tags = [target_class] items.append(finding) diff --git a/dojo/tools/trivy_operator/checks_handler.py b/dojo/tools/trivy_operator/checks_handler.py index 506058ef6e9..a13e3b97b19 100644 --- a/dojo/tools/trivy_operator/checks_handler.py +++ b/dojo/tools/trivy_operator/checks_handler.py @@ -17,9 +17,6 @@ def handle_checks(self, labels, checks, test): resource_kind = labels.get("trivy-operator.resource.kind", "") resource_name = labels.get("trivy-operator.resource.name", "") container_name = labels.get("trivy-operator.container.name", "") - service = f"{resource_namespace}/{resource_kind}/{resource_name}" - if container_name: - service = f"{service}/{container_name}" for check in checks: check_title = check.get("title") check_severity = TRIVY_SEVERITIES[check.get("severity")] @@ -55,7 +52,6 @@ def handle_checks(self, labels, checks, test): description=check_description, static_finding=True, dynamic_finding=False, - service=service, fix_available=True, ) finding_tags = [resource_namespace, check_category] diff --git a/dojo/tools/trivy_operator/secrets_handler.py b/dojo/tools/trivy_operator/secrets_handler.py index e0a1b1996f5..bf89407df29 100644 --- a/dojo/tools/trivy_operator/secrets_handler.py +++ b/dojo/tools/trivy_operator/secrets_handler.py @@ -21,9 +21,6 @@ def handle_secrets(self, labels, secrets, test): resource_kind = labels.get("trivy-operator.resource.kind", "") resource_name = labels.get("trivy-operator.resource.name", "") container_name = labels.get("trivy-operator.container.name", "") - service = f"{resource_namespace}/{resource_kind}/{resource_name}" 
- if container_name: - service = f"{service}/{container_name}" for secret in secrets: secret_title = secret.get("title") secret_category = secret.get("category") @@ -52,7 +49,6 @@ def handle_secrets(self, labels, secrets, test): file_path=secret_target, static_finding=True, dynamic_finding=False, - service=service, fix_available=True, ) if resource_namespace: diff --git a/dojo/tools/trivy_operator/vulnerability_handler.py b/dojo/tools/trivy_operator/vulnerability_handler.py index 32ed7baafad..2e836e992ab 100644 --- a/dojo/tools/trivy_operator/vulnerability_handler.py +++ b/dojo/tools/trivy_operator/vulnerability_handler.py @@ -24,9 +24,6 @@ def handle_vulns(self, labels, vulnerabilities, test): resource_kind = labels.get("trivy-operator.resource.kind", "") resource_name = labels.get("trivy-operator.resource.name", "") container_name = labels.get("trivy-operator.container.name", "") - service = f"{resource_namespace}/{resource_kind}/{resource_name}" - if container_name: - service = f"{service}/{container_name}" for vulnerability in vulnerabilities: vuln_id = vulnerability.get("vulnerabilityID", "0") severity = TRIVY_SEVERITIES[vulnerability.get("severity")] @@ -92,7 +89,6 @@ def handle_vulns(self, labels, vulnerabilities, test): description=description, static_finding=True, dynamic_finding=False, - service=service, file_path=file_path, publish_date=publish_date, fix_available=fix_available, diff --git a/unittests/test_importers_importer.py b/unittests/test_importers_importer.py index 6d5b4024a8c..ca6cb2a58f8 100644 --- a/unittests/test_importers_importer.py +++ b/unittests/test_importers_importer.py @@ -1,5 +1,7 @@ +import ast import logging import uuid +from pathlib import Path from unittest.mock import patch from django.core.exceptions import ValidationError @@ -933,3 +935,62 @@ def test_change_vulnerability_ids_on_reimport(self): vuln_ids = list(Vulnerability_Id.objects.filter(finding=finding).values_list("vulnerability_id", flat=True)) 
self.assertEqual(set(new_vulnerability_ids), set(vuln_ids)) finding.delete() + + +class TestParserPolicy(DojoTestCase): + @staticmethod + def _iter_parser_files(tools_root): + yield from tools_root.rglob("parser.py") + + @staticmethod + def _finding_service_offenders(repo_root, parser_file): + try: + source = parser_file.read_text(encoding="utf-8") + tree = ast.parse(source) + except (OSError, SyntaxError): + return [] + + offenders = [] + for node in ast.walk(tree): + if not isinstance(node, ast.Call): + continue + + func = node.func + is_finding_call = ( + isinstance(func, ast.Name) and func.id == "Finding" + ) or ( + isinstance(func, ast.Attribute) and func.attr == "Finding" + ) + + if not is_finding_call: + continue + + has_service_kwarg = any(keyword.arg == "service" for keyword in node.keywords if keyword.arg) + if has_service_kwarg: + rel_path = parser_file.relative_to(repo_root) + offenders.append(f"{rel_path}:{node.lineno}") + + return offenders + + def test_parsers_must_not_set_service_on_finding_directly(self): + """ + Policy test: parser implementations must not set `Finding.service` directly. + + Rationale: + - `service` should be controlled via import/reimport options and not parser-specific mapping. + - Direct parser assignment leads to inconsistent close-old-findings and dedupe behavior. + """ + repo_root = Path(__file__).resolve().parents[1] + tools_root = repo_root / "dojo" / "tools" + + offenders = [] + + for parser_file in self._iter_parser_files(tools_root): + offenders.extend(self._finding_service_offenders(repo_root, parser_file)) + + self.assertEqual( + [], + offenders, + "Parser must not set Finding.service directly. 
Offenders:\n" + + "\n".join(offenders), + ) diff --git a/unittests/test_importers_performance.py b/unittests/test_importers_performance.py index 1a9c9fc137d..a7aafcd58b4 100644 --- a/unittests/test_importers_performance.py +++ b/unittests/test_importers_performance.py @@ -201,7 +201,6 @@ def _import_reimport_performance( "verified": True, "sync": True, "scan_type": scan_type, - "service": "Secured Application", "tags": ["performance-test-reimport", "reimport-tag-in-param", "reimport-go-faster"], "apply_tags_to_findings": True, } @@ -231,7 +230,6 @@ def _import_reimport_performance( "verified": True, "sync": True, "scan_type": scan_type, - "service": "Secured Application", } reimporter = DefaultReImporter(**reimport_options) test, _, _len_new_findings, _len_closed_findings, _, _, _ = reimporter.process_scan(scan) @@ -259,9 +257,6 @@ def _import_reimport_performance( "verified": True, "sync": True, "scan_type": scan_type, - # StackHawk parser sets the service field causing close old findings to fail if we do not specify the service field - # This is a big problem that needs fixing. Parsers should not set the service field. - "service": "Secured Application", "close_old_findings": close_old_findings4, } reimporter = DefaultReImporter(**reimport_options) diff --git a/unittests/tools/test_hydra_parser.py b/unittests/tools/test_hydra_parser.py index a0b1fd61fc3..319738d6965 100644 --- a/unittests/tools/test_hydra_parser.py +++ b/unittests/tools/test_hydra_parser.py @@ -128,6 +128,7 @@ def __assertFindingEquals( actual_finding.description) self.assertFalse(actual_finding.static_finding) self.assertTrue(actual_finding.dynamic_finding) + self.assertEqual(actual_finding.component_name, "http-post-form") # The following fields should be not be set from this parser. 
self.assertIsNone(actual_finding.unique_id_from_tool) self.assertEqual(self.get_unsaved_locations(actual_finding)[0].host, finding_url) diff --git a/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py b/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py index 2307a4c18bd..12fbeb9c940 100644 --- a/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py +++ b/unittests/tools/test_jfrog_xray_api_summary_artifact_parser.py @@ -33,6 +33,7 @@ def test_parse_file_with_one_vuln(self): self.assertEqual(" code.", item.description[-6:]) self.assertIsNone(item.mitigation) self.assertEqual("artifact1", item.component_name) + self.assertIsNone(item.service) self.assertIsNotNone(item.tags) self.assertEqual("1.0", item.component_version) self.assertEqual("artifact_path/artifact1/1.0/", item.file_path[:28]) diff --git a/unittests/tools/test_openreports_parser.py b/unittests/tools/test_openreports_parser.py index 480722b9152..6be5b8883f5 100644 --- a/unittests/tools/test_openreports_parser.py +++ b/unittests/tools/test_openreports_parser.py @@ -28,7 +28,8 @@ def test_single_report(self): self.assertEqual("3.5.2-r1", finding1.component_version) self.assertEqual("Upgrade to version: 3.5.4-r0", finding1.mitigation) self.assertEqual("https://avd.aquasec.com/nvd/cve-2025-9232", finding1.references) - self.assertEqual("test/Deployment/test-app", finding1.service) + self.assertIsNone(finding1.service) + self.assertIn("**Service:** test/Deployment/test-app", finding1.description) self.assertTrue(finding1.active) self.assertTrue(finding1.verified) self.assertTrue(finding1.fix_available) @@ -47,7 +48,8 @@ def test_single_report(self): self.assertEqual("v1.24.4", finding2.component_version) self.assertEqual("Upgrade to version: 1.23.12, 1.24.6", finding2.mitigation) self.assertEqual("https://avd.aquasec.com/nvd/cve-2025-47907", finding2.references) - self.assertEqual("test/Deployment/test-app", finding2.service) + self.assertIsNone(finding2.service) + 
self.assertIn("**Service:** test/Deployment/test-app", finding2.description) self.assertTrue(finding2.active) self.assertTrue(finding2.verified) self.assertTrue(finding2.fix_available) @@ -63,7 +65,8 @@ def test_single_report(self): self.assertEqual("N/A", finding3.component_version) self.assertEqual("Upgrade to version: Configure proper security headers", finding3.mitigation) self.assertEqual("https://www.cisecurity.org/benchmark/docker", finding3.references) - self.assertEqual("test/Deployment/test-app", finding3.service) + self.assertIsNone(finding3.service) + self.assertIn("**Service:** test/Deployment/test-app", finding3.description) self.assertTrue(finding3.active) self.assertTrue(finding3.verified) self.assertTrue(finding3.fix_available) @@ -80,8 +83,11 @@ def test_list_format(self): findings = parser.get_findings(test_file, Test()) self.assertEqual(len(findings), 3) - # Verify findings from different reports have different services - services = {finding.service for finding in findings} + # Verify findings from different reports have different service contexts in descriptions + services = { + finding.description.split("**Service:** ")[1].split("\n", 1)[0] + for finding in findings if "**Service:** " in finding.description + } self.assertEqual(len(services), 2) self.assertIn("test/Deployment/app1", services) self.assertIn("test/Deployment/app2", services) diff --git a/unittests/tools/test_orca_security_parser.py b/unittests/tools/test_orca_security_parser.py index 95d3ac9c472..8ca57f6456f 100644 --- a/unittests/tools/test_orca_security_parser.py +++ b/unittests/tools/test_orca_security_parser.py @@ -25,7 +25,7 @@ def test_parse_csv_one_finding(self): self.assertTrue(finding.static_finding) self.assertFalse(finding.dynamic_finding) self.assertEqual("TestRole_abc123", finding.component_name) - self.assertEqual("TestRole_abc123", finding.service) + self.assertIsNone(finding.service) self.assertEqual("OrcaScore: 5.1", finding.severity_justification) 
self.assertIn("IAM misconfigurations", finding.description) self.assertEqual(["CSPM", "source: Orca Scan"], finding.unsaved_tags) @@ -69,7 +69,7 @@ def test_parse_json_one_finding(self): self.assertTrue(finding.static_finding) self.assertFalse(finding.dynamic_finding) self.assertEqual("TestRole_abc123", finding.component_name) - self.assertEqual("TestRole_abc123", finding.service) + self.assertIsNone(finding.service) self.assertEqual("OrcaScore: 5.1", finding.severity_justification) self.assertIn("IAM misconfigurations", finding.description) self.assertEqual(["CSPM", "source: Orca Scan"], finding.unsaved_tags) diff --git a/unittests/tools/test_stackhawk_parser.py b/unittests/tools/test_stackhawk_parser.py index 02281d151c6..0d67141c9d3 100644 --- a/unittests/tools/test_stackhawk_parser.py +++ b/unittests/tools/test_stackhawk_parser.py @@ -243,7 +243,7 @@ def __assertFindingEquals( self.assertTrue(actual_finding.dynamic_finding) self.assertEqual(finding_id, actual_finding.vuln_id_from_tool) self.assertEqual(count, actual_finding.nb_occurences) - self.assertEqual(application_name, actual_finding.service) + self.assertIsNone(actual_finding.service) self.assertEqual(false_positive, actual_finding.false_p) self.assertEqual(risk_accepted, actual_finding.risk_accepted) # The following fields should be not be set from this parser. diff --git a/unittests/tools/test_trivy_parser.py b/unittests/tools/test_trivy_parser.py index f43b079ea18..0a06a66722d 100644 --- a/unittests/tools/test_trivy_parser.py +++ b/unittests/tools/test_trivy_parser.py @@ -118,6 +118,7 @@ def test_kubernetes(self): **Type:** debian **Fixed version:** 1.8.2.2 +**Service:** default / Deployment / redis-follower APT had several integer overflows and underflows while parsing .deb packages, aka GHSL-2020-168 GHSL-2020-169, in files apt-pkg/contrib/extracttar.cc, apt-pkg/deb/debfile.cc, and apt-pkg/contrib/arfile.cc. 
This issue affects: apt 1.2.32ubuntu0 versions prior to 1.2.32ubuntu0.2; 1.6.12ubuntu0 versions prior to 1.6.12ubuntu0.2; 2.0.2ubuntu0 versions prior to 2.0.2ubuntu0.2; 2.1.10ubuntu0 versions prior to 2.1.10ubuntu0.1; """ self.assertEqual(description, finding.description) @@ -127,7 +128,7 @@ def test_kubernetes(self): self.assertEqual(["debian", "os-pkgs"], finding.unsaved_tags) self.assertEqual("apt", finding.component_name) self.assertEqual("1.8.2.1", finding.component_version) - self.assertEqual("default / Deployment / redis-follower", finding.service) + self.assertIsNone(finding.service) self.assertEqual(finding.file_path, "gcr.io/google_samples/gb-redis-follower:v2 (debian 10.4)") finding = findings[5] self.assertEqual("CVE-2020-27350 apt 1.8.2.1", finding.title) @@ -137,6 +138,7 @@ def test_kubernetes(self): **Type:** debian **Fixed version:** 1.8.2.2 +**Service:** default / Deployment / redis-leader APT had several integer overflows and underflows while parsing .deb packages, aka GHSL-2020-168 GHSL-2020-169, in files apt-pkg/contrib/extracttar.cc, apt-pkg/deb/debfile.cc, and apt-pkg/contrib/arfile.cc. 
This issue affects: apt 1.2.32ubuntu0 versions prior to 1.2.32ubuntu0.2; 1.6.12ubuntu0 versions prior to 1.6.12ubuntu0.2; 2.0.2ubuntu0 versions prior to 2.0.2ubuntu0.2; 2.1.10ubuntu0 versions prior to 2.1.10ubuntu0.1; """ self.assertEqual(description, finding.description) @@ -146,13 +148,14 @@ def test_kubernetes(self): self.assertEqual(["debian", "os-pkgs"], finding.unsaved_tags) self.assertEqual("apt", finding.component_name) self.assertEqual("1.8.2.1", finding.component_version) - self.assertEqual("default / Deployment / redis-leader", finding.service) + self.assertIsNone(finding.service) finding = findings[10] self.assertEqual("KSV001 - Process can elevate its own privileges", finding.title) self.assertEqual("Medium", finding.severity) description = """**Target:** Deployment/redis-follower **Type:** Kubernetes Security Check +**Service:** default / Deployment / redis-follower A program inside the container can elevate its own privileges and run as root, which might give the program control over the container and node. Container 'follower' of Deployment 'redis-follower' should set 'securityContext.allowPrivilegeEscalation' to false Number Content @@ -174,7 +177,7 @@ def test_kubernetes(self): self.assertEqual(["kubernetes", "config"], finding.unsaved_tags) self.assertIsNone(finding.component_name) self.assertIsNone(finding.component_version) - self.assertEqual("default / Deployment / redis-follower", finding.service) + self.assertIsNone(finding.service) def test_license_scheme(self): with sample_path("license_scheme.json").open(encoding="utf-8") as test_file: