diff --git a/dojo/finding/helper.py b/dojo/finding/helper.py index ba11567869b..b3c3031f241 100644 --- a/dojo/finding/helper.py +++ b/dojo/finding/helper.py @@ -764,7 +764,7 @@ def bulk_clear_finding_m2m(finding_qs): Notes.objects.filter(id__in=note_ids).delete() -def bulk_delete_findings(finding_qs, chunk_size=1000): +def _bulk_delete_findings_internal(finding_qs, chunk_size=1000): """ Delete findings and all related objects efficiently. Including any related object in Dojo-Pro @@ -799,6 +799,29 @@ def bulk_delete_findings(finding_qs, chunk_size=1000): ) +def bulk_delete_findings(finding_qs, chunk_size=1000, cascade_root=None, product_id=None): + """ + Entry point; may delegate to Pro via settings.BULK_DELETE_FINDINGS_METHOD. + + cascade_root: optional dict describing the top-level object whose cascade triggered + this bulk delete (e.g. {"model": "dojo.engagement", "pk": 9}). Ignored by OSS + when no custom method is configured. + + product_id: optional owning product id for callers that already know scope (e.g. + async cascade delete). Ignored by OSS when no custom method is configured. + """ + from dojo.utils import get_custom_method # noqa: PLC0415 circular import + + if fn := get_custom_method("BULK_DELETE_FINDINGS_METHOD"): + return fn( + finding_qs, + chunk_size=chunk_size, + cascade_root=cascade_root, + product_id=product_id, + ) + return _bulk_delete_findings_internal(finding_qs, chunk_size=chunk_size) + + def fix_loop_duplicates(scope_qs=None): """Due to bugs in the past and even currently when under high parallel load, there can be transitive duplicates.""" """ i.e. A -> B -> C. 
This can lead to problems when deleting findingns, performing deduplication, etc """ diff --git a/dojo/utils.py b/dojo/utils.py index 0e528b67d5d..1a08256528e 100644 --- a/dojo/utils.py +++ b/dojo/utils.py @@ -2015,7 +2015,9 @@ def async_delete_task(obj, **kwargs): # Capture product reference before deletion for product grading at the end product = None with suppress(Product.DoesNotExist, Engagement.DoesNotExist, Test.DoesNotExist): - if isinstance(obj, Engagement): + if isinstance(obj, Product): + product = obj + elif isinstance(obj, Engagement): product = obj.product elif isinstance(obj, Test): product = obj.engagement.product @@ -2024,6 +2026,8 @@ def async_delete_task(obj, **kwargs): scope_field = FINDING_SCOPE_FILTERS.get(type(obj)) if scope_field: finding_qs = Finding.objects.filter(**{scope_field: obj}) + # cascade_root identifies the top-level object whose cascade triggered this bulk delete; + # it is passed through to bulk_delete_findings for custom (Pro) delete methods. + cascade_root = {"model": obj._meta.label_lower, "pk": obj.pk} # Step 2: Prepare duplicate clusters (must happen before any deletion) # When CASCADE_DELETE=True, reconfigure_duplicate_cluster skips reconfiguration — @@ -2042,10 +2046,20 @@ def async_delete_task(obj, **kwargs): outside_count = outside_dupes_qs.count() if outside_count: logger.info("ASYNC_DELETE: Deleting %d outside-scope duplicates first", outside_count) - bulk_delete_findings(outside_dupes_qs, chunk_size=chunk_size) + bulk_delete_findings( + outside_dupes_qs, + chunk_size=chunk_size, + cascade_root=cascade_root, + product_id=product.pk if product else None, + ) # Step 4: Delete the main scope findings - bulk_delete_findings(finding_qs, chunk_size=chunk_size) + bulk_delete_findings( + finding_qs, + chunk_size=chunk_size, + cascade_root=cascade_root, + product_id=product.pk if product else None, + ) # Step 5: Delete all remaining related objects (Tests, Engagements, # Endpoints, etc.) via SQL cascade. 
Findings are already gone, so @@ -2061,8 +2075,9 @@ def async_delete_task(obj, **kwargs): # All children are already gone so this is a single-row DELETE. obj.delete() - # Step 7: Recalculate product grade once (not per-object) - if product: + # Step 7: Recalculate product grade once (Engagement/Test deletes only). Skip when the + # deleted object is the Product itself — it is removed in step 6 and grading is pointless. + if product and not isinstance(obj, Product): perform_product_grading(product) logger.info("ASYNC_DELETE: Successfully deleted %s: %s", obj_name, obj)