Merge pull request #9775 from DefectDojo/release/2.32.2
Release: Merge release into master from: release/2.32.2
Maffooch authored Mar 18, 2024
2 parents 1f450c2 + d1eebce commit c182e9c
Showing 18 changed files with 141 additions and 35 deletions.
7 changes: 6 additions & 1 deletion .dryrunsecurity.yaml
@@ -1,6 +1,7 @@
---
sensitiveCodepaths:
- 'dojo/object/*.py' # FIXME
- 'dojo/object/urls.py'
- 'dojo/object/views.py'
- 'dojo/announcement/*.py'
- 'dojo/api_v2/*.py'
- 'dojo/api_v2/**/*.py'
@@ -62,6 +63,10 @@ allowedAuthors:
- cneill
- Maffooch
- blakeowens
- kiblik
- dsever
- dogboat
- FelixHernandez
notificationList:
- '@mtesauro'
- '@grendel513'
2 changes: 0 additions & 2 deletions .flake8
@@ -12,8 +12,6 @@ ignore =
C901
# Multiple statements on one line
E704
# Assign a lambda expression
E731
# Bare except
E722
# Local variable
2 changes: 1 addition & 1 deletion .github/workflows/ruff.yml
@@ -33,4 +33,4 @@ jobs:
run: pip install -r requirements-lint.txt

- name: Run Ruff Linter
run: ruff .
run: ruff check .
2 changes: 1 addition & 1 deletion components/package.json
@@ -1,6 +1,6 @@
{
"name": "defectdojo",
"version": "2.32.1",
"version": "2.32.2",
"license" : "BSD-3-Clause",
"private": true,
"dependencies": {
2 changes: 1 addition & 1 deletion dojo/__init__.py
@@ -4,6 +4,6 @@
# Django starts so that shared_task will use this app.
from .celery import app as celery_app # noqa: F401

__version__ = '2.32.1'
__version__ = '2.32.2'
__url__ = 'https://github.com/DefectDojo/django-DefectDojo'
__docs__ = 'https://documentation.defectdojo.com'
23 changes: 23 additions & 0 deletions dojo/api_v2/serializers.py
@@ -1717,6 +1717,17 @@ def get_display_status(self, obj) -> str:

# Overriding this to add Push to JIRA functionality
def update(self, instance, validated_data):
    # cvssv3 handling: the cvssv3 vector takes precedence,
    # then cvssv3_score, and finally severity
    if validated_data.get("cvssv3"):
        validated_data["cvssv3_score"] = None
        validated_data["severity"] = ""
    elif validated_data.get("cvssv3_score"):
        validated_data["severity"] = ""
    elif validated_data.get("severity"):
        validated_data["cvssv3"] = None
        validated_data["cvssv3_score"] = None

    # remove tags from validated data and store them separately
    to_be_tagged, validated_data = self._pop_tags(validated_data)
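For context, a minimal standalone sketch of the precedence rule applied above, with a plain dict standing in for DRF's validated_data:

```python
def apply_cvssv3_precedence(validated_data):
    # Mirror the update() logic above: a CVSSv3 vector wins over an
    # explicit score, which in turn wins over a manually set severity.
    if validated_data.get("cvssv3"):
        validated_data["cvssv3_score"] = None
        validated_data["severity"] = ""
    elif validated_data.get("cvssv3_score"):
        validated_data["severity"] = ""
    elif validated_data.get("severity"):
        validated_data["cvssv3"] = None
        validated_data["cvssv3_score"] = None
    return validated_data

# A vector in the payload clears any stale score/severity values.
print(apply_cvssv3_precedence({
    "cvssv3": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
    "cvssv3_score": 5.0,
    "severity": "Low",
}))
```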

@@ -2162,6 +2173,10 @@ class ImportScanSerializer(serializers.Serializer):
help_text="If set to True, the tags will be applied to the findings",
required=False,
)
apply_tags_to_endpoints = serializers.BooleanField(
help_text="If set to True, the tags will be applied to the endpoints",
required=False,
)

def save(self, push_to_jira=False):
data = self.validated_data
@@ -2181,6 +2196,7 @@ def save(self, push_to_jira=False):
api_scan_configuration = data.get("api_scan_configuration", None)
service = data.get("service", None)
apply_tags_to_findings = data.get("apply_tags_to_findings", False)
apply_tags_to_endpoints = data.get("apply_tags_to_endpoints", False)
source_code_management_uri = data.get(
"source_code_management_uri", None
)
@@ -2274,6 +2290,7 @@ def save(self, push_to_jira=False):
title=test_title,
create_finding_groups_for_all_findings=create_finding_groups_for_all_findings,
apply_tags_to_findings=apply_tags_to_findings,
apply_tags_to_endpoints=apply_tags_to_endpoints,
)

if test:
@@ -2446,6 +2463,10 @@ class ReImportScanSerializer(TaggitSerializer, serializers.Serializer):
help_text="If set to True, the tags will be applied to the findings",
required=False
)
apply_tags_to_endpoints = serializers.BooleanField(
help_text="If set to True, the tags will be applied to the endpoints",
required=False,
)

def save(self, push_to_jira=False):
logger.debug("push_to_jira: %s", push_to_jira)
@@ -2459,6 +2480,7 @@ def save(self, push_to_jira=False):
"close_old_findings_product_scope"
)
apply_tags_to_findings = data.get("apply_tags_to_findings", False)
apply_tags_to_endpoints = data.get("apply_tags_to_endpoints", False)
do_not_reactivate = data.get("do_not_reactivate", False)
version = data.get("version", None)
build_id = data.get("build_id", None)
@@ -2560,6 +2582,7 @@ def save(self, push_to_jira=False):
do_not_reactivate=do_not_reactivate,
create_finding_groups_for_all_findings=create_finding_groups_for_all_findings,
apply_tags_to_findings=apply_tags_to_findings,
apply_tags_to_endpoints=apply_tags_to_endpoints,
)

if test_import:
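For context, a hedged sketch of how these serializer fields might be used against the /api/v2/import-scan/ endpoint; the instance URL, API key, engagement id, and report file name are placeholders, not values from this PR:

```python
import requests

# Placeholder instance URL and API key; adjust for your deployment.
url = "https://defectdojo.example.com/api/v2/import-scan/"
headers = {"Authorization": "Token <api-key>"}

data = {
    "scan_type": "ZAP Scan",
    "engagement": 1,                    # placeholder engagement id
    "tags": ["prod", "pci"],
    "apply_tags_to_findings": True,
    "apply_tags_to_endpoints": True,    # new field in 2.32.2
}

with open("zap-report.xml", "rb") as report:
    response = requests.post(url, headers=headers, data=data,
                             files={"file": report})
print(response.status_code)
```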
19 changes: 19 additions & 0 deletions dojo/db_migrations/0204_alter_finding_cvssv3_score.py
@@ -0,0 +1,19 @@
# Generated by Django 4.1.13 on 2024-03-14 17:18

import django.core.validators
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dojo', '0203_alter_finding_options_finding_epss_percentile_and_more'),
    ]

    operations = [
        migrations.AlterField(
            model_name='finding',
            name='cvssv3_score',
            field=models.FloatField(blank=True, help_text='Numerical CVSSv3 score for the vulnerability. If the vector is given, the score is updated while saving the finding. The value must be between 0-10.', null=True, validators=[django.core.validators.MinValueValidator(0.0), django.core.validators.MaxValueValidator(10.0)], verbose_name='CVSSv3 score'),
        ),
    ]
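The migration mirrors the validators added to the model field later in this diff. A quick sketch of what those validators enforce, assuming Django is installed (calling a validator directly should not require full project settings):

```python
from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator, MinValueValidator

score_validators = [MinValueValidator(0.0), MaxValueValidator(10.0)]

def check_cvssv3_score(value):
    # Raises ValidationError for anything outside the 0.0-10.0 range.
    for validator in score_validators:
        validator(value)

check_cvssv3_score(7.5)        # accepted
try:
    check_cvssv3_score(-1.0)   # e.g. the sentinel some scanners emit
except ValidationError:
    print("rejected: cvssv3_score must be between 0 and 10")
```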
3 changes: 2 additions & 1 deletion dojo/engagement/views.py
@@ -738,6 +738,7 @@ def post(self, request, eid=None, pid=None):
service = form.cleaned_data.get('service', None)
close_old_findings = form.cleaned_data.get('close_old_findings', None)
apply_tags_to_findings = form.cleaned_data.get('apply_tags_to_findings', False)
apply_tags_to_endpoints = form.cleaned_data.get('apply_tags_to_endpoints', False)
# close_old_findings_product_scope is a modifier of close_old_findings.
# If it is selected, close_old_findings should also be selected.
close_old_findings_product_scope = form.cleaned_data.get('close_old_findings_product_scope', None)
@@ -804,7 +805,7 @@ def post(self, request, eid=None, pid=None):
minimum_severity=minimum_severity, endpoints_to_add=list(form.cleaned_data['endpoints']) + added_endpoints, scan_date=scan_date,
version=version, branch_tag=branch_tag, build_id=build_id, commit_hash=commit_hash, push_to_jira=push_to_jira,
close_old_findings=close_old_findings, close_old_findings_product_scope=close_old_findings_product_scope, group_by=group_by, api_scan_configuration=api_scan_configuration, service=service,
create_finding_groups_for_all_findings=create_finding_groups_for_all_findings, apply_tags_to_findings=apply_tags_to_findings)
create_finding_groups_for_all_findings=create_finding_groups_for_all_findings, apply_tags_to_findings=apply_tags_to_findings, apply_tags_to_endpoints=apply_tags_to_endpoints)

message = f'{scan_type} processed a total of {finding_count} findings'

13 changes: 13 additions & 0 deletions dojo/forms.py
@@ -485,6 +485,12 @@ class ImportScanForm(forms.Form):
    required=False,
    initial=False
)
apply_tags_to_endpoints = forms.BooleanField(
    help_text="If set to True, the tags will be applied to the endpoints",
    label="Apply Tags to Endpoints",
    required=False,
    initial=False
)

if is_finding_groups_enabled():
    group_by = forms.ChoiceField(required=False, choices=Finding_Group.GROUP_BY_OPTIONS, help_text='Choose an option to automatically group new findings by the chosen option.')
@@ -577,6 +583,12 @@ class ReImportScanForm(forms.Form):
    required=False,
    initial=False
)
apply_tags_to_endpoints = forms.BooleanField(
    help_text="If set to True, the tags will be applied to the endpoints",
    label="Apply Tags to Endpoints",
    required=False,
    initial=False
)

if is_finding_groups_enabled():
    group_by = forms.ChoiceField(required=False, choices=Finding_Group.GROUP_BY_OPTIONS, help_text='Choose an option to automatically group new findings by the chosen option')
@@ -1190,6 +1202,7 @@ class FindingForm(forms.ModelForm):
cwe = forms.IntegerField(required=False)
vulnerability_ids = vulnerability_ids_field
cvssv3 = forms.CharField(max_length=117, required=False, widget=forms.TextInput(attrs={'class': 'cvsscalculator', 'data-toggle': 'dropdown', 'aria-haspopup': 'true', 'aria-expanded': 'false'}))
cvssv3_score = forms.FloatField(required=False, max_value=10.0, min_value=0.0)
description = forms.CharField(widget=forms.Textarea)
severity = forms.ChoiceField(
choices=SEVERITY_CHOICES,
9 changes: 8 additions & 1 deletion dojo/importers/importer/importer.py
@@ -265,7 +265,8 @@ def close_old_findings(self, test, scan_date_time, user, push_to_jira=None, serv
def import_scan(self, scan, scan_type, engagement, lead, environment, active=None, verified=None, tags=None, minimum_severity=None,
user=None, endpoints_to_add=None, scan_date=None, version=None, branch_tag=None, build_id=None,
commit_hash=None, push_to_jira=None, close_old_findings=False, close_old_findings_product_scope=False,
group_by=None, api_scan_configuration=None, service=None, title=None, create_finding_groups_for_all_findings=True, apply_tags_to_findings=False):
group_by=None, api_scan_configuration=None, service=None, title=None, create_finding_groups_for_all_findings=True,
apply_tags_to_findings=False, apply_tags_to_endpoints=False):

logger.debug(f'IMPORT_SCAN: parameters: {locals()}')

@@ -389,6 +390,12 @@ def import_scan(self, scan, scan_type, engagement, lead, environment, active=Non
for tag in tags:
finding.tags.add(tag)

if apply_tags_to_endpoints and tags:
    for finding in test_import.findings_affected.all():
        for endpoint in finding.endpoints.all():
            for tag in tags:
                endpoint.tags.add(tag)

logger.debug('IMPORT_SCAN: Generating notifications')
notifications_helper.notify_test_created(test)
updated_count = len(new_findings) + len(closed_findings)
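The nested loop above pushes the import's tags down to every endpoint of every affected finding. A simplified sketch of that behavior with in-memory stand-ins for the Django models (the real code iterates test_import.findings_affected and uses django-taggit's tags.add()):

```python
class FakeEndpoint:
    def __init__(self):
        self.tags = set()          # stand-in for the taggit manager

class FakeFinding:
    def __init__(self, endpoints):
        self.endpoints = endpoints

def apply_tags_to_endpoints(findings, tags):
    # Same shape as the loop in import_scan() above.
    for finding in findings:
        for endpoint in finding.endpoints:
            for tag in tags:
                endpoint.tags.add(tag)

endpoint = FakeEndpoint()
apply_tags_to_endpoints([FakeFinding([endpoint])], ["prod", "pci"])
print(endpoint.tags)               # {'prod', 'pci'} (set order may vary)
```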
18 changes: 13 additions & 5 deletions dojo/importers/reimporter/reimporter.py
@@ -42,7 +42,6 @@ def process_parsed_findings(
scan_date=None,
do_not_reactivate=False,
create_finding_groups_for_all_findings=True,
apply_tags_to_findings=False,
**kwargs,
):

@@ -576,6 +575,7 @@ def reimport_scan(
do_not_reactivate=False,
create_finding_groups_for_all_findings=True,
apply_tags_to_findings=False,
apply_tags_to_endpoints=False,
):

logger.debug(f"REIMPORT_SCAN: parameters: {locals()}")
@@ -746,10 +746,18 @@
reactivated_findings,
untouched_findings,
)
if apply_tags_to_findings and tags:
    for finding in test_import.findings_affected.all():
        for tag in tags:
            finding.tags.add(tag)

if apply_tags_to_findings and tags:
    for finding in test_import.findings_affected.all():
        for tag in tags:
            finding.tags.add(tag)

if apply_tags_to_endpoints and tags:
    for finding in test_import.findings_affected.all():
        for endpoint in finding.endpoints.all():
            for tag in tags:
                endpoint.tags.add(tag)

logger.debug("REIMPORT_SCAN: Generating notifications")

updated_count = (
34 changes: 28 additions & 6 deletions dojo/models.py
@@ -1769,7 +1769,13 @@ def mitigated(self):

@property
def vulnerable(self):
    return self.active_findings_count > 0
    return Endpoint_Status.objects.filter(
        endpoint=self,
        mitigated=False,
        false_positive=False,
        out_of_scope=False,
        risk_accepted=False
    ).count() > 0

@property
def findings_count(self):
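Conceptually, the reworked vulnerable property above reports an endpoint as vulnerable only while it has at least one status that is not mitigated, false positive, out of scope, or risk accepted. A pure-Python sketch of that predicate (the real code runs the single Endpoint_Status queryset shown in the hunk):

```python
def is_vulnerable(statuses):
    # statuses: iterable of dicts standing in for Endpoint_Status rows.
    return any(
        not s["mitigated"]
        and not s["false_positive"]
        and not s["out_of_scope"]
        and not s["risk_accepted"]
        for s in statuses
    )

statuses = [
    {"mitigated": True, "false_positive": False, "out_of_scope": False, "risk_accepted": False},
    {"mitigated": False, "false_positive": False, "out_of_scope": False, "risk_accepted": True},
]
print(is_vulnerable(statuses))  # False: every status is mitigated or risk accepted
```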
@@ -2227,7 +2233,8 @@ class Finding(models.Model):
cvssv3_score = models.FloatField(null=True,
blank=True,
verbose_name=_('CVSSv3 score'),
help_text=_("Numerical CVSSv3 score for the vulnerability. If the vector is given, the score is updated while saving the finding"))
help_text=_("Numerical CVSSv3 score for the vulnerability. If the vector is given, the score is updated while saving the finding. The value must be between 0-10."),
validators=[MinValueValidator(0.0), MaxValueValidator(10.0)])

url = models.TextField(null=True,
blank=True,
@@ -2992,17 +2999,32 @@ def save(self, dedupe_option=True, rules_option=True, product_grading_option=Tru
from titlecase import titlecase
self.title = titlecase(self.title[:511])

# Assign the numerical severity for correct sorting order
self.numerical_severity = Finding.get_numerical_severity(self.severity)

# Synchronize cvssv3 score using cvssv3 vector
# Synchronize cvssv3 score and severity using cvssv3 vector
# the vector trumps all if we get it
if self.cvssv3:
    try:
        cvss_object = CVSS3(self.cvssv3)
        # use the environmental score, which is the most refined score
        self.severity = cvss_object.severities()[2]
        if self.severity == "None":
            self.severity = "Info"
        self.cvssv3_score = cvss_object.scores()[2]
    except Exception as ex:
        logger.error("Can't compute cvssv3 score for finding id %i. Invalid cvssv3 vector found: '%s'. Exception: %s", self.id, self.cvssv3, ex)
elif self.cvssv3_score:
    if self.cvssv3_score < .1:
        self.severity = "Info"
    elif self.cvssv3_score <= 3.9:
        self.severity = "Low"
    elif self.cvssv3_score <= 6.9:
        self.severity = "Medium"
    elif self.cvssv3_score <= 8.9:
        self.severity = "High"
    else:
        self.severity = "Critical"

# Assign the numerical severity for correct sorting order
self.numerical_severity = Finding.get_numerical_severity(self.severity)

# Finding.save is called once from serializers.py with dedupe_option=False because the finding is not ready yet, for example the endpoints are not built
# It is then called a second time with dedupe_option defaulted to true; now we can compute the hash_code and run the deduplication
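The new elif branch above falls back to deriving severity from a bare numeric score using the CVSS v3 qualitative rating bands. A standalone sketch of that mapping:

```python
def severity_from_cvssv3_score(score):
    # Same thresholds as the elif branch in Finding.save() above,
    # matching the CVSS v3.x qualitative severity ratings.
    if score < 0.1:
        return "Info"
    elif score <= 3.9:
        return "Low"
    elif score <= 6.9:
        return "Medium"
    elif score <= 8.9:
        return "High"
    return "Critical"

for score in (0.0, 3.9, 5.0, 9.8):
    print(score, severity_from_cvssv3_score(score))
# 0.0 Info / 3.9 Low / 5.0 Medium / 9.8 Critical
```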
11 changes: 6 additions & 5 deletions dojo/templatetags/display_tags.py
@@ -339,9 +339,8 @@ def datediff_time(date1, date2):
date_str = ""
diff = dateutil.relativedelta.relativedelta(date2, date1)
attrs = ['years', 'months', 'days']
human_readable = lambda delta: ['%d %s' % (getattr(delta, attr), getattr(delta, attr) > 1 and attr or attr[:-1])
for attr in attrs if getattr(delta, attr)]
human_date = human_readable(diff)
human_date = ['%d %s' % (getattr(diff, attr), getattr(diff, attr) > 1 and attr or attr[:-1])
for attr in attrs if getattr(diff, attr)]
for date_part in human_date:
date_str = date_str + date_part + " "
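The list comprehension previously assigned to a lambda (an E731 violation, no longer ignored after the .flake8 change earlier in this diff) is now inlined. For reference, a runnable sketch of the same date formatting, assuming python-dateutil is installed:

```python
from datetime import datetime

import dateutil.relativedelta

def human_readable_delta(date1, date2):
    # Same formatting as datediff_time: keep non-zero components,
    # pluralizing each unit when its value is greater than one.
    diff = dateutil.relativedelta.relativedelta(date2, date1)
    attrs = ['years', 'months', 'days']
    parts = ['%d %s' % (getattr(diff, attr), attr if getattr(diff, attr) > 1 else attr[:-1])
             for attr in attrs if getattr(diff, attr)]
    return " ".join(parts)

print(human_readable_delta(datetime(2023, 1, 1), datetime(2024, 3, 18)))
# -> "1 year 2 months 17 days"
```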

@@ -897,7 +896,8 @@ def jira_project_tag(product_or_engagement, autoescape=True):
if autoescape:
    esc = conditional_escape
else:
    esc = lambda x: x
    def esc(x):
        return x

jira_project = jira_helper.get_jira_project(product_or_engagement)

@@ -954,7 +954,8 @@ def import_settings_tag(test_import, autoescape=True):
if autoescape:
    esc = conditional_escape
else:
    esc = lambda x: x
    def esc(x):
        return x

html = """
7 changes: 5 additions & 2 deletions dojo/test/views.py
@@ -829,6 +829,8 @@ def re_import_scan_results(request, tid):

group_by = form.cleaned_data.get('group_by', None)
create_finding_groups_for_all_findings = form.cleaned_data.get('create_finding_groups_for_all_findings')
apply_tags_to_findings = form.cleaned_data.get('apply_tags_to_findings', False)
apply_tags_to_endpoints = form.cleaned_data.get('apply_tags_to_endpoints', False)

active = None
if activeChoice:
@@ -860,13 +862,14 @@
try:
test, finding_count, new_finding_count, closed_finding_count, reactivated_finding_count, untouched_finding_count, test_import = \
reimporter.reimport_scan(scan, scan_type, test, active=active, verified=verified,
tags=None, minimum_severity=minimum_severity,
tags=tags, minimum_severity=minimum_severity,
endpoints_to_add=endpoints_to_add, scan_date=scan_date,
version=version, branch_tag=branch_tag, build_id=build_id,
commit_hash=commit_hash, push_to_jira=push_to_jira,
close_old_findings=close_old_findings, group_by=group_by,
api_scan_configuration=api_scan_configuration, service=service, do_not_reactivate=do_not_reactivate,
create_finding_groups_for_all_findings=create_finding_groups_for_all_findings)
create_finding_groups_for_all_findings=create_finding_groups_for_all_findings,
apply_tags_to_findings=apply_tags_to_findings, apply_tags_to_endpoints=apply_tags_to_endpoints)
except Exception as e:
logger.exception(e)
add_error_message_to_response('An exception error occurred during the report import:%s' % str(e))
6 changes: 6 additions & 0 deletions dojo/tools/anchore_engine/parser.py
@@ -89,6 +89,12 @@ def get_findings(self, filename, test):
cvssv3_base_score = item["vendor_data"][1][
    "cvss_v3"
]["base_score"]
# The CVSSv3 spec states the score must be between 0.0 and 10.0;
# Anchore reports -1.0 in some situations, which breaks the spec
if (cvssv3_base_score
        and ((float(cvssv3_base_score) < 0)
             or (float(cvssv3_base_score) > 10))):
    cvssv3_base_score = None

references = item["url"]
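Since Anchore can emit a sentinel score of -1.0, the parser now discards any value outside the 0-10 range before it reaches the stricter model validation. A small standalone sketch of that normalization:

```python
def normalize_cvssv3_score(raw_score):
    # Mirror the check above: only discard a value that is present but
    # falls outside the 0.0-10.0 range defined by the CVSSv3 spec.
    if raw_score and (float(raw_score) < 0 or float(raw_score) > 10):
        return None  # e.g. Anchore's -1.0 sentinel
    return raw_score

print(normalize_cvssv3_score("-1.0"))  # None
print(normalize_cvssv3_score(7.5))     # 7.5
print(normalize_cvssv3_score(None))    # None (missing scores pass through)
```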
