From 71e858c89c030e1235c8887ace2fcce7608c2f36 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 16 Dec 2024 16:04:37 +0000 Subject: [PATCH 01/11] Update versions in application files --- components/package.json | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/components/package.json b/components/package.json index 590f1cb37e0..febe451775d 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.41.2", + "version": "2.42.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index f76daab65f6..ab66f338320 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.41.2" +appVersion: "2.42.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.164 +version: 1.6.165-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From 209fbb4ece4d6f36e1f85fdd56f41768d5abe62d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 18:06:48 -0600 Subject: [PATCH 02/11] Bump nanoid from 3.3.7 to 3.3.8 in /docs (#11421) Bumps [nanoid](https://github.com/ai/nanoid) from 3.3.7 to 3.3.8. - [Release notes](https://github.com/ai/nanoid/releases) - [Changelog](https://github.com/ai/nanoid/blob/main/CHANGELOG.md) - [Commits](https://github.com/ai/nanoid/compare/3.3.7...3.3.8) --- updated-dependencies: - dependency-name: nanoid dependency-type: indirect ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- docs/package-lock.json | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/docs/package-lock.json b/docs/package-lock.json index 187c86624d8..254062bd28d 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -3636,16 +3636,15 @@ "license": "MIT" }, "node_modules/nanoid": { - "version": "3.3.7", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", - "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "version": "3.3.8", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz", + "integrity": "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/ai" } ], - "license": "MIT", "bin": { "nanoid": "bin/nanoid.cjs" }, From f6be5e876bcc8f6519a1979f6e2e049f68bdc37c Mon Sep 17 00:00:00 2001 From: Paul Osinski <42211303+paulOsinski@users.noreply.github.com> Date: Tue, 17 Dec 2024 09:47:02 -0700 Subject: [PATCH 03/11] [docs] Pro Docs release notes - 2.41.2 (#11420) Co-authored-by: Paul Osinski --- docs/content/en/changelog/changelog.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/content/en/changelog/changelog.md b/docs/content/en/changelog/changelog.md index 3d0c2c92292..e92ec689612 100644 --- a/docs/content/en/changelog/changelog.md +++ b/docs/content/en/changelog/changelog.md @@ -5,7 +5,12 @@ description: "DefectDojo Changelog" Here are the release notes for **DefectDojo Pro (Cloud Version)**. These release notes are focused on UX, so will not include all code changes. 
-For Open Source release notes, please see the [Releases page on GitHub](https://github.com/DefectDojo/django-DefectDojo/releases), or alternatively consult the Open Source [upgrate notes](../../open_source/upgrading/upgrading_guide). +For Open Source release notes, please see the [Releases page on GitHub](https://github.com/DefectDojo/django-DefectDojo/releases), or alternatively consult the Open Source [upgrade notes](../../open_source/upgrading/upgrading_guide). + + +## Dec 16, 2024: v2.41.2 + +- **(Connectors)** Remove the 'Beta' logo from Connectors ## Dec 9, 2024: v2.41.1 From e80b7d99b6f2dd474f7cdbb43b2adcb41fff71b6 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Tue, 17 Dec 2024 10:51:27 -0600 Subject: [PATCH 04/11] Release Drafter: Update upgrade notes link --- .github/release-drafter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml index 05905306de6..f4eed0e81cd 100644 --- a/.github/release-drafter.yml +++ b/.github/release-drafter.yml @@ -51,7 +51,7 @@ exclude-labels: change-template: '- $TITLE @$AUTHOR (#$NUMBER)' template: | - Please consult the [Upgrade notes in the documentation ](https://documentation.defectdojo.com/getting_started/upgrading/) for specific instructions for this release, and general upgrade instructions. Below is an automatically generated list of all PRs merged since the previous release. + Please consult the [Upgrade notes in the documentation ](https://docs.defectdojo.com/en/open_source/upgrading/upgrading_guide/) for specific instructions for this release, and general upgrade instructions. Below is an automatically generated list of all PRs merged since the previous release. 
## Changes since $PREVIOUS_TAG $CHANGES From 2f79dc6f90c3eeac50f71f022604ccf20b1fddcb Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Tue, 17 Dec 2024 22:07:24 -0600 Subject: [PATCH 05/11] Notifications: Convert to classes (#11296) * Struggle bussing * Getting tests sorted out * Some tweaks * Formatting * Update mocks * Correct ruff * Update dojo/notifications/helper.py Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> * Update dojo/notifications/helper.py Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> * Update dojo/notifications/helper.py Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> * Update dojo/notifications/helper.py Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> * Make `no_users` default to False in more than one place * Last ruff fix --------- Co-authored-by: Charles Neill <1749665+cneill@users.noreply.github.com> --- dojo/engagement/views.py | 11 +- dojo/importers/base_importer.py | 37 + dojo/importers/default_importer.py | 15 +- dojo/importers/default_reimporter.py | 3 +- dojo/notifications/helper.py | 1287 ++++++++++++++++---------- dojo/notifications/views.py | 9 +- unittests/test_notifications.py | 145 +-- 7 files changed, 950 insertions(+), 557 deletions(-) diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py index 70ff8a7b160..3b515c7468a 100644 --- a/dojo/engagement/views.py +++ b/dojo/engagement/views.py @@ -28,7 +28,6 @@ from openpyxl.styles import Font import dojo.jira_link.helper as jira_helper -import dojo.notifications.helper as notifications_helper import dojo.risk_acceptance.helper as ra_helper from dojo.authorization.authorization import user_has_permission_or_403 from dojo.authorization.authorization_decorators import user_is_authorized @@ -653,7 +652,15 @@ def add_tests(request, eid): "Test added successfully.", extra_tags="alert-success") - 
notifications_helper.notify_test_created(new_test) + create_notification( + event="test_added", + title=f"Test created for {new_test.engagement.product}: {new_test.engagement.name}: {new_test}", + test=new_test, + engagement=new_test.engagement, + product=new_test.engagement.product, + url=reverse("view_test", args=(new_test.id,)), + url_api=reverse("test-detail", args=(new_test.id,)), + ) if "_Add Another Test" in request.POST: return HttpResponseRedirect( diff --git a/dojo/importers/base_importer.py b/dojo/importers/base_importer.py index c9a77fbb95b..cab58fd718b 100644 --- a/dojo/importers/base_importer.py +++ b/dojo/importers/base_importer.py @@ -5,6 +5,7 @@ from django.core.exceptions import ValidationError from django.core.files.base import ContentFile from django.core.files.uploadedfile import TemporaryUploadedFile +from django.urls import reverse from django.utils.timezone import make_aware import dojo.finding.helper as finding_helper @@ -28,6 +29,7 @@ Test_Type, Vulnerability_Id, ) +from dojo.notifications.helper import create_notification from dojo.tools.factory import get_parser from dojo.utils import max_safe @@ -719,3 +721,38 @@ def mitigate_finding( finding.save(dedupe_option=False) else: finding.save(dedupe_option=False, push_to_jira=self.push_to_jira) + + def notify_scan_added( + self, + test, + updated_count, + new_findings=[], + findings_mitigated=[], + findings_reactivated=[], + findings_untouched=[], + ): + logger.debug("Scan added notifications") + + new_findings = sorted(new_findings, key=lambda x: x.numerical_severity) + findings_mitigated = sorted(findings_mitigated, key=lambda x: x.numerical_severity) + findings_reactivated = sorted(findings_reactivated, key=lambda x: x.numerical_severity) + findings_untouched = sorted(findings_untouched, key=lambda x: x.numerical_severity) + + title = ( + f"Created/Updated {updated_count} findings for {test.engagement.product}: {test.engagement.name}: {test}" + ) + + create_notification( + 
event="scan_added_empty" if updated_count == 0 else "scan_added", + title=title, + findings_new=new_findings, + findings_mitigated=findings_mitigated, + findings_reactivated=findings_reactivated, + finding_count=updated_count, + test=test, + engagement=test.engagement, + product=test.engagement.product, + findings_untouched=findings_untouched, + url=reverse("view_test", args=(test.id,)), + url_api=reverse("test-detail", args=(test.id,)), + ) diff --git a/dojo/importers/default_importer.py b/dojo/importers/default_importer.py index 95254ef59b8..3ac31143792 100644 --- a/dojo/importers/default_importer.py +++ b/dojo/importers/default_importer.py @@ -3,10 +3,10 @@ from django.core.files.uploadedfile import TemporaryUploadedFile from django.core.serializers import deserialize, serialize from django.db.models.query_utils import Q +from django.urls import reverse import dojo.finding.helper as finding_helper import dojo.jira_link.helper as jira_helper -import dojo.notifications.helper as notifications_helper from dojo.importers.base_importer import BaseImporter, Parser from dojo.importers.options import ImporterOptions from dojo.models import ( @@ -15,6 +15,7 @@ Test, Test_Import, ) +from dojo.notifications.helper import create_notification logger = logging.getLogger(__name__) deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication") @@ -126,9 +127,17 @@ def process_scan( ) # Send out some notifications to the user logger.debug("IMPORT_SCAN: Generating notifications") - notifications_helper.notify_test_created(self.test) + create_notification( + event="test_added", + title=f"Test created for {self.test.engagement.product}: {self.test.engagement.name}: {self.test}", + test=self.test, + engagement=self.test.engagement, + product=self.test.engagement.product, + url=reverse("view_test", args=(self.test.id,)), + url_api=reverse("test-detail", args=(self.test.id,)), + ) updated_count = len(new_findings) + len(closed_findings) - 
notifications_helper.notify_scan_added( + self.notify_scan_added( self.test, updated_count, new_findings=new_findings, diff --git a/dojo/importers/default_reimporter.py b/dojo/importers/default_reimporter.py index 9debf4aabaa..0c4159ed669 100644 --- a/dojo/importers/default_reimporter.py +++ b/dojo/importers/default_reimporter.py @@ -6,7 +6,6 @@ import dojo.finding.helper as finding_helper import dojo.jira_link.helper as jira_helper -import dojo.notifications.helper as notifications_helper from dojo.importers.base_importer import BaseImporter, Parser from dojo.importers.options import ImporterOptions from dojo.models import ( @@ -128,7 +127,7 @@ def process_scan( updated_count = ( len(closed_findings) + len(reactivated_findings) + len(new_findings) ) - notifications_helper.notify_scan_added( + self.notify_scan_added( self.test, updated_count, new_findings=new_findings, diff --git a/dojo/notifications/helper.py b/dojo/notifications/helper.py index 55281901192..3e0a0295de2 100644 --- a/dojo/notifications/helper.py +++ b/dojo/notifications/helper.py @@ -1,5 +1,7 @@ +import importlib import json import logging +from contextlib import suppress from datetime import timedelta import requests @@ -7,7 +9,7 @@ from django.conf import settings from django.core.exceptions import FieldDoesNotExist from django.core.mail import EmailMessage -from django.db.models import Count, Prefetch, Q +from django.db.models import Count, Prefetch, Q, QuerySet from django.template import TemplateDoesNotExist from django.template.loader import render_to_string from django.urls import reverse @@ -20,213 +22,282 @@ from dojo.models import ( Alerts, Dojo_User, + Engagement, + Finding, Notification_Webhooks, Notifications, + Product, + Product_Type, System_Settings, + Test, UserContactInfo, get_current_datetime, ) -from dojo.user.queries import get_authorized_users_for_product_and_product_type, get_authorized_users_for_product_type +from dojo.user.queries import ( + 
get_authorized_users_for_product_and_product_type, + get_authorized_users_for_product_type, +) logger = logging.getLogger(__name__) -def create_notification(event=None, **kwargs): - system_settings = System_Settings.objects.get() - kwargs["system_settings"] = system_settings - # System notifications - try: - system_notifications = Notifications.objects.get(user=None, template=False) - except Exception: - system_notifications = Notifications() - - if "recipients" in kwargs: - # mimic existing code so that when recipients is specified, no other system or personal notifications are sent. - logger.debug("creating notifications for recipients: %s", kwargs["recipients"]) - for recipient_notifications in Notifications.objects.filter(user__username__in=kwargs["recipients"], user__is_active=True, product=None): - if event in settings.NOTIFICATIONS_SYSTEM_LEVEL_TRUMP: - # merge the system level notifications with the personal level - # this allows for system to trump the personal - merged_notifications = Notifications.merge_notifications_list([system_notifications, recipient_notifications]) - merged_notifications.user = recipient_notifications.user - logger.debug("Sent notification to %s", merged_notifications.user) - process_notifications(event, merged_notifications, **kwargs) - else: - # Do not trump user preferences and send notifications as usual - logger.debug("Sent notification to %s", recipient_notifications.user) - process_notifications(event, recipient_notifications, **kwargs) - - else: - logger.debug("creating system notifications for event: %s", event) - # send system notifications to all admin users - - # parse kwargs before converting them to dicts - product_type = None - if "product_type" in kwargs: - product_type = kwargs.get("product_type") - logger.debug("Defined product type %s", product_type) +def create_notification( + event: str | None = None, + title: str | None = None, + finding: Finding | None = None, + test: Test | None = None, + engagement: 
Engagement | None = None, + product: Product | None = None, + requested_by: Dojo_User | None = None, + reviewers: list[Dojo_User] | list[str] | None = None, + recipients: list[Dojo_User] | list[str] | None = None, + no_users: bool = False, # noqa: FBT001 + url: str | None = None, + url_api: str | None = None, + **kwargs: dict, +) -> None: + """Create an instance of a NotificationManager and dispatch the notification.""" + default_manager = NotificationManager + notification_manager_class = default_manager + if isinstance( + ( + notification_manager := getattr( + settings, + "NOTIFICATION_MANAGER", + default_manager, + ) + ), + str, + ): + with suppress(ModuleNotFoundError): + module_name, _separator, class_name = notification_manager.rpartition(".") + module = importlib.import_module(module_name) + notification_manager_class = getattr(module, class_name) + notification_manager_class().create_notification( + event=event, + title=title, + finding=finding, + test=test, + engagement=engagement, + product=product, + requested_by=requested_by, + reviewers=reviewers, + recipients=recipients, + no_users=no_users, + url=url, + url_api=url_api, + **kwargs, + ) - product = None - if "product" in kwargs: - product = kwargs.get("product") - logger.debug("Defined product %s", product) - elif "engagement" in kwargs: - product = kwargs["engagement"].product - logger.debug("Defined product of engagement %s", product) +class NotificationManagerHelpers: - elif "test" in kwargs: - product = kwargs["test"].engagement.product - logger.debug("Defined product of test %s", product) + """Common functions for use in the Mangers.""" - elif "finding" in kwargs: - product = kwargs["finding"].test.engagement.product - logger.debug("Defined product of finding %s", product) + def __init__( + self, + *_args: list, + system_notifications: Notifications | None = None, + system_settings: System_Settings | None = None, + **_kwargs: dict, + ) -> None: + self.system_notifications = system_notifications 
or self._get_notifications_object() + self.system_settings = system_settings or self._get_system_settings() - elif "obj" in kwargs: - from dojo.utils import get_product - product = get_product(kwargs["obj"]) - logger.debug("Defined product of obj %s", product) - - # System notifications are sent one with user=None, which will trigger email to configured system email, to global slack channel, etc. - process_notifications(event, system_notifications, **kwargs) - - # All admins will also receive system notifications, but as part of the person global notifications section below - # This time user is set, so will trigger email to personal email, to personal slack channel (mention), etc. - # only retrieve users which have at least one notification type enabled for this event type. - logger.debug("creating personal notifications for event: %s", event) - - # There are notification like deleting a product type that shall not be sent to users. - # These notifications will have the parameter no_users=True - if not ("no_users" in kwargs and kwargs["no_users"] is True): - # get users with either global notifications, or a product specific noditiciation - # and all admin/superuser, they will always be notified - users = Dojo_User.objects.filter(is_active=True).prefetch_related(Prefetch( - "notifications_set", - queryset=Notifications.objects.filter(Q(product_id=product) | Q(product__isnull=True)), - to_attr="applicable_notifications", - )).annotate(applicable_notifications_count=Count("notifications__id", filter=Q(notifications__product_id=product) | Q(notifications__product__isnull=True)))\ - .filter(Q(applicable_notifications_count__gt=0) | Q(is_superuser=True)) - - # only send to authorized users or admin/superusers - logger.debug("Filtering users for the product %s", product) - - if product: - users = get_authorized_users_for_product_and_product_type(users, product, Permissions.Product_View) - - elif product_type: - users = get_authorized_users_for_product_type(users, 
product_type, Permissions.Product_Type_View) + def _get_notifications_object(self) -> Notifications: + """Set the system Notifications object on the class.""" + try: + return Notifications.objects.get(user=None, template=False) + except Exception: + return Notifications() + + def _get_system_settings(self) -> System_Settings: + """Set the system settings object in the class.""" + return System_Settings.objects.get() + + def _create_description(self, event: str, kwargs: dict) -> str: + if kwargs.get("description") is None: + if event == "product_added": + kwargs["description"] = _("Product %s has been created successfully.") % kwargs["title"] + elif event == "product_type_added": + kwargs["description"] = _("Product Type %s has been created successfully.") % kwargs["title"] else: - # nor product_type nor product defined, we should not make noise and send only notifications to admins - logger.debug("Product is not specified, making it silent") - users = users.filter(is_superuser=True) - - for user in users: - logger.debug("Authorized user for the product %s", user) - # send notifications to user after merging possible multiple notifications records (i.e. 
personal global + personal product) - # kwargs.update({'user': user}) - applicable_notifications = user.applicable_notifications - if user.is_superuser: - logger.debug("User %s is superuser", user) - # admin users get all system notifications - applicable_notifications.append(system_notifications) - - notifications_set = Notifications.merge_notifications_list(applicable_notifications) - notifications_set.user = user - process_notifications(event, notifications_set, **kwargs) - - -def create_description(event, *args, **kwargs): - if "description" not in kwargs: - if event == "product_added": - kwargs["description"] = _("Product %s has been created successfully.") % kwargs["title"] - elif event == "product_type_added": - kwargs["description"] = _("Product Type %s has been created successfully.") % kwargs["title"] - else: - kwargs["description"] = _("Event %s has occurred.") % str(event) - - return kwargs["description"] + kwargs["description"] = _("Event %s has occurred.") % str(event) + return kwargs["description"] -def create_notification_message(event, user, notification_type, *args, **kwargs): - template = f"notifications/{notification_type}/{event.replace('/', '')}.tpl" - kwargs.update({"user": user}) + def _create_notification_message( + self, + event: str, + user: Dojo_User, + notification_type: str, + kwargs: dict, + ) -> str: + template = f"notifications/{notification_type}/{event.replace('/', '')}.tpl" + kwargs.update({"user": user}) + notification_message = None - notification_message = None + # TODO: This may be deleted + # if (title := kwargs.get("title")) is not None: + # kwargs.update({"title": title}) - if (title := kwargs.get("title")) is not None: - kwargs.update({"title": title}) - - if kwargs.get("description") is None: - kwargs.update({"description": create_description(event, *args, **kwargs)}) - - try: - notification_message = render_to_string(template, kwargs) - logger.debug("Rendering from the template %s", template) - except 
TemplateDoesNotExist as e: - # In some cases, template includes another templates, if the interior one is missing, we will see it in "specifically" section - logger.debug(f"template not found or not implemented yet: {template} (specifically: {e.args})") - except Exception as e: - logger.error("error during rendering of template %s exception is %s", template, e) - finally: - if not notification_message: - kwargs["description"] = create_description(event, *args, **kwargs) - notification_message = render_to_string(f"notifications/{notification_type}/other.tpl", kwargs) - - return notification_message or "" - - -def process_notifications(event, notifications=None, **kwargs): - from dojo.utils import get_system_setting - - if not notifications: - logger.warning("no notifications!") - return - - logger.debug("sending notification " + ("asynchronously" if we_want_async() else "synchronously")) - logger.debug("process notifications for %s", notifications.user) - logger.debug("notifications: %s", vars(notifications)) - - slack_enabled = get_system_setting("enable_slack_notifications") - msteams_enabled = get_system_setting("enable_msteams_notifications") - mail_enabled = get_system_setting("enable_mail_notifications") - webhooks_enabled = get_system_setting("enable_webhooks_notifications") - - if slack_enabled and "slack" in getattr(notifications, event, getattr(notifications, "other")): - logger.debug("Sending Slack Notification") - send_slack_notification(event, notifications.user, **kwargs) - - if msteams_enabled and "msteams" in getattr(notifications, event, getattr(notifications, "other")): - logger.debug("Sending MSTeams Notification") - send_msteams_notification(event, notifications.user, **kwargs) - - if mail_enabled and "mail" in getattr(notifications, event, getattr(notifications, "other")): - logger.debug("Sending Mail Notification") - send_mail_notification(event, notifications.user, **kwargs) - - if webhooks_enabled and "webhooks" in getattr(notifications, 
event, getattr(notifications, "other")): - logger.debug("Sending Webhooks Notification") - send_webhooks_notification(event, notifications.user, **kwargs) - - if "alert" in getattr(notifications, event, getattr(notifications, "other")): - logger.debug(f"Sending Alert to {notifications.user}") - send_alert_notification(event, notifications.user, **kwargs) + if kwargs.get("description") is None: + kwargs.update({"description": self._create_description(event, kwargs)}) + try: + notification_message = render_to_string(template, kwargs) + logger.debug("Rendering from the template %s", template) + except TemplateDoesNotExist as e: + # In some cases, template includes another templates, if the interior one is missing, we will see it in "specifically" section + logger.debug( + f"template not found or not implemented yet: {template} (specifically: {e.args})", + ) + except Exception as e: + logger.error( + "error during rendering of template %s exception is %s", + template, + e, + ) + finally: + if not notification_message: + kwargs["description"] = self._create_description(event, kwargs) + notification_message = render_to_string( + f"notifications/{notification_type}/other.tpl", + kwargs, + ) -@dojo_async_task -@app.task -def send_slack_notification(event, user=None, *args, **kwargs): - from dojo.utils import get_system_setting + return notification_message or "" + + def _log_alert( + self, + exception: Exception, + notification_type: str | None = None, + **kwargs: dict, + ) -> None: + # no try catch here, if this fails we need to show an error + for user in Dojo_User.objects.filter(is_superuser=True): + alert = Alerts( + user_id=user, + url=kwargs.get("url", reverse("alerts")), + title=kwargs.get("title", "Notification issue")[:250], + description=kwargs.get("description", str(exception))[:2000], + icon="exclamation-triangle", + source=notification_type[:100] if notification_type else kwargs.get("source", "unknown")[:100], + ) + # relative urls will fail validation + 
alert.clean_fields(exclude=["url"]) + alert.save() + + +class SlackNotificationManger(NotificationManagerHelpers): + + """Manger for slack notifications and their helpers.""" + + @dojo_async_task + @app.task + def send_slack_notification( + self, + event: str, + user: Dojo_User | None = None, + **kwargs: dict, + ): + try: + # If the user has slack information on profile and chooses to receive slack notifications + # Will receive a DM + if user is not None: + logger.debug("personal notification to slack for user %s", user) + if hasattr(user, "usercontactinfo") and user.usercontactinfo.slack_username is not None: + slack_user_id = user.usercontactinfo.slack_user_id + if not slack_user_id: + # Lookup the slack userid the first time, then save it. + slack_user_id = self._get_slack_user_id( + user.usercontactinfo.slack_username, + ) + if slack_user_id: + slack_user_save = UserContactInfo.objects.get( + user_id=user.id, + ) + slack_user_save.slack_user_id = slack_user_id + slack_user_save.save() + # only send notification if we managed to find the slack_user_id + if slack_user_id: + channel = f"@{slack_user_id}" + self._post_slack_message(event, user, channel, **kwargs) + else: + logger.info( + "The user %s does not have a email address informed for Slack in profile.", + user, + ) + else: + # System scope slack notifications, and not personal would still see this go through + if self.system_settings.slack_channel is not None: + channel = self.system_settings.slack_channel + logger.info( + f"Sending system notification to system channel {channel}.", + ) + self._post_slack_message(event, user, channel, **kwargs) + else: + logger.debug( + "slack_channel not configured: skipping system notification", + ) + + except Exception as exception: + logger.exception(exception) + self._log_alert( + exception, + "Slack Notification", + title=kwargs["title"], + description=str(exception), + url=kwargs.get("url"), + ) + + def _get_slack_user_id(self, user_email: str) -> str: + user_id = 
None + res = requests.request( + method="POST", + url="https://slack.com/api/users.lookupByEmail", + data={"token": self.system_settings.slack_token, "email": user_email}, + timeout=settings.REQUESTS_TIMEOUT, + ) - def _post_slack_message(channel): + user = json.loads(res.text) + slack_user_is_found = False + if user: + if "error" in user: + logger.error("Slack is complaining. See error message below.") + logger.error(user) + raise RuntimeError("Error getting user list from Slack: " + res.text) + if "email" in user["user"]["profile"]: + if user_email == user["user"]["profile"]["email"]: + if "id" in user["user"]: + user_id = user["user"]["id"] + logger.debug(f"Slack user ID is {user_id}") + slack_user_is_found = True + else: + logger.warning( + f"A user with email {user_email} could not be found in this Slack workspace.", + ) + + if not slack_user_is_found: + logger.warning("The Slack user was not found.") + + return user_id + + def _post_slack_message( + self, + event: str, + user: Dojo_User, + channel: str, + **kwargs: dict, + ) -> None: res = requests.request( method="POST", url="https://slack.com/api/chat.postMessage", data={ - "token": get_system_setting("slack_token"), + "token": self.system_settings.slack_token, "channel": channel, - "username": get_system_setting("slack_username"), - "text": create_notification_message(event, user, "slack", *args, **kwargs), + "username": self.system_settings.slack_username, + "text": self._create_notification_message(event, user, "slack", kwargs), }, timeout=settings.REQUESTS_TIMEOUT, ) @@ -236,155 +307,560 @@ def _post_slack_message(channel): logger.error(res.text) raise RuntimeError("Error posting message to Slack: " + res.text) - try: - # If the user has slack information on profile and chooses to receive slack notifications - # Will receive a DM - if user is not None: - logger.debug("personal notification to slack for user %s", user) - if hasattr(user, "usercontactinfo") and user.usercontactinfo.slack_username is not 
None: - slack_user_id = user.usercontactinfo.slack_user_id - if not slack_user_id: - # Lookup the slack userid the first time, then save it. - slack_user_id = get_slack_user_id( - user.usercontactinfo.slack_username) - if slack_user_id: - slack_user_save = UserContactInfo.objects.get(user_id=user.id) - slack_user_save.slack_user_id = slack_user_id - slack_user_save.save() - - # only send notification if we managed to find the slack_user_id - if slack_user_id: - channel = f"@{slack_user_id}" - _post_slack_message(channel) - else: - logger.info("The user %s does not have a email address informed for Slack in profile.", user) +class MSTeamsNotificationManger(NotificationManagerHelpers): + + """Manger for Microsoft Teams notifications and their helpers.""" + + @dojo_async_task + @app.task + def send_msteams_notification( + self, + event: str, + user: Dojo_User | None = None, + **kwargs: dict, + ): + try: + # Microsoft Teams doesn't offer direct message functionality, so no MS Teams PM functionality here... 
+ if user is None: + if self.system_settings.msteams_url is not None: + logger.debug("sending MSTeams message") + res = requests.request( + method="POST", + url=self.system_settings.msteams_url, + data=self._create_notification_message( + event, + None, + "msteams", + kwargs, + ), + timeout=settings.REQUESTS_TIMEOUT, + ) + if res.status_code != 200: + logger.error("Error when sending message to Microsoft Teams") + logger.error(res.status_code) + logger.error(res.text) + raise RuntimeError( + "Error posting message to Microsoft Teams: " + res.text, + ) + else: + logger.info( + "Webhook URL for Microsoft Teams not configured: skipping system notification", + ) + except Exception as exception: + logger.exception(exception) + self._log_alert( + exception, + "Microsoft Teams Notification", + title=kwargs["title"], + description=str(exception), + url=kwargs["url"], + ) + + +class EmailNotificationManger(NotificationManagerHelpers): + + """Manger for email notifications and their helpers.""" + + @dojo_async_task + @app.task + def send_mail_notification( + self, + event: str, + user: Dojo_User | None = None, + **kwargs: dict, + ): + # Attempt to get the "to" address + if (recipient := kwargs.get("recipient")) is not None: + address = recipient + elif user: + address = user.email else: - # System scope slack notifications, and not personal would still see this go through - if get_system_setting("slack_channel") is not None: - channel = get_system_setting("slack_channel") - logger.info(f"Sending system notification to system channel {channel}.") - _post_slack_message(channel) + address = self.system_settings.mail_notifications_to + + logger.debug("notification email for user %s to %s", user, address) + + try: + subject = f"{self.system_settings.team_name} notification" + if (title := kwargs.get("title")) is not None: + subject += f": {title}" + + email = EmailMessage( + subject, + self._create_notification_message(event, user, "mail", kwargs), + 
self.system_settings.email_from, + [address], + headers={"From": f"{self.system_settings.email_from}"}, + ) + email.content_subtype = "html" + logger.debug("sending email alert") + email.send(fail_silently=False) + + except Exception as exception: + logger.exception(exception) + self._log_alert( + exception, + "Email Notification", + title=kwargs["title"], + description=str(exception), + url=kwargs["url"], + ) + + +class WebhookNotificationManger(NotificationManagerHelpers): + + """Manger for webhook notifications and their helpers.""" + + ERROR_PERMANENT = "permanent" + ERROR_TEMPORARY = "temporary" + + @dojo_async_task + @app.task + def send_webhooks_notification( + self, + event: str, + user: Dojo_User | None = None, + **kwargs: dict, + ): + for endpoint in self._get_webhook_endpoints(user=user): + error = None + if endpoint.status not in [ + Notification_Webhooks.Status.STATUS_ACTIVE, + Notification_Webhooks.Status.STATUS_ACTIVE_TMP, + ]: + logger.info( + f"URL for Webhook '{endpoint.name}' is not active: {endpoint.get_status_display()} ({endpoint.status})", + ) + continue + + try: + logger.debug(f"Sending webhook message to endpoint '{endpoint.name}'") + res = self._webhooks_notification_request(endpoint, event, **kwargs) + if 200 <= res.status_code < 300: + logger.debug( + f"Message sent to endpoint '{endpoint.name}' successfully.", + ) + continue + # HTTP request passed successfully but we still need to check status code + if 500 <= res.status_code < 600 or res.status_code == 429: + error = self.ERROR_TEMPORARY + else: + error = self.ERROR_PERMANENT + + endpoint.note = f"Response status code: {res.status_code}" + logger.error( + f"Error when sending message to Webhooks '{endpoint.name}' (status: {res.status_code}): {res.text}", + ) + except requests.exceptions.Timeout as e: + error = self.ERROR_TEMPORARY + endpoint.note = f"Requests exception: {e}" + logger.error( + f"Timeout when sending message to Webhook '{endpoint.name}'", + ) + except Exception as 
exception: + error = self.ERROR_PERMANENT + endpoint.note = f"Exception: {exception}"[:1000] + logger.exception(exception) + self._log_alert(exception, "Webhooks Notification") + + now = get_current_datetime() + if error == self.ERROR_TEMPORARY: + # If endpoint is unstable for more then one day, it needs to be deactivated + if endpoint.first_error is not None and (now - endpoint.first_error).total_seconds() > 60 * 60 * 24: + endpoint.status = Notification_Webhooks.Status.STATUS_INACTIVE_PERMANENT + else: + # We need to monitor when outage started + if endpoint.status == Notification_Webhooks.Status.STATUS_ACTIVE: + endpoint.first_error = now + endpoint.status = Notification_Webhooks.Status.STATUS_INACTIVE_TMP + # In case of failure within one day, endpoint can be deactivated temporally only for one minute + self._webhook_reactivation.apply_async( + args=[self], + kwargs={"endpoint_id": endpoint.pk}, + countdown=60, + ) + # There is no reason to keep endpoint active if it is returning 4xx errors else: - logger.debug("slack_channel not configured: skipping system notification") - - except Exception as e: - logger.exception(e) - log_alert(e, "Slack Notification", title=kwargs["title"], description=str(e), url=kwargs.get("url")) - - -@dojo_async_task -@app.task -def send_msteams_notification(event, user=None, *args, **kwargs): - from dojo.utils import get_system_setting - - try: - # Microsoft Teams doesn't offer direct message functionality, so no MS Teams PM functionality here... 
- if user is None: - if get_system_setting("msteams_url") is not None: - logger.debug("sending MSTeams message") - res = requests.request( - method="POST", - url=get_system_setting("msteams_url"), - data=create_notification_message(event, None, "msteams", *args, **kwargs), - timeout=settings.REQUESTS_TIMEOUT, + endpoint.status = Notification_Webhooks.Status.STATUS_INACTIVE_PERMANENT + endpoint.first_error = now + + endpoint.last_error = now + endpoint.save() + + def _get_webhook_endpoints( + self, + user: Dojo_User | None = None, + ) -> QuerySet[Notification_Webhooks]: + endpoints = Notification_Webhooks.objects.filter(owner=user) + if not endpoints.exists(): + if user: + logger.info( + f"URLs for Webhooks not configured for user '{user}': skipping user notification", ) - if res.status_code != 200: - logger.error("Error when sending message to Microsoft Teams") - logger.error(res.status_code) - logger.error(res.text) - raise RuntimeError("Error posting message to Microsoft Teams: " + res.text) else: - logger.info("Webhook URL for Microsoft Teams not configured: skipping system notification") - except Exception as e: - logger.exception(e) - log_alert(e, "Microsoft Teams Notification", title=kwargs["title"], description=str(e), url=kwargs["url"]) - - -@dojo_async_task -@app.task -def send_mail_notification(event, user=None, *args, **kwargs): - from dojo.utils import get_system_setting - email_from_address = get_system_setting("email_from") - # Attempt to get the "to" address - if "recipient" in kwargs: - address = kwargs.get("recipient") - elif user: - address = user.email - else: - address = get_system_setting("mail_notifications_to") - - logger.debug("notification email for user %s to %s", user, address) - - try: - subject = f"{get_system_setting('team_name')} notification" - if "title" in kwargs: - subject += f": {kwargs['title']}" - - email = EmailMessage( - subject, - create_notification_message(event, user, "mail", *args, **kwargs), - email_from_address, - 
[address], - headers={"From": f"{email_from_address}"}, + logger.info( + "URLs for Webhooks not configured: skipping system notification", + ) + return Notification_Webhooks.objects.none() + return endpoints + + def _generate_request_details( + self, + endpoint: Notification_Webhooks, + event: str | None = None, + **kwargs: dict, + ) -> tuple[dict, dict]: + headers = { + "User-Agent": f"DefectDojo-{dd_version}", + "X-DefectDojo-Event": event, + "X-DefectDojo-Instance": settings.SITE_URL, + "Accept": "application/json", + } + if endpoint.header_name is not None: + headers[endpoint.header_name] = endpoint.header_value + yaml_data = self._create_notification_message( + event, + endpoint.owner, + "webhooks", + kwargs, + ) + data = yaml.safe_load(yaml_data) + + return headers, data + + def _webhooks_notification_request( + self, + endpoint: Notification_Webhooks, + event: str | None = None, + **kwargs: dict, + ) -> requests.Response: + headers, data = self._generate_request_details(endpoint, event=event, **kwargs) + return requests.request( + method="POST", + url=endpoint.url, + headers=headers, + json=data, + timeout=self.system_settings.webhooks_notifications_timeout, ) - email.content_subtype = "html" - logger.debug("sending email alert") - # logger.info(create_notification_message(event, user, 'mail', *args, **kwargs)) - email.send(fail_silently=False) - - except Exception as e: - logger.exception(e) - log_alert(e, "Email Notification", title=kwargs["title"], description=str(e), url=kwargs["url"]) - - -def webhooks_notification_request(endpoint, event, *args, **kwargs): - from dojo.utils import get_system_setting - - headers = { - "User-Agent": f"DefectDojo-{dd_version}", - "X-DefectDojo-Event": event, - "X-DefectDojo-Instance": settings.SITE_URL, - "Accept": "application/json", - } - if endpoint.header_name is not None: - headers[endpoint.header_name] = endpoint.header_value - yaml_data = create_notification_message(event, endpoint.owner, "webhooks", *args, 
**kwargs) - data = yaml.safe_load(yaml_data) - - timeout = get_system_setting("webhooks_notifications_timeout") - - return requests.request( - method="POST", - url=endpoint.url, - headers=headers, - json=data, - timeout=timeout, - ) + def _test_webhooks_notification(self, endpoint: Notification_Webhooks) -> None: + res = self._webhooks_notification_request( + endpoint, + "ping", + description="Test webhook notification", + ) + res.raise_for_status() + # in "send_webhooks_notification", we are doing deeper analysis, why it failed + # for now, "raise_for_status" should be enough + + @app.task(ignore_result=True) + def _webhook_reactivation(self, endpoint_id: int, **_kwargs: dict): + endpoint = Notification_Webhooks.objects.get(pk=endpoint_id) + # User already changed status of endpoint + if endpoint.status != Notification_Webhooks.Status.STATUS_INACTIVE_TMP: + return + endpoint.status = Notification_Webhooks.Status.STATUS_ACTIVE_TMP + endpoint.save() + logger.debug( + f"Webhook endpoint '{endpoint.name}' reactivated to '{Notification_Webhooks.Status.STATUS_ACTIVE_TMP}'", + ) -def test_webhooks_notification(endpoint): - res = webhooks_notification_request(endpoint, "ping", description="Test webhook notification") - res.raise_for_status() - # in "send_webhooks_notification", we are doing deeper analysis, why it failed - # for now, "raise_for_status" should be enough +class AlertNotificationManger(NotificationManagerHelpers): -@app.task(ignore_result=True) -def webhook_reactivation(endpoint_id: int, *args, **kwargs): - endpoint = Notification_Webhooks.objects.get(pk=endpoint_id) + """Manger for alert notifications and their helpers.""" - # User already changed status of endpoint - if endpoint.status != Notification_Webhooks.Status.STATUS_INACTIVE_TMP: - return + def send_alert_notification( + self, + event: str, + user: Dojo_User | None = None, + **kwargs: dict, + ): + logger.debug("sending alert notification to %s", user) + try: + # no need to differentiate between 
user/no user + icon = kwargs.get("icon", "info-circle") + try: + source = Notifications._meta.get_field(event).verbose_name.title()[:100] + except FieldDoesNotExist: + source = event.replace("_", " ").title()[:100] + alert = Alerts( + user_id=user, + title=kwargs.get("title")[:250], + description=self._create_notification_message( + event, + user, + "alert", + kwargs, + )[:2000], + url=kwargs.get("url", reverse("alerts")), + icon=icon[:25], + source=source, + ) + # relative urls will fail validation + alert.clean_fields(exclude=["url"]) + alert.save() + except Exception as exception: + logger.exception(exception) + self._log_alert( + exception, + "Alert Notification", + title=kwargs["title"], + description=str(exception), + url=kwargs["url"], + ) + + +class NotificationManager(NotificationManagerHelpers): + + """Manage the construction and dispatch of notifications.""" + + def __init__(self, *args: list, **kwargs: dict) -> None: + NotificationManagerHelpers.__init__(self, *args, **kwargs) + + def create_notification(self, event: str | None = None, **kwargs: dict) -> None: + # Process the notifications for a given list of recipients + if kwargs.get("recipients") is not None: + self._process_recipients(event=event, **kwargs) + else: + logger.debug("creating system notifications for event: %s", event) + # send system notifications to all admin users + self._process_objects(**kwargs) + # System notifications are sent one with user=None, which will trigger email to configured system email, to global slack channel, etc. + self._process_notifications( + event, + notifications=self.system_notifications, + **kwargs, + ) + # All admins will also receive system notifications, but as part of the person global notifications section below + # This time user is set, so will trigger email to personal email, to personal slack channel (mention), etc. + # only retrieve users which have at least one notification type enabled for this event type. 
+ logger.debug("creating personal notifications for event: %s", event) + # There are notification like deleting a product type that shall not be sent to users. + # These notifications will have the parameter no_users=True + if kwargs.get("no_users", False) is False: + # get users with either global notifications, or a product specific notification + # and all admin/superuser, they will always be notified + for user in self._get_user_to_send_notifications_to(): + self._send_single_notification_to_user(user, event=event, **kwargs) + + def _process_recipients(self, event: str | None = None, **kwargs: dict) -> None: + # mimic existing code so that when recipients is specified, no other system or personal notifications are sent. + logger.debug("creating notifications for recipients: %s", kwargs["recipients"]) + for recipient_notifications in Notifications.objects.filter( + user__username__in=kwargs["recipients"], + user__is_active=True, + product=None, + ): + if event in settings.NOTIFICATIONS_SYSTEM_LEVEL_TRUMP: + # merge the system level notifications with the personal level + # this allows for system to trump the personal + merged_notifications = Notifications.merge_notifications_list( + [self.system_notifications, recipient_notifications], + ) + merged_notifications.user = recipient_notifications.user + logger.debug("Sent notification to %s", merged_notifications.user) + self._process_notifications( + event, + notifications=merged_notifications, + **kwargs, + ) + else: + # Do not trump user preferences and send notifications as usual + logger.debug("Sent notification to %s", recipient_notifications.user) + self._process_notifications( + event, + notifications=recipient_notifications, + **kwargs, + ) - endpoint.status = Notification_Webhooks.Status.STATUS_ACTIVE_TMP - endpoint.save() - logger.debug(f"Webhook endpoint '{endpoint.name}' reactivated to '{Notification_Webhooks.Status.STATUS_ACTIVE_TMP}'") + def _process_objects(self, **kwargs: dict) -> None: + """Extract 
the product and product type from the kwargs.""" + self.product_type: Product_Type = None + self.product: Product = None + if (product_type := kwargs.get("product_type")) is not None: + self.product_type = product_type + logger.debug("Defined product type %s", self.product_type) + if (product := kwargs.get("product")) is not None: + self.product = product + logger.debug("Defined product %s", self.product) + elif (engagement := kwargs.get("engagement")) is not None: + self.product = engagement.product + logger.debug("Defined product of engagement %s", self.product) + elif (test := kwargs.get("test")) is not None: + self.product = test.engagement.product + logger.debug("Defined product of test %s", self.product) + elif (finding := kwargs.get("finding")) is not None: + self.product = finding.test.engagement.product + logger.debug("Defined product of finding %s", self.product) + elif (obj := kwargs.get("obj")) is not None: + from dojo.utils import get_product + + self.product = get_product(obj) + logger.debug("Defined product of obj %s", self.product) + + def _get_user_to_send_notifications_to( + self, + ) -> QuerySet[Dojo_User]: + """Determine the users we should send notifications to based on product and product type permissions.""" + users = ( + Dojo_User.objects.filter(is_active=True) + .prefetch_related( + Prefetch( + "notifications_set", + queryset=Notifications.objects.filter( + Q(product_id=self.product) | Q(product__isnull=True), + ), + to_attr="applicable_notifications", + ), + ) + .annotate( + applicable_notifications_count=Count( + "notifications__id", + filter=Q(notifications__product_id=self.product) | Q(notifications__product__isnull=True), + ), + ) + .filter(Q(applicable_notifications_count__gt=0) | Q(is_superuser=True)) + ) + # only send to authorized users or admin/superusers + logger.debug("Filtering users for the product %s", self.product) + if self.product is not None: + users = get_authorized_users_for_product_and_product_type( + users, + 
self.product, + Permissions.Product_View, + ) + elif self.product_type is not None: + users = get_authorized_users_for_product_type( + users, + self.product_type, + Permissions.Product_Type_View, + ) + else: + # nor product_type nor product defined, we should not make noise and send only notifications to admins + logger.debug("Product is not specified, making it silent") + users = users.filter(is_superuser=True) + return users + + def _send_single_notification_to_user( + self, + user: Dojo_User, + event: str | None = None, + **kwargs: dict, + ) -> None: + """Send a notification to a single user.""" + logger.debug("Authorized user for the product %s", user) + # send notifications to user after merging possible multiple notifications records (i.e. personal global + personal product) + # kwargs.update({'user': user}) + applicable_notifications = user.applicable_notifications + if user.is_superuser: + # admin users get all system notifications + logger.debug("User %s is superuser", user) + applicable_notifications.append(self.system_notifications) + + notifications_set = Notifications.merge_notifications_list( + applicable_notifications, + ) + notifications_set.user = user + self._process_notifications(event, notifications=notifications_set, **kwargs) + + def _get_manager_instance( + self, + alert_type: str, + ) -> type[NotificationManagerHelpers]: + kwargs = { + "system_notifications": self.system_notifications, + "system_settings": self.system_settings, + } + if alert_type == "slack": + return SlackNotificationManger(**kwargs) + if alert_type == "msteams": + return MSTeamsNotificationManger(**kwargs) + if alert_type == "mail": + return EmailNotificationManger(**kwargs) + if alert_type == "webhooks": + return WebhookNotificationManger(**kwargs) + if alert_type == "alert": + return AlertNotificationManger(**kwargs) + + msg = f"Unsupported alert type: {alert_type}" + raise TypeError(msg) + + def _process_notifications( + self, + event: str | None, + notifications: 
Notifications | None = None, + **kwargs: dict, + ) -> None: + # Quick break out if we do not have any work to do + if not notifications: + logger.warning("no notifications!") + return + + logger.debug( + "sending notification " + ("asynchronously" if we_want_async() else "synchronously"), + ) + logger.debug("process notifications for %s", notifications.user) + + if self.system_settings.enable_slack_notifications and "slack" in getattr( + notifications, + event, + getattr(notifications, "other"), + ): + logger.debug("Sending Slack Notification") + self._get_manager_instance("slack").send_slack_notification( + event, + user=notifications.user, + **kwargs, + ) + + if self.system_settings.enable_msteams_notifications and "msteams" in getattr( + notifications, + event, + getattr(notifications, "other"), + ): + logger.debug("Sending MSTeams Notification") + self._get_manager_instance("msteams").send_msteams_notification( + event, + user=notifications.user, + **kwargs, + ) + + if self.system_settings.enable_mail_notifications and "mail" in getattr( + notifications, + event, + getattr(notifications, "other"), + ): + logger.debug("Sending Mail Notification") + self._get_manager_instance("mail").send_mail_notification( + event, + user=notifications.user, + **kwargs, + ) + + if self.system_settings.enable_webhooks_notifications and "webhooks" in getattr( + notifications, + event, + getattr(notifications, "other"), + ): + logger.debug("Sending Webhooks Notification") + self._get_manager_instance("webhooks").send_webhooks_notification( + event, + user=notifications.user, + **kwargs, + ) + + if "alert" in getattr(notifications, event, getattr(notifications, "other")): + logger.debug(f"Sending Alert to {notifications.user}") + self._get_manager_instance("alert").send_alert_notification( + event, + user=notifications.user, + **kwargs, + ) @app.task(ignore_result=True) -def webhook_status_cleanup(*args, **kwargs): +def webhook_status_cleanup(*_args: list, **_kwargs: dict): # If 
some endpoint was affected by some outage (5xx, 429, Timeout) but it was clean during last 24 hours, # we consider this endpoint as healthy so need to reset it endpoints = Notification_Webhooks.objects.filter( @@ -397,7 +873,9 @@ def webhook_status_cleanup(*args, **kwargs): endpoint.last_error = None endpoint.note = f"Reactivation from {Notification_Webhooks.Status.STATUS_ACTIVE_TMP}" endpoint.save() - logger.debug(f"Webhook endpoint '{endpoint.name}' reactivated from '{Notification_Webhooks.Status.STATUS_ACTIVE_TMP}' to '{Notification_Webhooks.Status.STATUS_ACTIVE}'") + logger.debug( + f"Webhook endpoint '{endpoint.name}' reactivated from '{Notification_Webhooks.Status.STATUS_ACTIVE_TMP}' to '{Notification_Webhooks.Status.STATUS_ACTIVE}'", + ) # Reactivation of STATUS_INACTIVE_TMP endpoints. # They should reactive automatically in 60s, however in case of some unexpected event (e.g. start of whole stack), @@ -407,180 +885,5 @@ def webhook_status_cleanup(*args, **kwargs): last_error__lt=get_current_datetime() - timedelta(minutes=5), ) for endpoint in broken_endpoints: - webhook_reactivation(endpoint_id=endpoint.pk) - - -@dojo_async_task -@app.task -def send_webhooks_notification(event, user=None, *args, **kwargs): - - ERROR_PERMANENT = "permanent" - ERROR_TEMPORARY = "temporary" - - endpoints = Notification_Webhooks.objects.filter(owner=user) - - if not endpoints: - if user: - logger.info(f"URLs for Webhooks not configured for user '{user}': skipping user notification") - else: - logger.info("URLs for Webhooks not configured: skipping system notification") - return - - for endpoint in endpoints: - - error = None - if endpoint.status not in [Notification_Webhooks.Status.STATUS_ACTIVE, Notification_Webhooks.Status.STATUS_ACTIVE_TMP]: - logger.info(f"URL for Webhook '{endpoint.name}' is not active: {endpoint.get_status_display()} ({endpoint.status})") - continue - - try: - logger.debug(f"Sending webhook message to endpoint '{endpoint.name}'") - res = 
webhooks_notification_request(endpoint, event, *args, **kwargs) - - if 200 <= res.status_code < 300: - logger.debug(f"Message sent to endpoint '{endpoint.name}' successfully.") - continue - - # HTTP request passed successfully but we still need to check status code - error = ERROR_TEMPORARY if 500 <= res.status_code < 600 or res.status_code == 429 else ERROR_PERMANENT - - endpoint.note = f"Response status code: {res.status_code}" - logger.error(f"Error when sending message to Webhooks '{endpoint.name}' (status: {res.status_code}): {res.text}") - - except requests.exceptions.Timeout as e: - error = ERROR_TEMPORARY - endpoint.note = f"Requests exception: {e}" - logger.error(f"Timeout when sending message to Webhook '{endpoint.name}'") - - except Exception as e: - error = ERROR_PERMANENT - endpoint.note = f"Exception: {e}"[:1000] - logger.exception(e) - log_alert(e, "Webhooks Notification") - - now = get_current_datetime() - - if error == ERROR_TEMPORARY: - - # If endpoint is unstable for more then one day, it needs to be deactivated - if endpoint.first_error is not None and (now - endpoint.first_error).total_seconds() > 60 * 60 * 24: - endpoint.status = Notification_Webhooks.Status.STATUS_INACTIVE_PERMANENT - - else: - # We need to monitor when outage started - if endpoint.status == Notification_Webhooks.Status.STATUS_ACTIVE: - endpoint.first_error = now - - endpoint.status = Notification_Webhooks.Status.STATUS_INACTIVE_TMP - - # In case of failure within one day, endpoint can be deactivated temporally only for one minute - webhook_reactivation.apply_async(kwargs={"endpoint_id": endpoint.pk}, countdown=60) - - # There is no reason to keep endpoint active if it is returning 4xx errors - else: - endpoint.status = Notification_Webhooks.Status.STATUS_INACTIVE_PERMANENT - endpoint.first_error = now - - endpoint.last_error = now - endpoint.save() - - -def send_alert_notification(event, user=None, *args, **kwargs): - logger.debug("sending alert notification to %s", user) - 
try: - # no need to differentiate between user/no user - icon = kwargs.get("icon", "info-circle") - try: - source = Notifications._meta.get_field(event).verbose_name.title()[:100] - except FieldDoesNotExist: - source = event.replace("_", " ").title()[:100] - alert = Alerts( - user_id=user, - title=kwargs.get("title")[:250], - description=create_notification_message(event, user, "alert", *args, **kwargs)[:2000], - url=kwargs.get("url", reverse("alerts")), - icon=icon[:25], - source=source, - ) - # relative urls will fail validation - alert.clean_fields(exclude=["url"]) - alert.save() - except Exception as e: - logger.exception(e) - log_alert(e, "Alert Notification", title=kwargs["title"], description=str(e), url=kwargs["url"]) - - -def get_slack_user_id(user_email): - - from dojo.utils import get_system_setting - - user_id = None - - res = requests.request( - method="POST", - url="https://slack.com/api/users.lookupByEmail", - data={"token": get_system_setting("slack_token"), "email": user_email}, - timeout=settings.REQUESTS_TIMEOUT, - ) - - user = json.loads(res.text) - - slack_user_is_found = False - if user: - if "error" in user: - logger.error("Slack is complaining. 
See error message below.") - logger.error(user) - raise RuntimeError("Error getting user list from Slack: " + res.text) - if "email" in user["user"]["profile"]: - if user_email == user["user"]["profile"]["email"]: - if "id" in user["user"]: - user_id = user["user"]["id"] - logger.debug(f"Slack user ID is {user_id}") - slack_user_is_found = True - else: - logger.warning(f"A user with email {user_email} could not be found in this Slack workspace.") - - if not slack_user_is_found: - logger.warning("The Slack user was not found.") - - return user_id - - -def log_alert(e, notification_type=None, *args, **kwargs): - # no try catch here, if this fails we need to show an error - - users = Dojo_User.objects.filter(is_superuser=True) - for user in users: - alert = Alerts( - user_id=user, - url=kwargs.get("url", reverse("alerts")), - title=kwargs.get("title", "Notification issue")[:250], - description=kwargs.get("description", str(e))[:2000], - icon="exclamation-triangle", - source=notification_type[:100] if notification_type else kwargs.get("source", "unknown")[:100]) - # relative urls will fail validation - alert.clean_fields(exclude=["url"]) - alert.save() - - -def notify_test_created(test): - title = "Test created for " + str(test.engagement.product) + ": " + str(test.engagement.name) + ": " + str(test) - create_notification(event="test_added", title=title, test=test, engagement=test.engagement, product=test.engagement.product, - url=reverse("view_test", args=(test.id,)), url_api=reverse("test-detail", args=(test.id,))) - - -def notify_scan_added(test, updated_count, new_findings=[], findings_mitigated=[], findings_reactivated=[], findings_untouched=[]): - logger.debug("Scan added notifications") - - new_findings = sorted(new_findings, key=lambda x: x.numerical_severity) - findings_mitigated = sorted(findings_mitigated, key=lambda x: x.numerical_severity) - findings_reactivated = sorted(findings_reactivated, key=lambda x: x.numerical_severity) - findings_untouched = 
sorted(findings_untouched, key=lambda x: x.numerical_severity) - - title = "Created/Updated " + str(updated_count) + " findings for " + str(test.engagement.product) + ": " + str(test.engagement.name) + ": " + str(test) - - event = "scan_added_empty" if updated_count == 0 else "scan_added" - - create_notification(event=event, title=title, findings_new=new_findings, findings_mitigated=findings_mitigated, findings_reactivated=findings_reactivated, - finding_count=updated_count, test=test, engagement=test.engagement, product=test.engagement.product, findings_untouched=findings_untouched, - url=reverse("view_test", args=(test.id,)), url_api=reverse("test-detail", args=(test.id,))) + manager = WebhookNotificationManger() + manager._webhook_reactivation(manager, endpoint_id=endpoint.pk) diff --git a/dojo/notifications/views.py b/dojo/notifications/views.py index 7fe5562ee7e..7fc58803fc4 100644 --- a/dojo/notifications/views.py +++ b/dojo/notifications/views.py @@ -11,7 +11,7 @@ from dojo.forms import DeleteNotificationsWebhookForm, NotificationsForm, NotificationsWebhookForm from dojo.models import Notification_Webhooks, Notifications -from dojo.notifications.helper import test_webhooks_notification +from dojo.notifications.helper import NotificationManagerHelpers from dojo.utils import add_breadcrumb, get_enabled_notifications_list, get_system_setting logger = logging.getLogger(__name__) @@ -136,6 +136,9 @@ def set_breadcrumbs(self, request: HttpRequest): class NotificationWebhooksView(View): + def get_webhook_manager_instance(self) -> type[NotificationManagerHelpers]: + return Notification_Webhooks() + def check_webhooks_enabled(self): if not get_system_setting("enable_webhooks_notifications"): raise Http404 @@ -216,7 +219,7 @@ def process_form(self, request: HttpRequest, context: dict): form = context["form"] if form.is_valid(): try: - test_webhooks_notification(form.instance) + self.get_webhook_manager_instance().test_webhooks_notification(form.instance) except 
requests.exceptions.RequestException as e: messages.add_message( request, @@ -305,7 +308,7 @@ def process_form(self, request: HttpRequest, nwh: Notification_Webhooks, context if form.is_valid(): try: - test_webhooks_notification(form.instance) + self.get_webhook_manager_instance().test_webhooks_notification(form.instance) except requests.exceptions.RequestException as e: messages.add_message( request, diff --git a/unittests/test_notifications.py b/unittests/test_notifications.py index cccdb2e3d6b..860c2168599 100644 --- a/unittests/test_notifications.py +++ b/unittests/test_notifications.py @@ -1,6 +1,6 @@ import datetime import logging -from unittest.mock import patch +from unittest.mock import Mock, patch from auditlog.context import set_actor from crum import impersonate @@ -10,11 +10,12 @@ from rest_framework.authtoken.models import Token from rest_framework.test import APIClient, APITestCase -import dojo.notifications.helper as notifications_helper from dojo import __version__ as dd_version +from dojo.importers.base_importer import BaseImporter from dojo.models import ( DEFAULT_NOTIFICATION, Alerts, + Development_Environment, Dojo_User, Endpoint, Engagement, @@ -31,10 +32,9 @@ get_current_datetime, ) from dojo.notifications.helper import ( + AlertNotificationManger, + WebhookNotificationManger, create_notification, - send_alert_notification, - send_webhooks_notification, - webhook_reactivation, webhook_status_cleanup, ) @@ -90,107 +90,115 @@ def test_merge_notifications_list(self): self.assertEqual(len(merged_notifications.other), 3) self.assertEqual(merged_notifications.other, {"alert", "mail", "slack"}) - @patch("dojo.notifications.helper.send_alert_notification", wraps=send_alert_notification) - def test_notifications_system_level_trump(self, mock): + # @patch("dojo.notifications.helper.AlertNotificationManger.send_alert_notification", wraps=AlertNotificationManger.send_alert_notification) + 
@patch("dojo.notifications.helper.NotificationManager._get_manager_instance") + def test_notifications_system_level_trump(self, mock_get_manager_instance): + mock_manager = Mock(wraps=AlertNotificationManger()) + mock_get_manager_instance.return_value = mock_manager + notif_user, _ = Notifications.objects.get_or_create(user=User.objects.get(username="admin")) notif_system, _ = Notifications.objects.get_or_create(user=None, template=False) - last_count = mock.call_count + last_count = mock_manager.send_alert_notification.call_count with self.subTest("user off, system off"): notif_user.user_mentioned = () # no alert notif_user.save() notif_system.user_mentioned = () # no alert notif_system.save() create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"]) - self.assertEqual(mock.call_count, last_count) + self.assertEqual(mock_manager.send_alert_notification.call_count, last_count) - last_count = mock.call_count + last_count = mock_manager.send_alert_notification.call_count with self.subTest("user off, system on"): notif_user.user_mentioned = () # no alert notif_user.save() notif_system.user_mentioned = DEFAULT_NOTIFICATION # alert only notif_system.save() create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"]) - self.assertEqual(mock.call_count, last_count + 1) + self.assertEqual(mock_manager.send_alert_notification.call_count, last_count + 1) # Small note for this test-cast: Trump works only in positive direction - system is not able to disable some kind of notification if user enabled it - last_count = mock.call_count + last_count = mock_manager.send_alert_notification.call_count with self.subTest("user on, system off"): notif_user.user_mentioned = DEFAULT_NOTIFICATION # alert only notif_user.save() notif_system.user_mentioned = () # no alert notif_system.save() create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"]) - self.assertEqual(mock.call_count, last_count + 1) 
+ self.assertEqual(mock_manager.send_alert_notification.call_count, last_count + 1) - last_count = mock.call_count + last_count = mock_manager.send_alert_notification.call_count with self.subTest("user on, system on"): notif_user.user_mentioned = DEFAULT_NOTIFICATION # alert only notif_user.save() notif_system.user_mentioned = DEFAULT_NOTIFICATION # alert only notif_system.save() create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"]) - self.assertEqual(mock.call_count, last_count + 1) - last_count = mock.call_count + self.assertEqual(mock_manager.send_alert_notification.call_count, last_count + 1) + last_count = mock_manager.send_alert_notification.call_count + + # @patch("dojo.notifications.helper.AlertNotificationManger.send_alert_notification", wraps=AlertNotificationManger.send_alert_notification) + @patch("dojo.notifications.helper.NotificationManager._get_manager_instance") + def test_non_default_other_notifications(self, mock_get_manager_instance): + mock_manager = Mock(wraps=AlertNotificationManger()) + mock_get_manager_instance.return_value = mock_manager - @patch("dojo.notifications.helper.send_alert_notification", wraps=send_alert_notification) - def test_non_default_other_notifications(self, mock): notif_user, _ = Notifications.objects.get_or_create(user=User.objects.get(username="admin")) notif_system, _ = Notifications.objects.get_or_create(user=None, template=False) - last_count = mock.call_count + last_count = mock_manager.send_alert_notification.call_count with self.subTest("do not notify other"): notif_user.other = () # no alert notif_user.save() create_notification(event="dummy_bar_event", recipients=["admin"]) - self.assertEqual(mock.call_count, last_count) + self.assertEqual(mock_manager.send_alert_notification.call_count, last_count) - last_count = mock.call_count + last_count = mock_manager.send_alert_notification.call_count with self.subTest("notify other"): notif_user.other = DEFAULT_NOTIFICATION # alert 
only notif_user.save() create_notification(event="dummy_foo_event", title="title_for_dummy_foo_event", description="description_for_dummy_foo_event", recipients=["admin"]) - self.assertEqual(mock.call_count, last_count + 1) - self.assertEqual(mock.call_args_list[0].args[0], "dummy_foo_event") + self.assertEqual(mock_manager.send_alert_notification.call_count, last_count + 1) + self.assertEqual(mock_manager.send_alert_notification.call_args_list[0].args[0], "dummy_foo_event") alert = Alerts.objects.get(title="title_for_dummy_foo_event") self.assertEqual(alert.source, "Dummy Foo Event") - last_count = mock.call_count + last_count = mock_manager.send_alert_notification.call_count with self.subTest("user off, system off"): notif_user.user_mentioned = () # no alert notif_user.save() notif_system.user_mentioned = () # no alert notif_system.save() create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"]) - self.assertEqual(mock.call_count, last_count + 0) + self.assertEqual(mock_manager.send_alert_notification.call_count, last_count + 0) - last_count = mock.call_count + last_count = mock_manager.send_alert_notification.call_count with self.subTest("user off, system on"): notif_user.user_mentioned = () # no alert notif_user.save() notif_system.user_mentioned = DEFAULT_NOTIFICATION # alert only notif_system.save() create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"]) - self.assertEqual(mock.call_count, last_count + 1) + self.assertEqual(mock_manager.send_alert_notification.call_count, last_count + 1) # Small note for this test-cast: Trump works only in positive direction - system is not able to disable some kind of notification if user enabled it - last_count = mock.call_count + last_count = mock_manager.send_alert_notification.call_count with self.subTest("user on, system off"): notif_user.user_mentioned = DEFAULT_NOTIFICATION # alert only notif_user.save() notif_system.user_mentioned = () # no alert 
notif_system.save() create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"]) - self.assertEqual(mock.call_count, last_count + 1) + self.assertEqual(mock_manager.send_alert_notification.call_count, last_count + 1) - last_count = mock.call_count + last_count = mock_manager.send_alert_notification.call_count with self.subTest("user on, system on"): notif_user.user_mentioned = DEFAULT_NOTIFICATION # alert only notif_user.save() notif_system.user_mentioned = DEFAULT_NOTIFICATION # alert only notif_system.save() create_notification(event="user_mentioned", title="user_mentioned", recipients=["admin"]) - self.assertEqual(mock.call_count, last_count + 1) + self.assertEqual(mock_manager.send_alert_notification.call_count, last_count + 1) class TestNotificationTriggers(DojoTestCase): @@ -199,7 +207,7 @@ class TestNotificationTriggers(DojoTestCase): def setUp(self): self.notification_tester = Dojo_User.objects.get(username="admin") - @patch("dojo.notifications.helper.process_notifications") + @patch("dojo.notifications.helper.NotificationManager._process_notifications") def test_product_types(self, mock): last_count = mock.call_count @@ -219,7 +227,7 @@ def test_product_types(self, mock): self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The product type "notif prod type" was deleted by admin') self.assertEqual(mock.call_args_list[-1].kwargs["url"], "/product/type") - @patch("dojo.notifications.helper.process_notifications") + @patch("dojo.notifications.helper.NotificationManager._process_notifications") def test_products(self, mock): last_count = mock.call_count @@ -240,7 +248,7 @@ def test_products(self, mock): self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The product "prod name" was deleted by admin') self.assertEqual(mock.call_args_list[-1].kwargs["url"], "/product") - @patch("dojo.notifications.helper.process_notifications") + @patch("dojo.notifications.helper.NotificationManager._process_notifications") 
def test_engagements(self, mock): last_count = mock.call_count @@ -300,7 +308,7 @@ def test_engagements(self, mock): self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The engagement "Testing engagement" was deleted by admin') self.assertEqual(mock.call_args_list[-1].kwargs["url"], f"/product/{prod2.id}") - @patch("dojo.notifications.helper.process_notifications") + @patch("dojo.notifications.helper.NotificationManager._process_notifications") def test_endpoints(self, mock): prod_type = Product_Type.objects.first() prod1, _ = Product.objects.get_or_create(prod_type=prod_type, name="prod name 1") @@ -323,7 +331,7 @@ def test_endpoints(self, mock): self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The endpoint "host2" was deleted by admin') self.assertEqual(mock.call_args_list[-1].kwargs["url"], "/endpoint") - @patch("dojo.notifications.helper.process_notifications") + @patch("dojo.notifications.helper.NotificationManager._process_notifications") def test_tests(self, mock): prod_type = Product_Type.objects.first() prod, _ = Product.objects.get_or_create(prod_type=prod_type, name="prod name") @@ -347,7 +355,7 @@ def test_tests(self, mock): self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The test "Acunetix Scan" was deleted by admin') self.assertEqual(mock.call_args_list[-1].kwargs["url"], f"/engagement/{eng2.id}") - @patch("dojo.notifications.helper.process_notifications") + @patch("dojo.notifications.helper.NotificationManager._process_notifications") def test_finding_groups(self, mock): prod_type = Product_Type.objects.first() prod, _ = Product.objects.get_or_create(prod_type=prod_type, name="prod name") @@ -372,7 +380,7 @@ def test_finding_groups(self, mock): self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The finding group "fg test" was deleted by admin') self.assertEqual(mock.call_args_list[-1].kwargs["url"], f"/test/{test2.id}") - @patch("dojo.notifications.helper.process_notifications") + 
@patch("dojo.notifications.helper.NotificationManager._process_notifications") @override_settings(ENABLE_AUDITLOG=True) def test_auditlog_on(self, mock): prod_type = Product_Type.objects.create(name="notif prod type") @@ -380,7 +388,7 @@ def test_auditlog_on(self, mock): prod_type.delete() self.assertEqual(mock.call_args_list[-1].kwargs["description"], 'The product type "notif prod type" was deleted by admin') - @patch("dojo.notifications.helper.process_notifications") + @patch("dojo.notifications.helper.NotificationManager._process_notifications") @override_settings(ENABLE_AUDITLOG=False) def test_auditlog_off(self, mock): prod_type = Product_Type.objects.create(name="notif prod type") @@ -397,7 +405,7 @@ def setUp(self): self.client = APIClient() self.client.credentials(HTTP_AUTHORIZATION="Token " + token.key) - @patch("dojo.notifications.helper.process_notifications") + @patch("dojo.notifications.helper.NotificationManager._process_notifications") @override_settings(ENABLE_AUDITLOG=True) def test_auditlog_on(self, mock): prod_type = Product_Type.objects.create(name="notif prod type API") @@ -427,26 +435,30 @@ def test_missing_system_webhook(self): # test data contains 2 entries but we need to test missing definition Notification_Webhooks.objects.all().delete() with self.assertLogs("dojo.notifications.helper", level="INFO") as cm: - send_webhooks_notification(event="dummy") + manager = WebhookNotificationManger() + manager.send_webhooks_notification(event="dummy") self.assertIn("URLs for Webhooks not configured: skipping system notification", cm.output[0]) def test_missing_personal_webhook(self): # test data contains 2 entries but we need to test missing definition Notification_Webhooks.objects.all().delete() with self.assertLogs("dojo.notifications.helper", level="INFO") as cm: - send_webhooks_notification(event="dummy", user=Dojo_User.objects.get(username="admin")) + manager = WebhookNotificationManger() + manager.send_webhooks_notification(event="dummy", 
user=Dojo_User.objects.get(username="admin")) self.assertIn("URLs for Webhooks not configured for user '(admin)': skipping user notification", cm.output[0]) def test_system_webhook_inactive(self): self.sys_wh.status = Notification_Webhooks.Status.STATUS_INACTIVE_PERMANENT self.sys_wh.save() with self.assertLogs("dojo.notifications.helper", level="INFO") as cm: - send_webhooks_notification(event="dummy") + manager = WebhookNotificationManger() + manager.send_webhooks_notification(event="dummy") self.assertIn("URL for Webhook 'My webhook endpoint' is not active: Permanently inactive (inactive_permanent)", cm.output[0]) def test_system_webhook_sucessful(self): with self.assertLogs("dojo.notifications.helper", level="DEBUG") as cm: - send_webhooks_notification(event="dummy") + manager = WebhookNotificationManger() + manager.send_webhooks_notification(event="dummy") self.assertIn("Message sent to endpoint 'My webhook endpoint' successfully.", cm.output[-1]) updated_wh = Notification_Webhooks.objects.filter(owner=None).first() @@ -459,7 +471,8 @@ def test_system_webhook_4xx(self): self.sys_wh.save() with self.assertLogs("dojo.notifications.helper", level="ERROR") as cm: - send_webhooks_notification(event="dummy", title="Dummy event") + manager = WebhookNotificationManger() + manager.send_webhooks_notification(event="dummy", title="Dummy event") self.assertIn("Error when sending message to Webhooks 'My webhook endpoint' (status: 400)", cm.output[-1]) updated_wh = Notification_Webhooks.objects.all().filter(owner=None).first() @@ -472,7 +485,8 @@ def test_system_webhook_first_5xx(self): self.sys_wh.save() with self.assertLogs("dojo.notifications.helper", level="ERROR") as cm: - send_webhooks_notification(event="dummy", title="Dummy event") + manager = WebhookNotificationManger() + manager.send_webhooks_notification(event="dummy", title="Dummy event") updated_wh = Notification_Webhooks.objects.filter(owner=None).first() self.assertEqual(updated_wh.status, 
Notification_Webhooks.Status.STATUS_INACTIVE_TMP) @@ -490,7 +504,8 @@ def test_system_webhook_second_5xx_within_one_day(self): self.sys_wh.save() with self.assertLogs("dojo.notifications.helper", level="ERROR") as cm: - send_webhooks_notification(event="dummy", title="Dummy event") + manager = WebhookNotificationManger() + manager.send_webhooks_notification(event="dummy", title="Dummy event") updated_wh = Notification_Webhooks.objects.filter(owner=None).first() self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_INACTIVE_TMP) @@ -510,7 +525,8 @@ def test_system_webhook_third_5xx_after_more_then_day(self): self.sys_wh.save() with self.assertLogs("dojo.notifications.helper", level="ERROR") as cm: - send_webhooks_notification(event="dummy", title="Dummy event") + manager = WebhookNotificationManger() + manager.send_webhooks_notification(event="dummy", title="Dummy event") updated_wh = Notification_Webhooks.objects.filter(owner=None).first() self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_INACTIVE_PERMANENT) @@ -522,7 +538,8 @@ def test_system_webhook_third_5xx_after_more_then_day(self): def test_webhook_reactivation(self): with self.subTest("active"): wh = Notification_Webhooks.objects.filter(owner=None).first() - webhook_reactivation(endpoint_id=wh.pk) + manager = WebhookNotificationManger() + manager._webhook_reactivation(manager, endpoint_id=wh.pk) updated_wh = Notification_Webhooks.objects.filter(owner=None).first() self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_ACTIVE) @@ -540,7 +557,8 @@ def test_webhook_reactivation(self): wh.save() with self.assertLogs("dojo.notifications.helper", level="DEBUG") as cm: - webhook_reactivation(endpoint_id=wh.pk) + manager = WebhookNotificationManger() + manager._webhook_reactivation(manager, endpoint_id=wh.pk) updated_wh = Notification_Webhooks.objects.filter(owner=None).first() self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_ACTIVE_TMP) 
@@ -640,7 +658,8 @@ def test_system_webhook_timeout(self): system_settings.save() with self.assertLogs("dojo.notifications.helper", level="ERROR") as cm: - send_webhooks_notification(event="dummy", title="Dummy event") + manager = WebhookNotificationManger() + manager.send_webhooks_notification(event="dummy", title="Dummy event") updated_wh = Notification_Webhooks.objects.filter(owner=None).first() self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_INACTIVE_TMP) @@ -655,7 +674,8 @@ def test_system_webhook_wrong_fqdn(self): self.sys_wh.save() with self.assertLogs("dojo.notifications.helper", level="ERROR") as cm: - send_webhooks_notification(event="dummy", title="Dummy event") + manager = WebhookNotificationManger() + manager.send_webhooks_notification(event="dummy", title="Dummy event") updated_wh = Notification_Webhooks.objects.filter(owner=None).first() self.assertEqual(updated_wh.status, Notification_Webhooks.Status.STATUS_INACTIVE_PERMANENT) @@ -751,7 +771,15 @@ def test_events_messages(self, mock): with self.subTest("test_added"): test = Test.objects.create(title="notif test", engagement=eng, target_start=timezone.now(), target_end=timezone.now(), test_type_id=Test_Type.objects.first().id) - notifications_helper.notify_test_created(test) + create_notification( + event="test_added", + title=f"Test created for {test.engagement.product}: {test.engagement.name}: {test}", + test=test, + engagement=test.engagement, + product=test.engagement.product, + url=reverse("view_test", args=(test.id,)), + url_api=reverse("test-detail", args=(test.id,)), + ) self.assertEqual(mock.call_args.kwargs["headers"]["X-DefectDojo-Event"], "test_added") self.maxDiff = None self.assertEqual(mock.call_args.kwargs["json"], { @@ -787,7 +815,10 @@ def test_events_messages(self, mock): }) with self.subTest("scan_added_empty"): - notifications_helper.notify_scan_added(test, updated_count=0) + BaseImporter( + 
environment=Development_Environment.objects.get_or_create(name="Development")[0], + scan_type="ZAP Scan", + ).notify_scan_added(test, updated_count=0) self.assertEqual(mock.call_args.kwargs["headers"]["X-DefectDojo-Event"], "scan_added_empty") self.maxDiff = None self.assertEqual(mock.call_args.kwargs["json"], { @@ -830,7 +861,11 @@ def test_events_messages(self, mock): }) with self.subTest("scan_added"): - notifications_helper.notify_scan_added(test, + BaseImporter( + environment=Development_Environment.objects.get_or_create(name="Development")[0], + scan_type="ZAP Scan", + ).notify_scan_added( + test, updated_count=4, new_findings=[ Finding.objects.create(test=test, title="New Finding", severity="Critical"), From e939ca195954cec84e97bfe158effaa4639ceeab Mon Sep 17 00:00:00 2001 From: manuelsommer <47991713+manuel-sommer@users.noreply.github.com> Date: Wed, 18 Dec 2024 05:08:42 +0100 Subject: [PATCH 06/11] fix typo in docs (#11387) --- .../archived_docs/integrations/social-authentication.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/content/en/open_source/archived_docs/integrations/social-authentication.md b/docs/content/en/open_source/archived_docs/integrations/social-authentication.md index db2a536f775..640beaeff18 100644 --- a/docs/content/en/open_source/archived_docs/integrations/social-authentication.md +++ b/docs/content/en/open_source/archived_docs/integrations/social-authentication.md @@ -213,7 +213,7 @@ This will ensure the user is added to all the groups found in the Azure AD Token The Azure AD token returned by Azure will also need to be configured to include group IDs. Without this step, the token will not contain any notion of a group, and the mapping process will report that the current user is not a member of any -groups. To update the the format of the token, add a group claim that applies to whatever group type you are using. +groups. 
To update the format of the token, add a group claim that applies to whatever group type you are using. If unsure of what type that is, select `All Groups`. Do not activate `Emit groups as role claims` within the Azure AD "Token configuration" page. From f1d6d02c2c76ecf7e368c20cad4a98c5c2ae4d62 Mon Sep 17 00:00:00 2001 From: Paul Osinski <42211303+paulOsinski@users.noreply.github.com> Date: Thu, 19 Dec 2024 09:12:56 -0700 Subject: [PATCH 07/11] 2.41.1: docs maintenance (#11413) * qa connectors: merge articles, fix links * qa 'connecting tools': labels, weights, content * qa user mgmt docs: weights, content, links * fix broken links * fix upgrade notes typo --------- Co-authored-by: Paul Osinski Co-authored-by: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> --- .../en/about_defectdojo/new_user_checklist.md | 12 +-- .../connectors/_index.md | 4 +- .../connectors/about_connectors.md | 14 +-- .../connectors/add_edit_connectors.md | 9 +- .../connectors/connectors_tool_reference.md | 91 +----------------- .../connectors/edit_ignore_delete_records.md | 59 ------------ .../connectors/manage_operations.md | 79 +++++++++++++++ .../connectors/manage_records.md | 63 +++++++++++- .../connectors/operations_discover.md | 31 ------ .../connectors/operations_page.md | 23 ----- .../connectors/operations_sync.md | 41 -------- .../connectors/run_operations_manually.md | 28 ------ .../connecting_your_tools/external_tools.md | 4 +- .../en/connecting_your_tools/import_intro.md | 15 +-- .../import_scan_files/_index.md | 2 +- .../api_pipeline_modelling.md | 41 ++------ .../import_scan_files/import_scan_ui.md | 5 +- .../import_scan_files/smart_upload.md | 42 ++------ .../import_scan_files/using_reimport.md | 74 +++----------- .../connecting_your_tools/parsers/_index.md | 2 +- docs/content/en/user_management/_index.md | 4 +- .../user_management/about_perms_and_roles.md | 7 +- .../en/user_management/configure_sso.md | 60 +++--------- .../en/user_management/create_user_group.md | 
96 +++++-------------- .../user_management/set_user_permissions.md | 15 ++- .../user_management/user_permission_chart.md | 27 ++---- .../intro_to_findings.md | 2 +- docs/layouts/index.html | 2 +- 28 files changed, 267 insertions(+), 585 deletions(-) delete mode 100644 docs/content/en/connecting_your_tools/connectors/edit_ignore_delete_records.md create mode 100644 docs/content/en/connecting_your_tools/connectors/manage_operations.md delete mode 100644 docs/content/en/connecting_your_tools/connectors/operations_discover.md delete mode 100644 docs/content/en/connecting_your_tools/connectors/operations_page.md delete mode 100644 docs/content/en/connecting_your_tools/connectors/operations_sync.md delete mode 100644 docs/content/en/connecting_your_tools/connectors/run_operations_manually.md diff --git a/docs/content/en/about_defectdojo/new_user_checklist.md b/docs/content/en/about_defectdojo/new_user_checklist.md index f1f7a8b4f35..8c0f9522fdd 100644 --- a/docs/content/en/about_defectdojo/new_user_checklist.md +++ b/docs/content/en/about_defectdojo/new_user_checklist.md @@ -10,11 +10,11 @@ Here's a quick reference you can use to ensure successful implementation - from ### The Basics -1. Start by [importing a file](../../connecting_your_tools/import_scan_files/import_scan_ui) using the UI. This is generally the quickest way to see how your data fits into the DefectDojo model. (note: OS users will need to set up a Product Type and Product before they can import data) +1. Start by [importing a file](/en/connecting_your_tools/import_scan_files/import_scan_ui) using the UI. This is generally the quickest way to see how your data fits into the DefectDojo model. (note: OS users will need to set up a Product Type and Product before they can import data) -2. Now that you have data in DefectDojo, learn more about how to organize it with the [Product Hierarchy Overview](../../working_with_findings/organizing_engagements_tests/product-hierarchy-overview). 
The Product Hierarchy creates a working inventory of your apps, which helps you divide your data up into logical categories. These categories can be used to apply access control rules, or to segement your reports to the correct team. +2. Now that you have data in DefectDojo, learn more about how to organize it with the [Product Hierarchy Overview](/en/working_with_findings/organizing_engagements_tests/product_hierarchy). The Product Hierarchy creates a working inventory of your apps, which helps you divide your data up into logical categories. These categories can be used to apply access control rules, or to segement your reports to the correct team. -3. Try [creating a Report](../../pro_reports/using-the-report-builder/) to summarize the data you've imported. Reports can be used to quickly share Findings with stakeholders such as Product Owners. +3. Try [creating a Report](/en/pro_reports/using_the_report_builder/) to summarize the data you've imported. Reports can be used to quickly share Findings with stakeholders such as Product Owners. This is the essence of DefectDojo - import security data, organize it, and present it to the folks who need to know. @@ -22,6 +22,6 @@ All of these features can be automated, and because DefectDojo can handle over 1 ### Other guides -- Does your organization use Jira? Learn how to use our [Jira integration](../jira_integration/Connect%20DefectDojo%20to%20Jira.md) to create Jira tickets from the data you ingest. -- Are you expecting to share DefectDojo with many users in your organization? Check out our guides to [user management](../user_management/about-permissions-roles) and set up role-based access control (RBAC). -- Ready to dive into automation? Learn how to use the [DefectDojo API](../connecting_your_tools/import_scan_files/api_pipeline_modelling) to automatically import new data, and build a robust CI / CD pipeline. \ No newline at end of file +- Does your organization use Jira? 
Learn how to use our [Jira integration](/en/jira_integration/connect_to_jira) to create Jira tickets from the data you ingest. +- Are you expecting to share DefectDojo with many users in your organization? Check out our guides to [user management](/en/user_management/about_perms_and_roles/) and set up role-based access control (RBAC). +- Ready to dive into automation? Learn how to use the [DefectDojo API](/en/connecting_your_tools/import_scan_files/api_pipeline_modelling) to automatically import new data, and build a robust CI / CD pipeline. \ No newline at end of file diff --git a/docs/content/en/connecting_your_tools/connectors/_index.md b/docs/content/en/connecting_your_tools/connectors/_index.md index 1923119f880..b4f5ad643e0 100644 --- a/docs/content/en/connecting_your_tools/connectors/_index.md +++ b/docs/content/en/connecting_your_tools/connectors/_index.md @@ -1,11 +1,11 @@ --- -title: "Set Up API Connectors" +title: "API Connectors" description: "Seamlessly connect DefectDojo to your security tools suite" summary: "" date: 2023-09-07T16:06:50+02:00 lastmod: 2023-09-07T16:06:50+02:00 draft: false -weight: 2 +weight: 3 chapter: true sidebar: collapsed: true diff --git a/docs/content/en/connecting_your_tools/connectors/about_connectors.md b/docs/content/en/connecting_your_tools/connectors/about_connectors.md index 5b30e4af7a7..8d03035aa89 100644 --- a/docs/content/en/connecting_your_tools/connectors/about_connectors.md +++ b/docs/content/en/connecting_your_tools/connectors/about_connectors.md @@ -17,6 +17,8 @@ seo: pro-feature: true --- +Note: Connectors are a DefectDojo Pro-only feature. + DefectDojo allows users to build sophisticated API integrations, and gives users full control over how their vulnerability data is organized. But everyone needs a starting point, and that's where Connectors come in. Connectors are designed to get your security tools connected and importing data to DefectDojo as quickly as possible. 
@@ -39,9 +41,9 @@ These Connectors provide an API\-speed integration with DefectDojo, and can be u If you're using DefectDojo's **Auto\-Map** settings, you can have your first Connector up and running in no time. -1. Set up a [Connector](https://docs.defectdojo.com/en/connecting_your_tools/connectors/add_edit_connectors/) from a supported tool. -2. [Discover](https://docs.defectdojo.com/en/connecting_your_tools/connectors/operations_discover/) your tool's data hierarchy. -3. [Sync](https://docs.defectdojo.com/en/connecting_your_tools/connectors/operations_sync/) the vulnerabilities found with your tool into DefectDojo. +1. Set up a [Connector](../add_edit_connectors/) from a supported tool. +2. [Discover](../manage_operations/#discover-operations) your tool's data hierarchy. +3. [Sync](../operations_sync/#sync-operations) the vulnerabilities found with your tool into DefectDojo. That's all, really! And remember, even if you create your Connector the 'easy' way, you can easily change the way things are set up later, without losing any of your work. @@ -59,10 +61,10 @@ When you're ready to add more tools to DefectDojo, you can easily rearrange your ## My Connector isn't supported -Fortunately, DefectDojo can still handle manual import for a wide range of security tools. Please see our [Supported Tool List](https://docs.defectdojo.com/en/connecting_your_tools/parsers/), as well as our guide to Importing data. +Fortunately, DefectDojo can still handle manual import for a wide range of security tools. Please see our [Supported Tool List](../../parsers/), as well as our guide to Importing data. # **Next Steps** * Check out the Connectors page by switching to DefectDojo's **Beta UI**. -* Follow our guide to [create your first Connector](https://docs.defectdojo.com/en/connecting_your_tools/connectors/add_edit_connectors/). 
-* Check out the process of [Discovering \& Mapping](https://docs.defectdojo.com/en/connecting_your_tools/connectors/operations_discover/) your security tools and see how they can be configured to import data. +* Follow our guide to [create your first Connector](../add_edit_connectors/). +* Check out the process of [Running Operations](../manage_operations/) with your Connected security tools and see how they can be configured to import data. diff --git a/docs/content/en/connecting_your_tools/connectors/add_edit_connectors.md b/docs/content/en/connecting_your_tools/connectors/add_edit_connectors.md index 688473a7b5d..53577bb4f09 100644 --- a/docs/content/en/connecting_your_tools/connectors/add_edit_connectors.md +++ b/docs/content/en/connecting_your_tools/connectors/add_edit_connectors.md @@ -3,21 +3,24 @@ title: "Add or Edit a Connector" description: "Connect to a supported security tool" --- +Note: Connectors are a DefectDojo Pro-only feature. + The process for adding and configuring a connector is similar, regardless of the tool you’re trying to connect. However, certain tools may require you to create API keys or complete additional steps. -Before you begin this process, we recommend checking our [tool-specific reference](https://docs.defectdojo.com/en/connecting_your_tools/connectors/connectors_tool_reference/) to find the API resources for the tool you're trying to connect. +Before you begin this process, we recommend checking our [Tool-Specific Reference](../connectors_tool_reference/) to find the API resources for the tool you're trying to connect. 1. If you haven't already, start by **switching to the Beta UI** in DefectDojo. 2. From the left\-side menu, click on the **API Connectors** menu item. This is nested under the **Import** header. ​ ![image](images/add_edit_connectors.png) + 3. Choose a new Connector you want to add to DefectDojo in **Available Connections**, and click the **Add Configuration** underneath the tool. 
​ You can also edit an existing Connection under the **Configured Connections** header. Click **Manage Configuration \> Edit Configuration** for the Configured Connection you want to Edit. ​ ![image](images/add_edit_connectors_2.png) -4. You will need an accessible URL **Location** for the tool, along with an API **Secret** key. The location of the API key will depend on the tool you are trying to configure. See our [Tool\-Specific Reference](https://docs.defectdojo.com/en/connecting_your_tools/connectors/connectors_tool_reference/) for more details. +4. You will need an accessible URL **Location** for the tool, along with an API **Secret** key. The location of the API key will depend on the tool you are trying to configure. See our [Tool\-Specific Reference](../connectors_tool_reference/) for more details. ​ 5. Set a **Label** for this connection to help you identify it in DefectDojo. ​ @@ -31,4 +34,4 @@ You can also edit an existing Connection under the **Configured Connections** he ## Next Steps -* Now that you've added a connector, you can confirm everything is set up correctly by running a [Discover](https://docs.defectdojo.com/en/connecting_your_tools/connectors/operations_discover/) operation. +* Now that you've added a connector, you can confirm everything is set up correctly by running a [Discover](../manage_operations/#discover-operations) operation. 
diff --git a/docs/content/en/connecting_your_tools/connectors/connectors_tool_reference.md b/docs/content/en/connecting_your_tools/connectors/connectors_tool_reference.md index 90e7726cb2f..8b06395d111 100644 --- a/docs/content/en/connecting_your_tools/connectors/connectors_tool_reference.md +++ b/docs/content/en/connecting_your_tools/connectors/connectors_tool_reference.md @@ -1,48 +1,35 @@ --- -title: "Tool-Specific API Reference (Connectors)" +title: "Tool-Specific Connector Setup" description: "Our list of supported Connector tools, and how to set them up with DefectDojo" --- -When setting up a Connector for a supported tool, you'll need to give DefectDojo specific information related to the tool's API. At a base level, you'll need: +Note: Connectors are a DefectDojo Pro-only feature. +When setting up a Connector for a supported tool, you'll need to give DefectDojo specific information related to the tool's API. At a base level, you'll need: * **Location** \-a field whichgenerallyrefers to your tool's URL in your network, * **Secret** \- generally an API key. Some tools will require additional API\-related fields beyond **Location** and **Secret**. They may also require you to make changes on their side to accommodate an incoming Connector from DefectDojo. - - ![image](images/connectors_tool_reference.png) -Each tool has different API requirements, and this guide is intended to help you set up the tool's API so that DefectDojo can connect. - +Each tool has a different API configuration, and this guide is intended to help you set up the tool's API so that DefectDojo can connect. Whenever possible, we recommend creating a new 'DefectDojo Bot' account within your Security Tool which will only be used by the Connector. This will help you better differentiate between actions manually taken by your team, and automated actions taken by the Connector. 
- - - # **Supported Connectors** - - ## **AWS Security Hub** - The AWS Security Hub connector uses an AWS access key to interact with the Security Hub APIs. - #### Prerequisites - Rather than use the AWS access key from a team member, we recommend creating an IAM User in your AWS account specifically for DefectDojo, with that user's permissions limited to those necessary for interacting with Security Hub. - - AWS's "**[AWSSecurityHubReadOnlyAccess](https://docs.aws.amazon.com/aws-managed-policy/latest/reference/AWSSecurityHubReadOnlyAccess.html)**policy" provides the required level of access for a connector. If you would like to write a custom policy for a Connector, you will need to include the following permissions: - * [DescribeHub](https://docs.aws.amazon.com/securityhub/1.0/APIReference/API_DescribeHub.html) * [GetFindingAggregator](https://docs.aws.amazon.com/securityhub/1.0/APIReference/API_GetFindingAggregator.html) * [GetFindings](https://docs.aws.amazon.com/securityhub/1.0/APIReference/API_GetFindings.html) @@ -50,9 +37,6 @@ AWS's "**[AWSSecurityHubReadOnlyAccess](https://docs.aws.amazon.com/aws-managed- A working policy definition might look like the following: - - - ``` { "Version": "2012-10-17", @@ -72,93 +56,63 @@ A working policy definition might look like the following: } ``` - **Please note:** we may need to use additional API actions in the future to provide the best possible experience, which will require updates to this policy. - Once you have created your IAM user and assigned it the necessary permissions using an appropriate policy/role, you will need to generate an access key, which you can then use to create a Connector. - - #### Connector Mappings - 1. Enter the appropriate [AWS API Endpoint for your region](https://docs.aws.amazon.com/general/latest/gr/sechub.html#sechub_region) in the **Location** field**:** for example, to retrieve results from the `us-east-1` region, you would supply - `https://securityhub.us-east-1.amazonaws.com` 2. 
Enter a valid **AWS Access Key** in the **Access Key** field. 3. Enter a matching **Secret Key** in the **Secret Key** field. DefectDojo can pull Findings from more than one region using Security Hub's **cross\-region aggregation** feature. If [cross\-region aggregation](https://docs.aws.amazon.com/securityhub/latest/userguide/finding-aggregation.html) is enabled, you should supply the API endpoint for your "**Aggregation Region**". Additional linked regions will have ProductRecords created for them in DefectDojo based on your AWS account ID and the region name. - - - ## **BurpSuite** - DefectDojo’s Burp connector calls Burp’s GraphQL API to fetch data. - #### Prerequisites - - Before you can set up this connector, you will need an API key from a Burp Service Account. Burp user accounts don’t have API keys by default, so you may need to create a new user specifically for this purpose. - - See [Burp Documentation](https://portswigger.net/burp/documentation/enterprise/user-guide/api-documentation/create-api-user) for a guide on setting up a Service Account user with an API key. - #### Connector Mappings - 1. Enter Burp’s root URL in the **Location** field: this is the URL where you access the Burp tool. 2. Enter a valid API Key in the Secret field. This is the API key associated with your Burp Service account. See the official [Burp documentation](https://portswigger.net/burp/extensibility/enterprise/graphql-api/index.html) for more information on the Burp API. - - - ## **Checkmarx ONE** - DefectDojo's Checkmarx ONE connector calls the Checkmarx API to fetch data. - #### **Connector Mappings** - 1. Enter your **Tenant Name** in the **Checkmarx Tenant** field. This name should be visible on the Checkmarx ONE login page in the top\-right hand corner: " Tenant: \<**your tenant name**\> " ​ - - ![image](images/connectors_tool_reference_2.png) + 2. Enter a valid API key. 
You may need to generate a new one: see [Checkmarx API Documentation](https://docs.checkmarx.com/en/34965-68618-generating-an-api-key.html#UUID-f3b6481c-47f4-6cd8-9f0d-990896e36cd6_UUID-39ccc262-c7cb-5884-52ed-e1692a635e08) for details. 3. Enter your tenant location in the **Location** field. This URL is formatted as follows: ​`https://.ast.checkmarx.net/` . Your Region can be found at the beginning of your Checkmarx URL when using the Checkmarx app. **** is the primary US server (which has no region prefix). - ## Dependency\-Track - This connector fetches data from a on\-premise Dependency\-Track instance, via REST API. - - ​**Connector Mappings** - 1. Enter your local Dependency\-Track server URL in the **Location** field. 2. Enter a valid API key in the **Secret** field. To generate a Dependency\-Track API key: - 1. **Access Management**: Navigate to Administration \> Access Management \> Teams in the Dependency\-Track interface. 2. **Teams Setup**: You can either create a new team or select an existing one. Teams allow you to manage API access based on group membership. 3. **Generate API Key**: In the selected team's details page, find the "API Keys" section. Click the \+ button to generate a new API key. @@ -167,102 +121,67 @@ To generate a Dependency\-Track API key: For more information, see **[Dependency\-Track Documentation](https://docs.dependencytrack.org/integrations/rest-api/)**. - - - ## Probely - This connector uses the Probely REST API to fetch data. - - ​**Connector Mappings** - 1. Enter the appropriate API server address in the **Location** field. (either or ) 2. Enter a valid API key in the **Secret** field. You can find an API key under the User \> API Keys menu in Probely. See [Probely documentation](https://help.probely.com/en/articles/8592281-how-to-generate-an-api-key) for more info. - - - ## **SemGrep** - This connector uses the SemGrep REST API to fetch data. 
- #### Connector Mappings - Enter https://semgrep.dev/api/v1/in the **Location** field. - 1. Enter a valid API key in the **Secret** field. You can find this on the Tokens page: ​ "Settings" in the left navbar \> Tokens \> Create new token ([https://semgrep.dev/orgs/\-/settings/tokens](https://semgrep.dev/orgs/-/settings/tokens)) See [SemGrep documentation](https://semgrep.dev/docs/semgrep-cloud-platform/semgrep-api/#tag__badge-list) for more info. - - - ## SonarQube - The SonarQube Connector can fetch data from either a SonarCloud account or from a local SonarQube instance. - - **For SonarCloud users:** - 1. Enter https://sonarcloud.io/ in the Location field. 2. Enter a valid **API key** in the Secret field. **For SonarQube (on\-premise) users:** - 1. Enter the base url of your SonarQube instance in the Location field: for example `https://my.sonarqube.com/` 2. Enter a valid **API key** in the Secret field. This will need to be a **[User](https://docs.sonarsource.com/sonarqube/latest/user-guide/user-account/generating-and-using-tokens/)** [API Token Type](https://docs.sonarsource.com/sonarqube/latest/user-guide/user-account/generating-and-using-tokens/). API tokens can be found and generated via **My Account \-\> Security \-\> Generate Token** in the SonarQube app. For more information, [see SonarQube documentation](https://docs.sonarsource.com/sonarqube/latest/user-guide/user-account/generating-and-using-tokens/). - - - ## **Snyk** - The Snyk connector uses the Snyk REST API to fetch data. - #### Connector Mappings - 1. Enter **[https://api.snyk.io/rest](https://api.snyk.io/v1)** or **[https://api.eu.snyk.io/rest](https://api.eu.snyk.io/v1)** (for a regional EU deployment) in the **Location** field. 2. Enter a valid API key in the **Secret** field. 
API Tokens are found on a user's **[Account Settings](https://docs.snyk.io/getting-started/how-to-obtain-and-authenticate-with-your-snyk-api-token)** [page](https://docs.snyk.io/getting-started/how-to-obtain-and-authenticate-with-your-snyk-api-token) in Snyk. See the [Snyk API documentation](https://docs.snyk.io/snyk-api) for more info. - - - ## Tenable - The Tenable connector uses the **Tenable.io** REST API to fetch data. - On\-premise Tenable Connectors are not available at this time. - #### **Connector Mappings** - 1. Enter in the Location field. 2. Enter a valid **API key** in the Secret field. diff --git a/docs/content/en/connecting_your_tools/connectors/edit_ignore_delete_records.md b/docs/content/en/connecting_your_tools/connectors/edit_ignore_delete_records.md deleted file mode 100644 index 4fc06df43cc..00000000000 --- a/docs/content/en/connecting_your_tools/connectors/edit_ignore_delete_records.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: "Edit, Ignore or Delete Records" -description: "" ---- - -Records can be Edited, Ignored or Deleted from the **Manage Records \& Operations Page.** - -Although Mapped and Unmapped records are located in separate tables, they can both be edited in the same way. - -From the Records table, click the blue ▼ Arrow next to the State column on a given Record. From there, you can select **Edit Record,** or **Delete Record.** - -![image](images/edit_ignore_delete_records.png) - -## Edit a Record - -Clicking **Edit Record** will open a window which allows you to change the destination product in DefectDojo. You can either select an existing Product from the drop\-down menu, or you can type in the name of a new Product you wish to create. - -![image](images/edit_ignore_delete_records_2.png) - -## **Change the Mapping of a Record** - -The scan data associated with a Record can be directed to flow into a different Product by changing the mapping. - -Select, or type in the name of a new Product from the drop\-down menu to the right. 
- -### **Edit the State of a Record** - -The State of a Record can be changed from this menu as well. Records can be switched from Good to Ignored (or vice versa) by choosing an option from the **State** dropdown list. - -#### Ignoring a Record - -If you wish to ‘switch off’ one of the records or disregard the data it’s sending to DefectDojo, you can choose to ‘Ignore’ the record. An ‘Ignored’ record will move to the Unmapped Records list and will not push any new data to DefectDojo. - -You can Ignore a Mapped Record (which will remove the mapping), or a New Record (from the unmapped Records list). - -#### Restoring an Ignored Record - -If you would like to remove the Ignored status from a record, you can change it back to New with the same State dropdown menu. - -* If Auto\-Map Records is enabled, the Record will return to its original mapping once the Discover operation runs again. -* If Auto\-Map Records is not enabled, DefectDojo will not automatically restore a previous mapping, so you’ll need to set up the mapping for this Record again. - -## **Delete a Record** - -You can also Delete Records, which will remove them from the Unmapped or Mapped Records table. - -Keep in mind that the Discover function will always import all records from a tool \- meaning that even if a Record is deleted from DefectDojo, it will become re\-discovered later (and will return to the list of Records to be mapped again). - -* If you plan on removing the underlying Vendor\-Equivalent Product from your scan tool, then Deleting the Record is a good option. Otherwise, the next Discover operation will see that the associated data is missing, and this Record will change state to 'Missing'. -​ -* However, if the underlying Vendor\-Equivalent Product still exists, it will be Discovered again on a future Discover operation. To prevent this behaviour, you can instead Ignore the Record. - -### Does this affect any imported data? - -No. 
All Findings, Tests and Engagements created by a sync record will remain in DefectDojo even after a Record is deleted. Deleting a record or a configuration will only remove the data\-flow process, and won’t delete any vulnerability data from DefectDojo or your tool. - -# Next Steps - -* If your Records have been mapped, learn how to import data via [Sync operations](https://docs.defectdojo.com/en/connecting_your_tools/connectors/operations_sync/). diff --git a/docs/content/en/connecting_your_tools/connectors/manage_operations.md b/docs/content/en/connecting_your_tools/connectors/manage_operations.md new file mode 100644 index 00000000000..17e830e77ee --- /dev/null +++ b/docs/content/en/connecting_your_tools/connectors/manage_operations.md @@ -0,0 +1,79 @@ +--- +title: "Managing Operations" +description: "Check the status of your Connector's Discover & Sync Operations" +--- + +Note: Connectors are a DefectDojo Pro-only feature. + +Once an API connector is set up, it will run two Operations on a recurring basis: + +* **Discover** will learn the connected tool's structure, and will create records in DefectDojo of any unmapped data; +* **Sync** will import new Findings from the tool based on your mappings. + +Both of these Operations are managed on the Operations page of a Connector. The table will also track past runs of these Operations so that you can ensure your Connector is up to date. + +To access a Connector's Operations Page, open **Manage Records & Operations** for the Connector you wish to work with, and then switch to the ** Operations From (tool)** tab. + +![image](images/operations_discover.png) + +The **Manage Records & Operations** page can also be used to handle Records; which are the individual Product mappings of your connected tool. See [Managing Records](../manage_records) for more information. 
+ +## The Operations Page + +![image](images/operations_page.png) + +Each entry on the Operations Page's table is a record of an operation event, with the following traits: + +* **Type** describes whether the event was a **Sync** or a **Discover** operation. +* **Status** describes whether the event ran successfully. +* **Trigger** describes how the event was triggered \- was it a **Scheduled** operation which ran automatically, or a **Manual** operation which was triggered by a DefectDojo user? +* The **Start \& End Time** of each operation is recorded here, along with the **Duration**. + +## Discover Operations + +The first step a DefectDojo Connector needs to take is to **Discover** your tool's environment to see how you're organizing your scan data. + +Let's say you have a BurpSuite tool, which is set up to scan five different repositories for vulnerabilities. Your Connector will take note of this organizational structure and set up **Records** to help you translate those separate repositories into DefectDojos Product/Engagement/Test hierarchy. + +### Creating New Records + +Each time your Connector runs a **Discover** operation, it will look for new **Vendor\-Equivalent\-Products (VEPs)**. DefectDojo looks at the way the Vendor tool is set up and will create **Records** of VEPs based on how your tool is organized. + +![image](images/operations_discover_2.png) + +### Run Discover Manually + +**Discover** operations will automatically run on a regular basis, but they can also be run manually. If you're setting up this Connector for the first time, you can click the **Discover** button next to the **Unmapped Records** header. After you refresh the page, you will see your initial list of **Records**. + +![image](images/operations_discover_3.png) + +To learn more about working with records and setting up mappings to Products, see our guide to [Managing Records](../manage_records). 
+ +## Sync Operations + +On a daily basis, DefectDojo will look at each **Mapped Record** for new scan data. DefectDojo will then run a **Reimport**, which compares the state of existing scan data to an incoming report. + +### Where is vulnerability data stored? + +* DefectDojo will create an **Engagement** nested under the Product specified in the **Record Mapping**. This Engagement will be called **Global Connectors**. +* The **Global Connectors** Engagement will track each separate Connection associated with the Product as a **Test**. +* On this sync, and each subsequent sync, the **Test** will store each vulnerability found by the tool as a **Finding**. + +### How Sync handles new vulnerability data + +Whenever Sync runs, it will compare the latest scan data against the existing list of Findings for changes. + +* If there are new Findings detected, they will be added to the Test as new Findings. +* If there are any Findings which aren’t detected in the latest scan, they will be marked as Inactive in the Test. + +To learn more about Products, Engagements, Tests and Findings, see our [Product Hierarchy Overview](/en/working_with_findings/organizing_engagements_tests/product_hierarchy). + +### Running Sync Manually + +To have DefectDojo run a Sync operation off\-schedule: + +1. Navigate to the **Manage Records \& Operations** page for the connector you want to use. From the **API Connectors** page, click the drop\-down menu on the Connector you wish to work with, and select Manage Records \& Operations. +​ +2. From this page, click the **Sync** button. This button is located next to the **Mapped Records** header. 
+ +![image](images/operations_sync.png) \ No newline at end of file diff --git a/docs/content/en/connecting_your_tools/connectors/manage_records.md b/docs/content/en/connecting_your_tools/connectors/manage_records.md index 5436061480d..711959988a5 100644 --- a/docs/content/en/connecting_your_tools/connectors/manage_records.md +++ b/docs/content/en/connecting_your_tools/connectors/manage_records.md @@ -1,15 +1,17 @@ --- -title: "Manage Records" +title: "Managing Records" description: "Direct the flow of data from your tool into DefectDojo" --- +Note: Connectors are a DefectDojo Pro-only feature. + Once you have run your first Discover operation, you should see a list of Mapped or Unmapped records on the **Manage Records and Operations** page. ## What's a Record? A Record is a connection between a DefectDojo **Product** and a **Vendor\-Equivalent\-Product**. You can use your Records list to control the flow of data between your tool and DefectDojo. -Records are created and updated during the **[Discover](https://docs.defectdojo.com/en/connecting_your_tools/connectors/operations_discover/)** operation, which DefectDojo runs daily to look for new Vendor\-Equivalent Products. +Records are created and updated during the **[Discover](../manage_operations/#discover-operations)** operation, which DefectDojo runs daily to look for new Vendor\-Equivalent Products. ![image](images/manage_records.png) @@ -55,7 +57,7 @@ Once a Record is Mapped, DefectDojo will be ready to import your tool’s scans This makes it possible to send scan data from multiple Connectors to the same Product. All of the data will be stored in the same Engagement, but each Connector will store data in a separate Test. -To learn more about Products, Engagements and Tests, see our [Product Hierarchy Overview](https://docs.defectdojo.com/en/working_with_findings/organizing_engagements_tests/product-hierarchy-overview/). 
+To learn more about Products, Engagements and Tests, see our [Product Hierarchy Overview](/en/working_with_findings/organizing_engagements_tests/product_hierarchy/). ## Record States - Glossary @@ -63,7 +65,7 @@ Each Record has an associated state to communicate how the Record is working. ### New -A New Record is an Unmapped Record which DefectDojo has Discovered. It can be Mapped to a Product or Ignored. To Map a new Record to a Product, see our guide on [Editing Records](https://docs.defectdojo.com/en/connecting_your_tools/connectors/edit_ignore_delete_records/). +A New Record is an Unmapped Record which DefectDojo has Discovered. It can be Mapped to a Product or Ignored. To Map a new Record to a Product, see our guide on [Editing Records](#edit-records-remap-ignore-or-delete). ### Good @@ -71,7 +73,7 @@ A New Record is an Unmapped Record which DefectDojo has Discovered. It can be Ma ### Ignored -'Ignored' Records have been successfully Discovered, but a DefectDojo user has decided not to map the data to a Product. If you wish to change a New or Mapped Record to Ignored, or re-map an Ignored Record, see our guide on [Editing Records](https://docs.defectdojo.com/en/connecting_your_tools/connectors/edit_ignore_delete_records/). +'Ignored' Records have been successfully Discovered, but a DefectDojo user has decided not to map the data to a Product. ## Warning States: Stale or Missing @@ -90,3 +92,54 @@ If a Record has been Mapped, but the source data (or Vendor\-Equivalent Product) DefectDojo Connectors will adapt to name changes, directory changes and other data shifts, so this is possibly because the related Vendor\-Equivalent Product was deleted from the Tool you’re using. If you intended to remove the Vendor Equivalent Product from your tool, you can Delete a Missing Record. If not, you'll need to troubleshoot the problem within the Tool so that the source data can be Discovered correctly.
+ +## Edit Records: Remap, Ignore or Delete + +Records can be Edited, Ignored or Deleted from the **Manage Records \& Operations Page.** + +Although Mapped and Unmapped records are located in separate tables, they can both be edited in the same way. + +From the Records table, click the blue ▼ Arrow next to the State column on a given Record. From there, you can select **Edit Record,** or **Delete Record.** + +![image](images/edit_ignore_delete_records.png) + +### Change the Mapping of a Record + +Clicking **Edit Record** will open a window which allows you to change the destination product in DefectDojo. You can either select an existing Product from the drop\-down menu, or you can type in the name of a new Product you wish to create. + +![image](images/edit_ignore_delete_records_2.png) + +The scan data associated with a Record can be directed to flow into a different Product by changing the mapping. + +Select, or type in the name of a new Product from the drop\-down menu to the right. + +#### Edit the State of a Record + +The State of a Record can be changed from this menu as well. Records can be switched from Good to Ignored (or vice versa) by choosing an option from the **State** dropdown list. + +### Ignoring a Record + +If you wish to ‘switch off’ one of the records or disregard the data it’s sending to DefectDojo, you can choose to ‘Ignore’ the record. An ‘Ignored’ record will move to the Unmapped Records list and will not push any new data to DefectDojo. + +You can Ignore a Mapped Record (which will remove the mapping), or a New Record (from the unmapped Records list). + +#### Restoring an Ignored Record + +If you would like to remove the Ignored status from a record, you can change it back to New with the same State dropdown menu. + +* If Auto\-Map Records is enabled, the Record will return to its original mapping once the Discover operation runs again. 
+* If Auto\-Map Records is not enabled, DefectDojo will not automatically restore a previous mapping, so you’ll need to set up the mapping for this Record again. + +### Delete a Record + +You can also Delete Records, which will remove them from the Unmapped or Mapped Records table. + +Keep in mind that the Discover function will always import all records from a tool \- meaning that even if a Record is deleted from DefectDojo, it will become re\-discovered later (and will return to the list of Records to be mapped again). + +* If you plan on removing the underlying Vendor\-Equivalent Product from your scan tool, then Deleting the Record is a good option. Otherwise, the next Discover operation will see that the associated data is missing, and this Record will change state to 'Missing'. +​ +* However, if the underlying Vendor\-Equivalent Product still exists, it will be Discovered again on a future Discover operation. To prevent this behaviour, you can instead Ignore the Record. + +#### Does this affect any imported data? + +No. All Findings, Tests and Engagements created by a sync record will remain in DefectDojo even after a Record is deleted. Deleting a record or a configuration will only remove the data\-flow process, and won’t delete any vulnerability data from DefectDojo or your tool. diff --git a/docs/content/en/connecting_your_tools/connectors/operations_discover.md b/docs/content/en/connecting_your_tools/connectors/operations_discover.md deleted file mode 100644 index e54a6760882..00000000000 --- a/docs/content/en/connecting_your_tools/connectors/operations_discover.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -title: "'Discover' Operations" -description: "Create Records, and direct the flow of scan data into DefectDojo" ---- - -Once you have a Connector set up, you can start making decisions about how data will flow from the tool into DefectDojo. This is managed through the Discovery process. 
- -You can manage all of these processes from the **Manage Records \& Operations** page. From the **API Connectors** page, click the drop\-down menu on the Connector you wish to work with, and select Manage Records \& Operations. - -![image](images/operations_discover.png) - -# Creating New Records - -The first step a DefectDojo Connector needs to take is to **Discover** your tool's environment to see how you're organizing your scan data. - -Let's say you have a BurpSuite tool, which is set up to scan five different repositories for vulnerabilities. Your Connector will take note of this organizational structure and set up **Records** to help you translate those separate repositories into DefectDojos Product/Engagement/Test hierarchy. - -Each time your Connector runs a **Discover** operation, it will look for new **Vendor\-Equivalent\-Products (VEPs)**. DefectDojo looks at the way the Vendor tool is set up and will create **Records** of VEPs based on how your tool is organized. - -![image](images/operations_discover_2.png) - -## Run Discover Manually - -**Discover** operations will automatically run on a regular basis, but they can also be run manually. If you're setting up this Connector for the first time, you can click the **Discover** button next to the **Unmapped Records** header. After you refresh the page, you will see your initial list of **Records**. - -![image](images/operations_discover_3.png) - -# **Next Steps:** - -* Learn how to [manage the Records](https://docs.defectdojo.com/en/connecting_your_tools/connectors/manage_records/) discovered by a Connector, and start importing data. -* If your Records have already been mapped (such as through Auto\-Map Records), learn how to import data via [Sync operations](https://docs.defectdojo.com/en/connecting_your_tools/connectors/operations_sync/). 
diff --git a/docs/content/en/connecting_your_tools/connectors/operations_page.md b/docs/content/en/connecting_your_tools/connectors/operations_page.md deleted file mode 100644 index c62e6c7a582..00000000000 --- a/docs/content/en/connecting_your_tools/connectors/operations_page.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: "The Operations Page" -description: "Check the status of your Connector's Discover & Sync Operations" ---- - -The Operations Page provides an overview of your connector's Discover \& Sync Operations, along with additional details for each. These operations are tracked using a table. - -To access a Connector's Operations Page, open **Manage Records \& Operations** for the connector you wish to edit, and then switch to the **\ Operations From (tool)** tab. - -# The Operations Table - -![image](images/operations_page.png) - -Each entry on the Operations Table is a record of an operation event, with the following traits: - -* **Type** describes whether the event was a **Sync** or a **Discover** operation. -* **Status** describes whether the event ran successfully. -* **Trigger** describes how the event was triggered \- was it a **Scheduled** operation which ran automatically, or a **Manual** operation which was triggered by a DefectDojo user? -* The **Start \& End Time** of each operation is recorded here, along with the **Duration**. - -# **Next Steps** - -* Learn more about [Discover](https://docs.defectdojo.com/en/connecting_your_tools/connectors/operations_discover/) and [Sync](https://docs.defectdojo.com/en/connecting_your_tools/connectors/operations_sync/) operations from our guides. 
diff --git a/docs/content/en/connecting_your_tools/connectors/operations_sync.md b/docs/content/en/connecting_your_tools/connectors/operations_sync.md deleted file mode 100644 index 6789e16cde2..00000000000 --- a/docs/content/en/connecting_your_tools/connectors/operations_sync.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -title: "'Sync' Operations" -description: "Import data from your Connector into DefectDojo" ---- - -The primary ‘Job’ of a DefectDojo Connector is to import data from a security tool, and this process is handled by the Sync Operation. - -On a daily basis, DefectDojo will look at each **Mapped** **Record** for new scan data. DefectDojo will then run a **Reimport**, which compares the state of each scan. - -## The Sync Process - -### Where is my vulnerability data stored? - -* DefectDojo will create an **Engagement** nested under the Product specified in the **Record Mapping**. This Engagement will be called **Global Connectors**. -* The **Global Connectors** Engagement will track each separate Connection associated with the Product as a **Test**. -* On this sync, and each subsequent sync, the **Test** will store each vulnerability found by the tool as a **Finding**. - -### How Sync handles new vulnerability data - -Whenever Sync runs, it will compare the latest scan data against the existing list of Findings for changes. - -* If there are new Findings detected, they will be added to the Test as new Findings. -* If there are any Findings which aren’t detected in the latest scan, they will be marked as Inactive in the Test. - -To learn more about Products, Engagements, Tests and Findings, see our [Product Hierarchy Overview](https://docs.defectdojo.com/en/working_with_findings/organizing_engagements_tests/product-hierarchy-overview/). - -## Running Sync Manually - -To have DefectDojo run a Sync operation off\-schedule: - -1. Navigate to the **Manage Records \& Operations** page for the connector you want to use. 
From the **API Connectors** page, click the drop\-down menu on the Connector you wish to work with, and select Manage Records \& Operations. -​ -2. From this page, click the **Sync** button. This button is located next to the **Mapped Records** header. - -![image](images/operations_sync.png) - -# Next Steps - -* Learn how to set up the flow of data into DefectDojo through a [Discover operation](https://docs.defectdojo.com/en/connecting_your_tools/connectors/operations_discover/). -* Adjust the schedule of your Sync and Discover operations by [Editing a Connector](https://docs.defectdojo.com/en/connecting_your_tools/connectors/add_edit_connectors/). -* Learn about Engagements, Tests and Findings with our guide to [Product Hierarchy](https://docs.defectdojo.com/en/working_with_findings/organizing_engagements_tests/product-hierarchy-overview/). diff --git a/docs/content/en/connecting_your_tools/connectors/run_operations_manually.md b/docs/content/en/connecting_your_tools/connectors/run_operations_manually.md deleted file mode 100644 index 4a23c3c2612..00000000000 --- a/docs/content/en/connecting_your_tools/connectors/run_operations_manually.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: "How to run Operations manually" -description: "Run a Sync or Discover operation outside of schedule" ---- - -Connectors import data to DefectDojo on a regular interval (which you defined when adding the connector). However, if you want to import data manually (such as if you want to import historical data) you can follow this process: - - - -Select the tool which you want to test from **Configured Connections,** and click the **Manage Configuration button.** From the drop\-down list, select **Manage Records and Operations.** - - - -## Run Discover Manually - - -* To have DefectDojo search for, and import new records from the API, click the **🔎 Discover** button. This button is located next to the **Unmapped Records** header. 
- -![image](images/run_operations_manually.png) -## Run Sync Manually - - -* To have DefectDojo import new data from each Mapped Record, click the **Sync** button. This button is located next to the **Mapped Records** header. - -![image](images/run_operations_manually_2.png) - -If there are no Mapped Records associated with this Connector, DefectDojo will not be able to import any data via Sync. You may need to run a Discover operation first, or map each record to a Product. - diff --git a/docs/content/en/connecting_your_tools/external_tools.md b/docs/content/en/connecting_your_tools/external_tools.md index d9a4408fb0d..731a605477d 100644 --- a/docs/content/en/connecting_your_tools/external_tools.md +++ b/docs/content/en/connecting_your_tools/external_tools.md @@ -5,6 +5,8 @@ draft: false weight: 2 --- +Note: The following external tools are DefectDojo Pro-only features. These binaries will not work unless they are connected to an instance with a DefectDojo Pro license. + ## About Universal Importer Universal Importer and Dojo-CLI are command-line tools designed to seamlessly upload scan results into DefectDojo. It streamlines both the import and re-import processes of findings and associated objects. These tools are flexible and supports importing and re-importing scan results, making it ideal for users who need robust interaction with the DefectDojo API. @@ -261,5 +263,5 @@ If you encounter any issues, please check the following: - Ensure you're using the correct binary for your operating system and CPU architecture. - Verify that the API key is set correctly in your environment variables. - Check that the DefectDojo URL is correct and accessible. -- When importing, confirm that the report file exists and is in the supported format for the specified scan type. You can review the supported scanners for Defect Dojo in the documentation https://documentation.defectdojo.com/integrations/parsers/file/. 
+- When importing, confirm that the report file exists and is in the supported format for the specified scan type. You can review the supported scanners for DefectDojo on our [supported tools list](../parsers). diff --git a/docs/content/en/connecting_your_tools/import_intro.md b/docs/content/en/connecting_your_tools/import_intro.md index 588b4826787..4b1480f6bc9 100644 --- a/docs/content/en/connecting_your_tools/import_intro.md +++ b/docs/content/en/connecting_your_tools/import_intro.md @@ -1,7 +1,7 @@ --- title: "Import Methods" description: "Learn how to import data manually, through the API, or via a connector" -weight: 0 +weight: 1 --- One of the things we understand at DefectDojo is that every company’s security needs are completely different. There is no ‘one\-size\-fits\-all’ approach. As your organization changes, having a flexible approach is key. @@ -16,15 +16,16 @@ There are four main ways that DefectDojo can upload Finding reports: * Via direct **import** through the UI (“**Add Findings**”) * Via **API** endpoint (allowing for automated data ingest) +* Via **Universal Importer**, a command-line tool which leverages the DefectDojo API * Via **Connectors** for certain tools, an ‘out of the box’ data integration * Via **Smart Upload** for certain tools, an importer designed to handle infrastructure scans ### Comparing Upload Methods -| | **UI Import** | **API Import** | **Connectors** | **Smart Upload** | +| | **UI Import** | **API** | **Connectors** Pro | **Smart Upload** | | --- | --- | --- | --- | --- | -| **Supported Scan Types** | All (see **Supported Tools**) | All (see **Supported Tools**) | Snyk, Semgrep, Burp Suite, AWS Security Hub, Probely, Checkmarx, Tenable | Nexpose, NMap, OpenVas, Qualys, Tenable | -| **Can it be automated?** | Not directly, though method can be automated through API | Yes, calls to API can be made manually or via script | Yes, Connectors is a natively automated process which leverages your tool’s API to rapidly import data 
| Yes, can be automated via /smart\_upload\_import API endpoint |
+| **Supported Scan Types** | All: see [Supported Tools](/en/connecting_your_tools/parsers) | All: see [Supported Tools](/en/connecting_your_tools/parsers) | Snyk, Semgrep, Burp Suite, AWS Security Hub, Probely, Checkmarx, Tenable | Nexpose, NMap, OpenVas, Qualys, Tenable |
+| **Automation?** | Available via API: `/reimport` `/import` endpoints | Triggered from [CLI Importer](../external_tools) or external code | Connectors is inherently automated | Available via API: `/smart_upload_import` endpoint |

### Product Hierarchy

@@ -35,6 +36,6 @@ Each of these methods can create Product Hierarchy on the spot. Product Hierarch

# Next Steps

-* If you have a brand new DefectDojo instance, learning how to use the **Import Scan Form** is a great starting point.
-* If you want to learn how to translate DefectDojo’s organizational system into a robust pipeline, you can start by consulting our article on [Product Hierarchy](https://docs.defectdojo.com/en/working_with_findings/organizing_engagements_tests/product-hierarchy-overview/).
-* If you want to set up Connectors to work with a supported tool, see our [About Connectors](https://docs.defectdojo.com/en/connecting_your_tools/connectors/about_connectors/) article.
+* If you have a brand new DefectDojo instance, learning how to use the [Import Scan Form](../import_scan_files/import_scan_ui) is a great starting point.
+* If you want to learn how to translate DefectDojo’s organizational system into a robust pipeline, you can start by consulting our article on [Product Hierarchy](/en/working_with_findings/organizing_engagements_tests/product_hierarchy/).
+* If you want to set up Connectors to work with a supported tool, see our [About Connectors](../connectors/about_connectors/) article.
diff --git a/docs/content/en/connecting_your_tools/import_scan_files/_index.md b/docs/content/en/connecting_your_tools/import_scan_files/_index.md index 03eda102b7f..44f08532b34 100644 --- a/docs/content/en/connecting_your_tools/import_scan_files/_index.md +++ b/docs/content/en/connecting_your_tools/import_scan_files/_index.md @@ -5,7 +5,7 @@ summary: "" date: 2023-09-07T16:06:50+02:00 lastmod: 2023-09-07T16:06:50+02:00 draft: false -weight: 1 +weight: 2 chapter: true sidebar: collapsed: true diff --git a/docs/content/en/connecting_your_tools/import_scan_files/api_pipeline_modelling.md b/docs/content/en/connecting_your_tools/import_scan_files/api_pipeline_modelling.md index 90d840aecb8..889e20c9346 100644 --- a/docs/content/en/connecting_your_tools/import_scan_files/api_pipeline_modelling.md +++ b/docs/content/en/connecting_your_tools/import_scan_files/api_pipeline_modelling.md @@ -1,67 +1,42 @@ --- -title: "Creating an automated import pipeline via API" +title: "Create an automated import pipeline via API" description: "" --- DefectDojo’s API allows for robust pipeline solutions, which automatically ingest new scans to your instance. Automation like this can take a few different forms: - * A daily import which scans your environment on a daily basis, and then imports the results of the scan to DefectDojo (similar to our **Connectors** feature) * A CI/CD pipeline which scans new code as it is deployed, and imports the results to DefectDojo as a triggered action These pipelines can be created by directly calling our API **/reimport** endpoint with an attached scan file in a way that closely resembles our **Import Scan Form**. +#### Universal Importer: out of the box automation +DefectDojo Inc. maintains a **Universal Importer** which can be set up with existing CI/CD pipelines, triggered via GitHub actions, or run in any other automated context. 
+This external tool is a useful way to build a pipeline directly from the command line: a much faster solution than writing your own code. -# Universal Importer \- out of the box CI/CD tool - - -DefectDojo maintains a **Universal Importer** which can be set up with existing CI/CD pipelines, triggered via GitHub actions, or run in any other automated context. The **Universal Importer** runs in a separate container, and will call your DefectDojo instance’s API in the appropriate way. - - - -The Universal Importer is a useful way to leverage the API without needing to create and maintain the necessary API calls in your own pipeline. This is generally a faster solution than writing your own code. - - - -If you have an active DefectDojo subscription and want to request a copy of the Universal Importer, please contact us at **[support@defectdojo.com](mailto:support@defectdojo.com)** along with the operating system you want to use to run the tool. - - - - -# Working with DefectDojo’s API +See our guide to [external tools](../../external_tools) to learn more. External tools are available for **DefectDojo Pro** users only. +## DefectDojo’s API DefectDojo’s API is documented in\-app using the OpenAPI framework. You can access this documentation from the User Menu in the top right\-hand corner, under **‘API v2 OpenAPI3’**. - - \- The documentation can be used to test API calls with various parameters, and does so using your own user’s API Token. - - If you need to access an API token for a script or another integration, you can find that information under the **API v2 Token** option from the same menu. - - - ![image](images/api_pipeline_modelling.png) -## General API Considerations +### General API Considerations * Although our OpenAPI documentation is detailed regarding the parameters that can be used with each endpoint, it assumes that the reader has a solid understanding of DefectDojo’s key concepts. (Product Hierarchy, Findings, Deduplication, etc). 
* Users who want a working import integration but are less familiar with DefectDojo as a whole should consider our **Universal Importer**. * DefectDojo’s API can sometimes create unintended data objects, particularly if ‘Auto\-Create Context’ is used on the **/import** or **/reimport** endpoint. * Fortunately, it is very difficult to accidentally delete data using the API. Most objects can only be removed using a dedicated **DELETE** call to the relevant endpoint. - -## Specific notes on /import and /reimport endpoints - +### Specific notes on /import and /reimport endpoints The **/reimport** endpoint can be used for both an initial Import, or a “Reimport” which extends a Test with additional Findings. You do not need to first create a Test with **/import** before you can use the **/reimport** endpoint. As long as ‘Auto Create Context’ is enabled, the /reimport endpoint can create a new Test, Engagement, Product or Product Type. In almost all cases, you can use the **/reimport** endpoint exclusively when adding data via API. - - However, the **/import** endpoint can instead be used for a pipeline where you always want to store each scan result in a discrete Test object, rather than using **/reimport** to handle the diff within a single Test object. Either option is acceptable, and the endpoint you choose depends on your reporting structure, or whether you need to inspect an isolated run of a Pipeline. 
- diff --git a/docs/content/en/connecting_your_tools/import_scan_files/import_scan_ui.md b/docs/content/en/connecting_your_tools/import_scan_files/import_scan_ui.md index 9d4f361fd87..a3ce17f60aa 100644 --- a/docs/content/en/connecting_your_tools/import_scan_files/import_scan_ui.md +++ b/docs/content/en/connecting_your_tools/import_scan_files/import_scan_ui.md @@ -1,6 +1,7 @@ --- title: "Import Scan Form" description: "" +weight: 1 --- If you have a brand new DefectDojo instance, the Import Scan Form is a logical first step to learn the software and set up your environment. From this form, you upload a scan file from a supported tool, which will create Findings to represent those vulnerabilities. While filling out the form, you can decide whether to: @@ -10,7 +11,7 @@ If you have a brand new DefectDojo instance, the Import Scan Form is a logical f It’s easy to reorganize your Product Hierarchy in DefectDojo, so it’s ok if you’re not sure how to set things up yet. -For now, it’s good to know that **Engagements** can store data from multiple tools, which can be useful if you’re running different scans concurrently. +For now, it’s good to know that **Engagements** can store data from multiple tools, which can be useful if you’re running different tools concurrently as part of a single testing effort. ## Accessing the Import Scan Form @@ -60,4 +61,4 @@ This option is especially relevant when using the API. If uploading data with Pr Once your upload has completed, you should be redirected to the Test Page which contains the Findings found in the scan file. You can start working with those results right away, but feel free to consult the following articles: * Learn how to organize your Product Hierarchy to manage different contexts for your Findings and Tests: [Product Hierarchy Overview](https://docs.defectdojo.com/en/working_with_findings/organizing_engagements_tests/product-hierarchy-overview/). 
-* Learn how to add new Findings to this test: **Reimport Data To Extend a Test** +* Learn how to extend a test with additional Findings and reports: **Reimport Data To Extend a Test** diff --git a/docs/content/en/connecting_your_tools/import_scan_files/smart_upload.md b/docs/content/en/connecting_your_tools/import_scan_files/smart_upload.md index 9fafb21b1ba..adc766bec9a 100644 --- a/docs/content/en/connecting_your_tools/import_scan_files/smart_upload.md +++ b/docs/content/en/connecting_your_tools/import_scan_files/smart_upload.md @@ -1,82 +1,54 @@ --- -title: "Smart Upload" +title: "Smart Upload for infrastructure scans" description: "Automatically route incoming Findings to the correct Product" +weight: 3 --- Smart upload is a specialized importer that ingests reports from **infrastructure scanning tools**, including: - - * Nexpose * NMap * OpenVas * Qualys * Tenable - Smart Upload is unique in that it can split Findings from a scan file into separate Products. This is relevant in an Infrastructure scanning context, where the Findings may apply to many different teams, have different implicit SLAs, or need to be included in separate reports due to where they were discovered in your infrastructure. - - Smart Upload handles this by sorting incoming findings based on the Endpoints discovered in the scan. At first, those Findings will need to be manually assigned, or directed into the correct Product from an Unassigned Findings list. However, once a Finding has been assigned to a Product, all subsequent Findings that share an Endpoint or Host will be sent to the same Product. - - -# Smart Upload menu options - +## Smart Upload menu options The Smart Upload menu is stored in a collapsible section of the sidebar. 
- - * **Add Findings allows you to import a new scan file, similar to DefectDojo’s Import Scan method** * **Unassigned Findings lists all Findings from Smart Upload which have yet to be assigned to a Product.** - ![image](images/smart_upload.png) -## The Smart Upload Form - - +### The Smart Upload Form The Smart Upload Import Scan form is essentially the same as the Import Scan form. See our notes on the **Import Scan Form** for more details. - - ![image](images/smart_upload_2.png) -# Unassigned Findings - +## Unassigned Findings Once a Smart Upload has been completed, any Findings which are not automatically assigned to a Product (based on their Endpoint) will be placed in the **Unassigned Findings** list. The first Smart Upload for a given tool does not yet have any method to Assign Findings, so each Finding from this file will be sent to this page for sorting. - - Unassigned Findings are not included in the Product Hierarchy and will not appear in reports, filters or metrics until they have been assigned. - - -## Working with Unassigned Findings - - +### Working with Unassigned Findings ![image](images/smart_upload_3.png) You can select one or more Unassigned Findings for sorting with the checkbox, and perform one of the following actions: - - * **Assign to New Product, which will create a new Product** * **Assign to Existing Product which will move the Finding into an existing Product** * **Disregard Selected Findings**, which will remove the Finding from the list - Whenever a Finding is assigned to a New or Existing Product, it will be placed in a dedicated Engagement called ‘Smart Upload’. This Engagement will contain a Test named according to the Scan Type (e.g. Tenable Scan). Subsequent Findings uploaded via Smart Upload which match those Endpoints will be placed under that Engagement \> Test. - - -## Disregarded Findings - +### Disregarded Findings If a Finding is Disregarded it will be removed from the Unassigned Findings list. 
However, the Finding will not be recorded in memory, so subsequent scan uploads may cause the Finding to appear in the Unassigned Findings list again. - diff --git a/docs/content/en/connecting_your_tools/import_scan_files/using_reimport.md b/docs/content/en/connecting_your_tools/import_scan_files/using_reimport.md index 72525ace37c..8645d3ba184 100644 --- a/docs/content/en/connecting_your_tools/import_scan_files/using_reimport.md +++ b/docs/content/en/connecting_your_tools/import_scan_files/using_reimport.md @@ -1,128 +1,82 @@ --- -title: "Adding new Findings to a Test via Reimport" +title: "Add new Findings to a Test via Reimport" description: "" +weight: 2 --- When a Test is created in DefectDojo (either in advance or by importing a scan file), the Test can be extended with new Finding data. - - For example, let’s say you have a CI/CD pipeline, which is designed to send a new report to DefectDojo every day. Rather than create a new Test or Engagement for each ‘run’ of the pipeline, you could have each report flow into the same Test using **Reimport**. - - - -# Reimport: Process Summary - +## Reimport: Process Summary Reimporting data does not replace any old data in the Test, instead, it compares the incoming scan file with the existing scan data in a test to make informed decisions: - - * Based on the latest file, which vulnerabilities are still present? * Which vulnerabilities are no longer present? * Which vulnerabilities have been previously solved, but have since been reintroduced? - The Test will track and separate each scan version via **Import History,** so that you can check the Finding changes in your Test over time. 
- - ![image](images/using_reimport.png) - -# Reimport Logic: Create, Ignore, Close or Reopen - +## Reimport Logic: Create, Ignore, Close or Reopen When using Reimport, DefectDojo will compare the incoming scan data with the existing scan data, and then apply changes to the Findings contained within your Test as follows: - - -## Create Findings - +### Create Findings Any vulnerabilities which were not contained in the previous import will be added to the Test automatically as new Findings. - - -## Ignore existing Findings - +### Ignore existing Findings If any incoming Findings match Findings that already exist, the incoming Findings will be discarded rather than recorded as Duplicates. These Findings have been recorded already \- no need to add a new Finding object. The Test page will show these Findings as **Left Untouched**. - - -## Close Findings - +### Close Findings If there are any Findings that already exist in the Test but which are not present in the incoming report, you can choose to automatically set those Findings to Inactive and Mitigated (on the assumption that those vulnerabilities have been resolved since the previous import). The Test page will show these Findings as **Closed**. - - If you don’t want any Findings to be closed, you can disable this behavior on Reimport: - * Uncheck the **Close Old Findings** checkbox if using the UI * Set **close\_old\_findings** to **False** if using the API -## Reopen Findings - +### Reopen Findings * If there are any Closed Findings which appear again in a Reimport, they will automatically be Reopened. The assumption is that these vulnerabilities have occurred again, despite previous mitigation. The Test page will track these Findings as **Reactivated**. 
- If you’re using a triage\-less scanner, or you don’t otherwise want Closed Findings to reactivate, you can disable this behavior on Reimport: - * Set **do\_not\_reactivate** to **True** if using the API * Check the **Do Not Reactivate** checkbox if using the UI - - -# Opening the Reimport form - +## Opening the Reimport form The **Re\-Import Findings** form can be accessed on any Test page, under the **⚙️Gear** drop\-down menu. - -## - - ![image](images/using_reimport_2.png) - The **Re\-import Findings** **Form** will **not** allow you to import a different scan type, or change the destination of the Findings you’re trying to upload. If you’re trying to do one of those things, you’ll need to use the **Import Scan Form**. - - - -# Working with Import History - +## Working with Import History Import History for a given test is listed under the **Test Overview** header on the **Test** page. - - This table shows each Import or Reimport as a single line with a **Timestamp**, along with **Branch Tag, Build ID, Commit Hash** and **Version** columns if those were specified. - - - ![image](images/using_reimport_3.png) -## Actions +### Actions This header indicates the actions taken by an Import/Reimport. - * **\# created indicates the number of new Findings created at the time of Import/Reimport** * **\# closed shows the number of Findings that were closed by a Reimport (due to not existing in the incoming report).** * **\# left untouched shows the count of Open Findings which were unchanged by a Reimport (because they also existed in the incoming report).** * **\#** **reactivated** shows any Closed Findings which were reopened by an incoming Reimport. +## Reimport via API \- special note -# Reimport via API \- special note - - -Note that the /reimport API endpoint can both **extend an existing Test** (apply the method in this article) **or** **create a new Test** with new data \- an initial call to /import, or setting up a Test in advance is not required. 
+Note that the /reimport API endpoint can both **extend an existing Test** (apply the method in this article) **or create a new Test** with new data \- an initial call to `/import`, or setting up a Test in advance is not required. +To learn more about creating an automated CI/CD pipeline using DefectDojo, see our guide [here](../api_pipeline_modelling). diff --git a/docs/content/en/connecting_your_tools/parsers/_index.md b/docs/content/en/connecting_your_tools/parsers/_index.md index 3583fd52f5f..e5f2e8fb543 100644 --- a/docs/content/en/connecting_your_tools/parsers/_index.md +++ b/docs/content/en/connecting_your_tools/parsers/_index.md @@ -2,7 +2,7 @@ title: "Supported Reports" description: "DefectDojo has the ability to import scan reports from a large number of security tools." draft: false -weight: 1 +weight: 5 sidebar: collapsed: true --- diff --git a/docs/content/en/user_management/_index.md b/docs/content/en/user_management/_index.md index 4f30f9e4788..80e27cbfec8 100644 --- a/docs/content/en/user_management/_index.md +++ b/docs/content/en/user_management/_index.md @@ -1,6 +1,6 @@ --- -title: "Set User Permissions" -description: "Set User Permissions" +title: "User Management" +description: "Set Up User Permissions, SSO and Groups" summary: "" date: 2023-09-07T16:06:50+02:00 lastmod: 2023-09-07T16:06:50+02:00 diff --git a/docs/content/en/user_management/about_perms_and_roles.md b/docs/content/en/user_management/about_perms_and_roles.md index 76c5f1bef9a..11994762188 100644 --- a/docs/content/en/user_management/about_perms_and_roles.md +++ b/docs/content/en/user_management/about_perms_and_roles.md @@ -1,6 +1,7 @@ --- -title: "About Permissions & Roles" +title: "User permissions & Roles" description: "Summary of all DefectDojo permission options, in detail" +weight: 1 --- If you have a team of users working in DefectDojo, it's important to set up Role\-Based Access Control (RBAC) appropriately so that users can only access specific data. 
Security data is highly sensitive, and DefectDojo's options for access control allow you to be specific about each team member’s access to information.
@@ -39,7 +40,7 @@ Users can be assigned a role of Reader, Writer, Maintainer, Owner or API Importe
​
* **API Importer** **Users** have limited abilities. This Role allows limited API access without exposing the majority of the API endpoints, so is useful for automation or users who are meant to be ‘external’ to DefectDojo. They can view underlying data, Add / Edit Engagements, and Import Scan Data.
-For detailed information on Roles, please see our **[Role Permission Chart](https://docs.defectdojo.com/en/user_management/user-permission-charts/)**.
+For detailed information on Roles, please see our **[Role Permission Chart](../user-permission-charts/)**.

### Global Roles

@@ -85,4 +86,4 @@ If users are part of a Group, they also have Group Configuration Permissions whi

If users create a new Group, they will be given the Owner role of the new Group by default.

-For more information on Configuration Permissions, see our **[Configuration Permissions Chart](https://docs.defectdojo.com/en/user_management/user-permission-charts/)**.
+For more information on Configuration Permissions, see our **[Configuration Permissions Chart](../user-permission-charts/#configuration-permission-chart)**.
diff --git a/docs/content/en/user_management/configure_sso.md b/docs/content/en/user_management/configure_sso.md
index 019dd9c41d1..6517256b9ee 100644
--- a/docs/content/en/user_management/configure_sso.md
+++ b/docs/content/en/user_management/configure_sso.md
@@ -1,12 +1,11 @@
---
-title: "Configure Single-Sign On Login"
+title: "Configure SSO login"
description: "Sign in to DefectDojo using OAuth or SAML login options"
pro-feature: true
---
Users can connect to DefectDojo with a Username and Password, but if you prefer, you can allow users to authenticate using a Single Sign\-On or SSO method.
You can set up DefectDojo to work with your own SAML Identity Provider, but we also support many OAuth methods for authentication: - * Auth0 * Azure AD * GitHub Enterprise @@ -17,25 +16,18 @@ Users can connect to DefectDojo with a Username and Password, but if you prefer, All of these methods can only be configured by a Superuser in DefectDojo. ​ - - - -# Set Up SAML Login - +## Set Up SAML Login If you would like to add DefectDojo to your SAML Identity Provider, here is the process to follow: - 1. Start from **Plugin Manager \> Enterprise Settings** in DefectDojo. ​ - - ![image](images/Configure_Single-Sign_On_Login.png) + 2. Open the SAML tab from this page to configure your sign\-on settings. ​ - - ![image](images/Configure_Single-Sign_On_Login_2.png) + 3. Complete the SAML form. Start by setting an **Entity ID** \- this is either a label or a URL which your SAML Identity Provider can point to, and use to identify DefectDojo. This is a required field. ​ 4. If you wish, set **Login Button Text** in DefectDojo. This text will appear on the button or link users click to initiate the login process. @@ -64,56 +56,34 @@ This is a required field for this form. ​ 11. Finally, check the **Enable SAML** checkbox at the bottom of this form to confirm that you want to use SAML to log in. Once this is enabled, you will see the **Login With SAML** button on the DefectDojo Login Page. - ![image](images/Configure_Single-Sign_On_Login_3.png) -## Additional SAML Options: - - -**Create Unknown User** allows you to decide whether or not to automatically create a new user in DefectDojo if they aren’t found in the SAML response. - - - -**Allow Unknown Attributes** allows you to authorize users who have attributes which are not found in the **Attribute Mapping** field. - +### Additional SAML Options +* **Create Unknown User** allows you to decide whether or not to automatically create a new user in DefectDojo if they aren’t found in the SAML response. 
-**Sign Assertions/Responses** will require any incoming SAML responses to be signed. +* **Allow Unknown Attributes** allows you to authorize users who have attributes which are not found in the **Attribute Mapping** field. +* **Sign Assertions/Responses** will require any incoming SAML responses to be signed. +* **Sign Logout Requests** forces DefectDojo to sign any logout requests. -**Sign Logout Requests** forces DefectDojo to sign any logout requests. +* **Force Authentication** determines whether you want to force your users to authenticate using your Identity Provider each time, regardless of existing sessions. +* **Enable SAML Debugging** will log more detailed SAML output for debugging purposes. - -**Force Authentication** determines whether you want to force your users to authenticate using your Identity Provider each time, regardless of existing sessions. - - - -**Enable SAML Debugging** will log more detailed SAML output for debugging purposes. - - - - - -# Set up OAuth Login (Google, Gitlab, Auth0…) - +## Set up OAuth Login (Google, Gitlab, Auth0…) 1. Start by navigating to the **Plugin Manager \> Enterprise Settings** page in DefectDojo. ​ - - ![image](images/Configure_Single-Sign_On_Login_4.png) + 2. From here, navigate to the OAuth tab and select the service you want to configure from the list. ​ - - ![image](images/Configure_Single-Sign_On_Login_5.png) + 3. Complete the relevant OAuth form. ​ 4. Finally, check the **Enable \_\_ OAuth** button from below, and click **Submit**. ​ - -Users should now be able to sign in using the OAuth service you selected. A button will be added to the DefectDojo Login page to enable them to sign on using this method. - - +Users should now be able to sign in using the OAuth service you selected. A button will be added to the DefectDojo Login page to enable them to sign on using this method. 
\ No newline at end of file diff --git a/docs/content/en/user_management/create_user_group.md b/docs/content/en/user_management/create_user_group.md index 827ae3461d9..948e46e37a4 100644 --- a/docs/content/en/user_management/create_user_group.md +++ b/docs/content/en/user_management/create_user_group.md @@ -1,165 +1,113 @@ --- title: "Create a User Group for shared permissions" description: "Share and maintain permissions for many users" +weight: 3 --- If you have a significant number of DefectDojo users, you may want to create one or more **Groups**, in order to set the same Role\-Based Access Control (RBAC) rules for many users simultaneously. Only Superusers can create User Groups. - - Groups can work in multiple ways: - * Set one, or many different Product or Product Type level Roles for all Group Members, allowing specific control over which Products or Product Types can be accessed and edited by the Group. * Set a Global Role for all Group Members, giving them visibility and access to all Product or Product Types. * Set Configuration Permissions for a Group, allowing them to change specific functionality around DefectDojo. For more information on Roles, please refer to our **Introduction To Roles** article. - - -# The All Groups page - +## The All Groups page From the sidebar, navigate to 👤**Users \> Groups** to see a list of all active and inactive user groups. - - ![image](images/Create_a_User_Group_for_shared_permissions.png) From here, you can create, delete or view your individual Group pages. - - -## Creating a new User Group - +## Create / Edit a User Group 1. Navigate to the 👤**Users \> Groups** page on the sidebar. You will see a list of all existing User Groups, including their Name, Description, Number of Users, Global Role (if applicable) and Email. ​ - - ![image](images/Create_a_User_Group_for_shared_permissions_2.png) -2. Click the **🛠️button** next to the All Groups heading, and select **\+ New Group.** -​ - +2. 
Click the **🛠️ button** next to the All Groups heading, and select **\+ New Group.** +​ ![image](images/Create_a_User_Group_for_shared_permissions_3.png) -​ + 3. This will take you to a page where you can create a new Group. Set the Name for this Group, and add a Description if you wish. -If you want a weekly report sent to a particular Email address, you can enter that as well. - You can also select a Global Role that you wish to apply to this Group, if you wish. Adding a Global Role to the Group will give all Group Members access to all DefectDojo data, along with a limited amount of edit access depending on the Global Role you choose. See our **Introduction To Roles** article for more information. The account that initially creates a Group will have an Owner Role for the Group by Default. +### Set an email address to receive reports +The Weekly Digest is a report on all Group-assigned Products / Product Types. To have a weekly Digest sent out, enter the destination email address you wish to use on the Create / Edit Group form. Group members will still receive notifications as usual. -## Viewing a Group Page - - +### Viewing a Group Page Once you have created a Group, you can access it by selecting it in the menu listed under **Users \> Groups.** - The Group Page can be customized with a **Description**.It features a list of all **Group Members,** as well as the assigned **Products, Product Types**, and the associated **Role** associated with each of these**.** - You can also see the Group’s **Configuration Permissions** listed here. - - - -# Managing a Group’s Users - +## Manage a Group’s Users Group Membership is managed from the individual Group page, which you can select from the list in the **Users \> Groups** page. Click the highlighted Group Name to access the Group page that you wish to edit. 
- In order to view or edit a Group’s Membership, a User must have the appropriate Configuration permissions enabled as well as Membership in the Group (or Superuser status). - - -## **Add a User to a Group** - +### **Add a User to a Group** User Groups can have as many Users assigned as you wish. All Users in a Group will be given the associated Role on each Product or Product Type listed, but Users may also have Individual Roles which supersede the Group role. - 1. From the Group page, select **\+ Add Users** from the **☰** button at the edge of the **Members** heading. ​ - - ![image](images/Create_a_User_Group_for_shared_permissions_4.png) - -​ + 2. This will take you to the **Add Some Group Members** screen. Open the Users drop\-down menu, and then check off each user that you wish to add to the Group. ​ - - ![image](images/Create_a_User_Group_for_shared_permissions_5.png) - -​ -3. .Select the Group Role that you wish to assign these Users. This determines their ability to configure the Group. +3. .Select the Group Role that you wish to assign these Users. This determines their ability to configure the Group. Note that adding a member to a Group will not allow them access to their own Group page by default. This is a separate Configuration permission which must be enabled first. - - -## **Edit or Delete a Member from a User Group** - +### **Edit or Delete a Member from a User Group** 1. From the Group page, select the ⋮ next to the Name of the User you wish to Edit or Delete from the Group. - + **📝 Edit** will take you to the Edit Member screen, where you can change this user's Role (from Reader, Maintainer or Owner to a different choice). - + **🗑️ Delete** removes a User's Membership altogether. It will not remove any contributions or changes the User has made to the Product or Product Type. 
![image](images/Create_a_User_Group_for_shared_permissions_6.png) - -# Managing a Group’s Permissions - +## Manage a Group’s Permissions Group Permissions are managed from the individual Group page, which you can select from the list in the **Users \> Groups** page. Click the highlighted Group Name to access the Group page that you wish to edit. - Note that only Superusers can edit a Group’s permissions (Product / Product Type, or Configuration). ​ - - -## **Add Product Roles or Product Type Roles for a Group** - +### **Add Product Roles or Product Type Roles for a Group** You can register as many Product Roles or Product Type Roles as you wish in each Group. - 1. From the Group page, select **\+ Add Product Types**, or \+ **Add Product** from the relevant heading (Product Type Groups or Product Groups). ​ - - ![image](images/Create_a_User_Group_for_shared_permissions_7.png) -2. This will take you to a **Register New Products / Product Types** Page, where you can select a Product or Product Type to add from the drop\-down menu. +2. This will take you to a **Register New Products / Product Types** Page, where you can select a Product or Product Type to add from the drop\-down menu. ![image](images/Create_a_User_Group_for_shared_permissions_8.png) -3. Select the Role that you want all Group members to have regarding this particular Product or Product Type. +3. Select the Role that you want all Group members to have regarding this particular Product or Product Type. Groups cannot be assigned to Products or Product Types without a Role. If you're not sure which Role you want a Group to have, Reader is a good 'default' option. This will keep your Product state secure until you make your final decision about the Group Role. 
- - -## **Assign Configuration Permissions to a Group** - +### **Assign Configuration Permissions to a Group** If you want the Members in your Group to access Configuration functions, and control certain aspects of DefectDojo, you can assign these responsibilities from the Group page. - - Assign View, Add, Edit or Delete roles from the menu in the bottom\-right hand corner. Checking off a Configuration Permission will immediately give the Group access to this particular function. - - -![image](images/Create_a_User_Group_for_shared_permissions_9.png) \ No newline at end of file +![image](images/Create_a_User_Group_for_shared_permissions_9.png) diff --git a/docs/content/en/user_management/set_user_permissions.md b/docs/content/en/user_management/set_user_permissions.md index 59246a896b7..d442f6f45e6 100644 --- a/docs/content/en/user_management/set_user_permissions.md +++ b/docs/content/en/user_management/set_user_permissions.md @@ -1,6 +1,7 @@ --- -title: "Set a User's Permissions" +title: "Set a User's permissions" description: "How to grant Roles & Permissions to a user, as well as superuser status" +weight: 2 --- ## Introduction to Permission Types @@ -64,7 +65,7 @@ Users can have two kinds of membership simultaneously at the **Product** level: If a user has already been added as a Product Type member, and does not require an additional level of permissions on a specific Product, there is no need to add them as a Product Member. -### Adding a new Member to a Product or Product Type +### Adding a new Member 1. Navigate to the Product or Product Type which you want to assign a user to. You can select the Product from the list under **Products \> All Products**. @@ -78,7 +79,7 @@ If a user has already been added as a Product Type member, and does not require Users cannot be assigned as Members on a Product or Product Type without also having a Role. If you're not sure which Role you want a new user to have, **Reader** is a good 'default' option. 
This will keep your Product state secure until you make your final decision about their Role. -### Edit Or Delete a Member from a Product or Product Type +### Edit Or Delete a Member Members can have their Role changed within a Product or Product Type. @@ -93,7 +94,7 @@ Within the **Product** or **Product Type** page, navigate to the **Members** hea * If you can't Edit or Delete a user's Membership (the **⋮** is not visible) it's because they have this Membership conferred at a **Product Type** level. * A user can have two levels of membership within a Product \- one assigned at the **Product Type** level and another assigned at the **Product** level. -### Adding an additional Product role to a user with a related Product Type role +#### Add an additional Product role to a user with a related Product Type role If a User has a Product Type\-level Role, they will also be assigned Membership with this Role to every underlying Product within the category. However, if you want this User to have a special Role on a specific Product within that Product Type, you can give them an additional Role on the Product level. @@ -117,7 +118,7 @@ Configuration Permissions are not related to a specific Product or Product Type * **Finding Templates:** Access to the Findings \> Finding Templates page * **Groups**: Access the 👤Users \> Groups page * **Jira Instances:** Access the ⚙️Configuration \> JIRA page -* **Language Types**:Access the [Language Types](https://documentation.defectdojo.com/integrations/languages/) API endpoint +* **Language Types**:Access the [Language Types](en/open_source/languages/) API endpoint * **Login Banner**: Edit the ⚙️Configuration \> Login Banner page * **Announcements**: Access ⚙️Configuration \> Announcements * **Note Types:** Access the ⚙️Configuration \> Note Types page @@ -135,11 +136,9 @@ Configuration Permissions are not related to a specific Product or Product Type **Only Superusers can add Configuration Permissions to a User**. - 1. 
Navigate to the 👤 Users \> Users page on the sidebar. You will see a list of all registered accounts on DefectDojo, along with each account's Active status, Global Roles, and other relevant User data. ​ ![image](images/Set_a_User's_Permissions_7.png) - 2. Click the name of the account that you wish to edit. ​ @@ -147,5 +146,5 @@ Configuration Permissions are not related to a specific Product or Product Type ​ 4. Select the User Configuration Permissions you wish to add. ​ -For a detailed breakdown of User Configuration Permissions, please refer to our [Permission Chart](https://docs.defectdojo.com/en/user_management/user-permission-charts/). +For a detailed breakdown of User Configuration Permissions, please refer to our [Permission Chart](../user-permission-charts/). diff --git a/docs/content/en/user_management/user_permission_chart.md b/docs/content/en/user_management/user_permission_chart.md index 5f12118eb75..204db728885 100644 --- a/docs/content/en/user_management/user_permission_chart.md +++ b/docs/content/en/user_management/user_permission_chart.md @@ -1,17 +1,14 @@ --- -title: "User Permission Charts" +title: "User permission charts" description: "All user permissions in detail" +weight: 4 --- -# Role Permission Chart - +## Role Permission Chart This chart is intended to list all permissions related to a Product or Product Type, as well as which permissions are available to each role. - - - -| **Section** | **Permission** | Reader | Writer | Maintainer | Owner | API Imp | +| **Section** | **Permission** | Reader | Writer | Maintainer | Owner | API Importer | | --- | --- | --- | --- | --- | --- | --- | | **Product / Product Type Access** | View assigned Product or Product Type ¹ | ☑️ | ☑️ | ☑️ | ☑️ | ☑️ | | | View nested Products, Engagements, Tests, Findings, Endpoints | ☑️ | ☑️ | ☑️ | ☑️ | ☑️ | @@ -48,19 +45,12 @@ This chart is intended to list all permissions related to a Product or Product T 2. 
When a new Product is added underneath a Product Type, all Product Type\-level Users will be added as Members of the new Product with their Product Type\-level Role. 3. The user who wishes to make changes to a Group must also have **Edit Group** **Configuration Permissions**, and a **Maintainer or Owner** **Group Configuration Role** in the Group they wish to edit. - -# Configuration Permission Chart - +## Configuration Permission Chart Each Configuration Permission refers to a particular function in the software, and has an associated set of actions a user can perform related to this function. - - The majority of Configuration Permissions give users access to certain pages in the UI. - - - | **Configuration Permission** | **View ☑️** | **Add ☑️** | **Edit ☑️** | **Delete ☑️** | | --- | --- | --- | --- | --- | | Credential Manager | Access the **⚙️Configuration \> Credential Manager** page | Add new entries to the Credential Manager | Edit Credential Manager entries | Delete Credential Manager entries | @@ -84,12 +74,7 @@ The majority of Configuration Permissions give users access to certain pages in 1. Access to the Finding Templates page also requires the **Writer, Maintainer** or **Owner** Global Role for this user. - - -# Group Configuration Permissions - - - +## Group Configuration Permissions | Configuration Permission | **Reader** | **Maintainer** | **Owner** | | --- | --- | --- | --- | diff --git a/docs/content/en/working_with_findings/intro_to_findings.md b/docs/content/en/working_with_findings/intro_to_findings.md index a716e7b3b79..058028ac681 100644 --- a/docs/content/en/working_with_findings/intro_to_findings.md +++ b/docs/content/en/working_with_findings/intro_to_findings.md @@ -73,7 +73,7 @@ If you’re in charge of security reporting for many different contexts, softwar * Each Product in DefectDojo can have a different SLA configuration, so that you can instantly flag Findings that are discovered in Production or other highly sensitive environments. 
* You can create a report directly from a **Product Type, Product, Engagement or Test** to ‘zoom in and out’ of your security context. **Tests** contain results from a single tool, **Engagements** can combine multiple Tests, **Products** can contain multiple Engagements, **Product Types** can contain multiple Products. -For more information on creating a Report, see our guides to **[Custom Reporting](https://docs.defectdojo.com/en/pro_reports/using-the-report-builder/)**. +For more information on creating a Report, see our guides to **[Custom Reporting](https://docs.defectdojo.com/en/pro_reports/using_the_report_builder/)**. ### Triage Vulnerabilities using Finding Status diff --git a/docs/layouts/index.html b/docs/layouts/index.html index c99e8414ea3..058e5237397 100644 --- a/docs/layouts/index.html +++ b/docs/layouts/index.html @@ -24,7 +24,7 @@

Import Data

Create Reports

-

Use the Report Builder to present customizable reports of Findings.

+

Use the Report Builder to present customizable reports of Findings.

From 7f7803a8e132959a5769c882a3c082212de77348 Mon Sep 17 00:00:00 2001 From: Cody Maffucci <46459665+Maffooch@users.noreply.github.com> Date: Thu, 19 Dec 2024 15:29:30 -0600 Subject: [PATCH 08/11] Qualys Hacker Guardian: Set Dedupe Config (#11442) --- dojo/settings/settings.dist.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index 483688dcd4e..df655d0190c 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1290,6 +1290,7 @@ def saml2_attrib_map_format(dict): "HackerOne Cases": ["title", "severity"], "KrakenD Audit Scan": ["description", "mitigation", "severity"], "Red Hat Satellite": ["description", "severity"], + "Qualys Hacker Guardian Scan": ["title", "severity", "description"], } # Override the hardcoded settings here via the env var @@ -1535,6 +1536,7 @@ def saml2_attrib_map_format(dict): "KrakenD Audit Scan": DEDUPE_ALGO_HASH_CODE, "PTART Report": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, "Red Hat Satellite": DEDUPE_ALGO_HASH_CODE, + "Qualys Hacker Guardian Scan": DEDUPE_ALGO_HASH_CODE, } # Override the hardcoded settings here via the env var From f2484142d4444519e45aa8b7f49d877777d8b0c0 Mon Sep 17 00:00:00 2001 From: Harold Blankenship <36673698+hblankenship@users.noreply.github.com> Date: Fri, 20 Dec 2024 16:24:07 -0600 Subject: [PATCH 09/11] Dedupe settings for Horusec Scan (#11418) --- dojo/settings/settings.dist.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dojo/settings/settings.dist.py b/dojo/settings/settings.dist.py index df655d0190c..6022f1704cd 100644 --- a/dojo/settings/settings.dist.py +++ b/dojo/settings/settings.dist.py @@ -1211,6 +1211,7 @@ def saml2_attrib_map_format(dict): "Dependency Check Scan": ["title", "cwe", "file_path"], "Dockle Scan": ["title", "description", "vuln_id_from_tool"], "Dependency Track Finding Packaging Format (FPF) Export": ["component_name", "component_version", "vulnerability_ids"], + "Horusec Scan": ["title", 
"description", "file_path", "line"], "Mobsfscan Scan": ["title", "severity", "cwe", "file_path", "description"], "Tenable Scan": ["title", "severity", "vulnerability_ids", "cwe", "description"], "Nexpose Scan": ["title", "severity", "vulnerability_ids", "cwe"], @@ -1430,6 +1431,7 @@ def saml2_attrib_map_format(dict): "Cobalt.io API": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, "Crunch42 Scan": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, "Dependency Track Finding Packaging Format (FPF) Export": DEDUPE_ALGO_HASH_CODE, + "Horusec Scan": DEDUPE_ALGO_HASH_CODE, "Mobsfscan Scan": DEDUPE_ALGO_HASH_CODE, "SonarQube Scan detailed": DEDUPE_ALGO_UNIQUE_ID_FROM_TOOL, "SonarQube Scan": DEDUPE_ALGO_HASH_CODE, From e8c98f10889c59e3e95e5605fbfaf8c90076c18c Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 23 Dec 2024 15:44:13 +0000 Subject: [PATCH 10/11] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index febe451775d..6ff5bff877e 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.42.0-dev", + "version": "2.41.3", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index 7edf826dd58..d6ab484dc20 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. 
from .celery import app as celery_app # noqa: F401 -__version__ = "2.41.2" +__version__ = "2.41.3" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index ab66f338320..4d63b24192b 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.42.0-dev" +appVersion: "2.41.3" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.165-dev +version: 1.6.165 icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap From e4e11c9cd41a1ad163fb12fc8f292d2cc01d3803 Mon Sep 17 00:00:00 2001 From: DefectDojo release bot Date: Mon, 23 Dec 2024 16:22:19 +0000 Subject: [PATCH 11/11] Update versions in application files --- components/package.json | 2 +- dojo/__init__.py | 2 +- helm/defectdojo/Chart.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/components/package.json b/components/package.json index 6ff5bff877e..febe451775d 100644 --- a/components/package.json +++ b/components/package.json @@ -1,6 +1,6 @@ { "name": "defectdojo", - "version": "2.41.3", + "version": "2.42.0-dev", "license" : "BSD-3-Clause", "private": true, "dependencies": { diff --git a/dojo/__init__.py b/dojo/__init__.py index d6ab484dc20..033e2fc2894 100644 --- a/dojo/__init__.py +++ b/dojo/__init__.py @@ -4,6 +4,6 @@ # Django starts so that shared_task will use this app. 
from .celery import app as celery_app # noqa: F401 -__version__ = "2.41.3" +__version__ = "2.42.0-dev" __url__ = "https://github.com/DefectDojo/django-DefectDojo" __docs__ = "https://documentation.defectdojo.com" diff --git a/helm/defectdojo/Chart.yaml b/helm/defectdojo/Chart.yaml index 4d63b24192b..4b1d62f3fef 100644 --- a/helm/defectdojo/Chart.yaml +++ b/helm/defectdojo/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v2 -appVersion: "2.41.3" +appVersion: "2.42.0-dev" description: A Helm chart for Kubernetes to install DefectDojo name: defectdojo -version: 1.6.165 +version: 1.6.166-dev icon: https://www.defectdojo.org/img/favicon.ico maintainers: - name: madchap