From 94ba23402e19362e927e993e7f43aab4ec33d3b2 Mon Sep 17 00:00:00 2001 From: jnathangreeg Date: Sun, 24 Nov 2024 11:23:12 +0200 Subject: [PATCH 1/5] complete workflows tests Signed-off-by: jnathangreeg --- .../system/tests_cases/workflows_tests.py | 53 ++- .../deployments/http/apache-http.yaml | 22 ++ .../deployments/http1/apache1-http.yaml | 22 ++ .../slack-alert-channel.json | 51 +++ .../teams-alert-channel.json | 52 +++ infrastructure/backend_api.py | 117 +++++- system_test_mapping.json | 37 +- systest_utils/statics.py | 4 + tests_scripts/workflows/conf_workflows.py | 206 +++++++++++ tests_scripts/workflows/jira_workflows.py | 325 +++++++++++++++++ tests_scripts/workflows/slack_workflows.py | 330 +++++++++++++++++ tests_scripts/workflows/teams_workflows.py | 332 ++++++++++++++++++ tests_scripts/workflows/utils.py | 105 ++++++ tests_scripts/workflows/workflows.py | 59 +--- 14 files changed, 1643 insertions(+), 72 deletions(-) create mode 100644 configurations/workflows_notifications/deployments/http/apache-http.yaml create mode 100644 configurations/workflows_notifications/deployments/http1/apache1-http.yaml create mode 100644 configurations/workflows_notifications/slack-alert-channel.json create mode 100644 configurations/workflows_notifications/teams-alert-channel.json create mode 100644 tests_scripts/workflows/conf_workflows.py create mode 100644 tests_scripts/workflows/jira_workflows.py create mode 100644 tests_scripts/workflows/slack_workflows.py create mode 100644 tests_scripts/workflows/teams_workflows.py create mode 100644 tests_scripts/workflows/utils.py diff --git a/configurations/system/tests_cases/workflows_tests.py b/configurations/system/tests_cases/workflows_tests.py index 9b471c9d..12f6fee1 100644 --- a/configurations/system/tests_cases/workflows_tests.py +++ b/configurations/system/tests_cases/workflows_tests.py @@ -1,31 +1,64 @@ import inspect from .structures import TestConfiguration +from systest_utils.statics import 
DEFAULT_WORKFLOWS_DEPLOYMENT_PATH +from os.path import join +from tests_scripts.workflows.utils import get_messages_from_slack_channel, enrich_slack_alert_channel, get_messages_from_teams_channel, enrich_teams_alert_channel + class WorkflowsTests(object): ''' - NOTE: + NOTE: ''' @staticmethod - def slack_alerts_workflows(): - from tests_scripts.workflows.workflows import WorkflowsSlack + def slack_notifications_workflows(): + from tests_scripts.workflows.slack_workflows import WorkflowsSlackNotifications return TestConfiguration( name=inspect.currentframe().f_code.co_name, - test_obj=WorkflowsSlack, - create_test_tenant = True + test_obj=WorkflowsSlackNotifications, + deployments=join(DEFAULT_WORKFLOWS_DEPLOYMENT_PATH, "http"), + deployments1=join(DEFAULT_WORKFLOWS_DEPLOYMENT_PATH, "http1"), + getMessagesFunc=get_messages_from_slack_channel, + enrichAlertChannelFunc=enrich_slack_alert_channel, + create_test_tenant = False ) @staticmethod - def teams_alerts_workflows(): - from tests_scripts.workflows.workflows import WorkflowsSlack + def teams_notifications_workflows(): + from tests_scripts.workflows.teams_workflows import WorkflowsTeamsNotifications return TestConfiguration( name=inspect.currentframe().f_code.co_name, - test_obj=WorkflowsSlack, - create_test_tenant = True - ) + test_obj=WorkflowsTeamsNotifications, + deployments=join(DEFAULT_WORKFLOWS_DEPLOYMENT_PATH, "http"), + deployments1=join(DEFAULT_WORKFLOWS_DEPLOYMENT_PATH, "http1"), + getMessagesFunc=get_messages_from_teams_channel, + enrichAlertChannelFunc=enrich_teams_alert_channel, + create_test_tenant = False + ) + + # @staticmethod + # def jira_notifications_workflows(): + # from tests_scripts.workflows.jira_workflows import WorkflowsJiraNotifications + # return TestConfiguration( + # name=inspect.currentframe().f_code.co_name, + # test_obj=WorkflowsJiraNotifications, + # deployments=join(DEFAULT_WORKFLOWS_DEPLOYMENT_PATH, "http"), + # deployments1=join(DEFAULT_WORKFLOWS_DEPLOYMENT_PATH, "http1"), + # 
# getMessagesFunc=, // TODO: implement this function + # # enrichAlertChannelFunc=, // TODO: implement this function + # create_test_tenant = False + # ) + @staticmethod + def workflows_configurations(): + from tests_scripts.workflows.conf_workflows import WorkflowConfigurations + return TestConfiguration( + name=inspect.currentframe().f_code.co_name, + test_obj=WorkflowConfigurations, + create_test_tenant = False + ) \ No newline at end of file diff --git a/configurations/workflows_notifications/deployments/http/apache-http.yaml b/configurations/workflows_notifications/deployments/http/apache-http.yaml new file mode 100644 index 00000000..06efc767 --- /dev/null +++ b/configurations/workflows_notifications/deployments/http/apache-http.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: http + labels: + app: http +spec: + replicas: 1 + selector: + matchLabels: + app: http + template: + metadata: + labels: + app: http + spec: + containers: + - image: httpd:alpine3.16 + name: http + env: + - name: app + value: http \ No newline at end of file diff --git a/configurations/workflows_notifications/deployments/http1/apache1-http.yaml b/configurations/workflows_notifications/deployments/http1/apache1-http.yaml new file mode 100644 index 00000000..29fbe2b9 --- /dev/null +++ b/configurations/workflows_notifications/deployments/http1/apache1-http.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: http-alt + labels: + app: http-alt +spec: + replicas: 1 + selector: + matchLabels: + app: http-alt + template: + metadata: + labels: + app: http-alt + spec: + containers: + - image: httpd:alpine3.16 + name: http-alt + env: + - name: app + value: http-alt diff --git a/configurations/workflows_notifications/slack-alert-channel.json b/configurations/workflows_notifications/slack-alert-channel.json new file mode 100644 index 00000000..3af9171f --- /dev/null +++ b/configurations/workflows_notifications/slack-alert-channel.json @@ 
-0,0 +1,51 @@ +{ + "channel": { + "name": "My Slack Channel", + "provider": "slack", + "context": { + "channel": { + "id": "abcd1234" + } + } + }, + "notifications": [ + { + "notificationType": "securityRiskPush:newSecurityRisk", + "disabled": false, + "parameters": { + "minSeverity": 300 + } + }, + { + "notificationType": "push:newClusterAdmin", + "disabled": false + }, + { + "notificationType": "containerScanPush:newVulnerability", + "disabled": false, + "parameters": { + "minSeverity": 500 + } + }, + { + "notificationType": "containerScanPush:vulnerabilityNewFix", + "disabled": false, + "parameters": { + "minSeverity": 500 + } + }, + { + "notificationType": "push:complianceDrift", + "disabled": false, + "parameters": { + "driftPercentage": 15 + } + } + ], + "scope": [ + { + "cluster": "cluster", + "namespaces": ["alerts"] + } + ] +} \ No newline at end of file diff --git a/configurations/workflows_notifications/teams-alert-channel.json b/configurations/workflows_notifications/teams-alert-channel.json new file mode 100644 index 00000000..346143d3 --- /dev/null +++ b/configurations/workflows_notifications/teams-alert-channel.json @@ -0,0 +1,52 @@ +{ + "channel": { + "name": "My Teams Channel", + "provider": "teams", + "context": { + "webhook": { + "name": "webhook", + "id": "https://teams/mywebhook" + } + } + }, + "notifications": [ + { + "notificationType": "securityRiskPush:newSecurityRisk", + "disabled": false, + "parameters": { + "minSeverity": 300 + } + }, + { + "notificationType": "push:newClusterAdmin", + "disabled": false + }, + { + "notificationType": "containerScanPush:newVulnerability", + "disabled": false, + "parameters": { + "minSeverity": 500 + } + }, + { + "notificationType": "containerScanPush:vulnerabilityNewFix", + "disabled": false, + "parameters": { + "minSeverity": 500 + } + }, + { + "notificationType": "push:complianceDrift", + "disabled": false, + "parameters": { + "driftPercentage": 15 + } + } + ], + "scope": [ + { + "cluster": "cluster", + 
"namespaces": ["alerts"] + } + ] +} \ No newline at end of file diff --git a/infrastructure/backend_api.py b/infrastructure/backend_api.py index b17ff394..36c5141a 100644 --- a/infrastructure/backend_api.py +++ b/infrastructure/backend_api.py @@ -135,6 +135,12 @@ class NotExistingCustomer(Exception): API_SECCOMP_LIST = "/api/v1/seccomp/list" API_SECCOMP_GENERATE = "/api/v1/seccomp/generate" +API_WORKFLOWS = "/api/v1/workflows" + +API_WEBHOOKS = "/api/v1/notifications/teams" +API_TEAMS_TEST_MESSAGE = "/api/v1/notifications/teams/testMessage" + + def deco_cookie(func): def apply_cookie(*args, **kwargs): @@ -1970,7 +1976,9 @@ def delete(self, url, **args): # for deletion we need to wait a while if not 'timeout' in args or args["timeout"] < 120: args["timeout"] = 120 - return requests.delete(self.server + url, **args) + url = self.server + url + return requests.delete(url, **args) + def get_cookie(self): return self.selected_tenant_cookie @@ -2902,7 +2910,6 @@ def get_seccomp_workloads_list(self, body: dict): return r def generate_seccomp_profile(self, body: dict): - r = self.post(API_SECCOMP_GENERATE, params={"customerGUID": self.customer_guid}, json=body, timeout=60) if not 200 <= r.status_code < 300: @@ -2910,9 +2917,115 @@ def generate_seccomp_profile(self, body: dict): 'Error accessing dashboard. Request to: %s "%s" (code: %d, message: %s)' % ( API_SECCOMP_GENERATE, self.customer, r.status_code, r.text)) return r + + def get_workflows(self, **kwargs): + url = API_WORKFLOWS + "/list" + params = {"customerGUID": self.selected_tenant_id} + if kwargs: + params.update(**kwargs) + r = self.post(url, params=params, json={"pageSize": 50, "pageNum": 1, "orderBy": "", "innerFilters":[]}) + if not 200 <= r.status_code < 300: + raise Exception( + 'Error accessing workflows. 
Customer: "%s" (code: %d, message: %s)' % ( + self.customer, r.status_code, r.text)) + return r.json() + + + + def create_workflow(self, body): + url = API_WORKFLOWS + params = {"customerGUID": self.selected_tenant_id} + r = self.post(url, params=params, json=body) + if not 200 <= r.status_code < 300: + raise Exception( + 'Error creating workflow. Customer: "%s" (code: %d, message: %s)' % ( + self.customer, r.status_code, r.text)) + return r.json() + def delete_workflow(self, guid): + url = API_WORKFLOWS + params = {"customerGUID": self.selected_tenant_id} + body = { + "innerFilters": [ + { + "guid": guid + } + ] + } + r = self.delete(url, params=params, json=body) + if not 200 <= r.status_code < 300: + raise Exception( + 'Error deleting workflow. Customer: "%s" (code: %d, message: %s)' % ( + self.customer, r.status_code, r.text)) + return r.json() + + + def update_workflow(self, body): + url = API_WORKFLOWS + params = {"customerGUID": self.selected_tenant_id} + r = self.put(url, params=params, json=body) + if not 200 <= r.status_code < 300: + raise Exception( + 'Error updating workflow. Customer: "%s" (code: %d, message: %s)' % ( + self.customer, r.status_code, r.text)) + return r.json() + + def get_webhooks(self): + url = API_WEBHOOKS + r = self.get(url, params={"customerGUID": self.selected_tenant_id}) + if not 200 <= r.status_code < 300: + raise Exception( + 'Error accessing webhooks. Customer: "%s" (code: %d, message: %s)' % ( + self.customer, r.status_code, r.text)) + return r.json() + + + def create_webhook(self, body): + url = API_WEBHOOKS + params = {"customerGUID": self.selected_tenant_id} + r = self.post(url, params=params, json=body) + if not 200 <= r.status_code < 300: + raise Exception( + 'Error creating webhook. 
Customer: "%s" (code: %d, message: %s)' % ( + self.customer, r.status_code, r.text)) + return r.json() + def delete_webhook(self, body): + url = API_WEBHOOKS + params = {"customerGUID": self.selected_tenant_id} + r = self.delete(url, params=params, json=body) + if not 200 <= r.status_code < 300: + raise Exception( + 'Error deleting webhook. Customer: "%s" (code: %d, message: %s)' % ( + self.customer, r.status_code, r.text)) + return r.json() + + def update_webhook(self, body): + url = API_WEBHOOKS + params = {"customerGUID": self.selected_tenant_id} + r = self.put(url, params=params, json=body) + if not 200 <= r.status_code < 300: + raise Exception( + 'Error updating webhook. Customer: "%s" (code: %d, message: %s)' % ( + self.customer, r.status_code, r.text)) + return r.json() + + def test_webhook_message(self, body): + params = {"customerGUID": self.selected_tenant_id} + r = self.post(API_TEAMS_TEST_MESSAGE, params=params, json=body) + if not 200 <= r.status_code < 300: + raise Exception( + 'Error testing webhook. 
Customer: "%s" (code: %d, message: %s)' % ( + self.customer, r.status_code, r.text)) + return r.json() + + + + + + + class Solution(object): """docstring for Solution""" diff --git a/system_test_mapping.json b/system_test_mapping.json index 30b67671..6567547c 100644 --- a/system_test_mapping.json +++ b/system_test_mapping.json @@ -230,7 +230,6 @@ "skip_on_environment": "", "owner": "" }, - "scan_local_file": { "target": [ "CLI" @@ -801,7 +800,7 @@ "skip_on_environment": "", "owner": "eranm@armosec.io" }, - "smart_remediation_all_controls":{ + "smart_remediation_all_controls": { "target": [ "In cluster", "Backend" @@ -1625,7 +1624,7 @@ "skip_on_environment": "", "owner": "rinao@armosec.io" }, - "slack_alerts_workflows": { + "slack_notifications_workflows": { "target": [ "Backend" ], @@ -1639,7 +1638,7 @@ "skip_on_environment": "", "owner": "jonathang@armosec.io" }, - "teams_alerts_workflows": { + "teams_notifications_workflows": { "target": [ "Backend" ], @@ -1652,5 +1651,33 @@ "description": "Checks teams workflows", "skip_on_environment": "", "owner": "jonathang@armosec.io" + }, + "jira_notifications_workflows": { + "target": [ + "Backend" + ], + "target_repositories": [ + "users-notification-service-dummy", + "config-service-dummy", + "cadashboardbe-dummy", + "event-ingester-service-dummy" + ], + "description": "Checks teams workflows", + "skip_on_environment": "", + "owner": "jonathang@armosec.io" + }, + "workflows_configurations": { + "target": [ + "Backend" + ], + "target_repositories": [ + "users-notification-service-dummy", + "config-service-dummy", + "cadashboardbe-dummy", + "event-ingester-service-dummy" + ], + "description": "Checks workflows configurations", + "skip_on_environment": "", + "owner": "jonathang@armosec.io" } -} +} \ No newline at end of file diff --git a/systest_utils/statics.py b/systest_utils/statics.py index f110fa09..5cadb7e8 100644 --- a/systest_utils/statics.py +++ b/systest_utils/statics.py @@ -71,6 +71,10 @@ 
DEFAULT_NOTIFICATIONS_DEPLOYMENT_PATH = os.path.join(DEFAULT_NOTIFICATIONS_PATHS, 'deployments') DEFAULT_NOTIFICATIONS_JOB_PATH = os.path.join(DEFAULT_NOTIFICATIONS_PATHS, 'jobs') +# workflows +DEFAULT_WORKFLOWS_PATHS = os.path.abspath(os.path.join('configurations', 'workflows_notifications')) +DEFAULT_WORKFLOWS_DEPLOYMENT_PATH = os.path.join(DEFAULT_WORKFLOWS_PATHS, 'deployments') + # kdr DEFAULT_KDR_DEPLOYMENT_PATH = os.path.join(DEFAULT_K8S_PATHS, 'deployments') diff --git a/tests_scripts/workflows/conf_workflows.py b/tests_scripts/workflows/conf_workflows.py new file mode 100644 index 00000000..fab68ef6 --- /dev/null +++ b/tests_scripts/workflows/conf_workflows.py @@ -0,0 +1,206 @@ +from tests_scripts.workflows.utils import (get_env, +WORKFLOW_NAME, +SEVERITIES_CRITICAL, +SLACK_CHANNEL_NAME, +TEAMS_CHANNEL_NAME, +UPDATED_WORKFLOW_NAME, +SEVERITIES_HIGH, +EXPECTED_CREATE_RESPONSE, +EXPECTED_UPDATE_RESPONSE) + +from configurations.system.tests_cases.structures import TestConfiguration +from systest_utils import Logger +from tests_scripts.base_test import BaseTest + + + +class WorkflowConfigurations(BaseTest): + """ + Check workflow - list, create, update, delete + """ + def __init__(self, test_obj=None, backend=None, test_driver=None): + super().__init__(test_driver=test_driver, test_obj=test_obj, backend=backend) + self.test_obj: TestConfiguration = test_obj + + def start(self): + """ + Agenda: + 1. create workflow + 2. validate workflows + 3. update workflow + 4. validate updated workflow + 5. delete workflow + 6. cleanup + """ + assert self.backend is not None, f'The test {self.test_driver.test_name} must run with backend' + + Logger.logger.info("1. create slack workflow") + workflow_creation_body = self.build_slack_workflow_body(name=WORKFLOW_NAME, severities=SEVERITIES_CRITICAL, channel_name=SLACK_CHANNEL_NAME, channel_id=get_env("SLACK_CHANNEL_ID")) + self.create_and_assert_workflow(workflow_creation_body, EXPECTED_CREATE_RESPONSE) + + Logger.logger.info("2. 
validate slack workflow created successfully") + self.validate_slack_workflow(WORKFLOW_NAME, SEVERITIES_CRITICAL, SLACK_CHANNEL_NAME) + + Logger.logger.info("3. update slack workflow") + workflow_guid = self.return_workflow_guid(WORKFLOW_NAME) + update_workflow_body = self.build_slack_workflow_body(name=UPDATED_WORKFLOW_NAME, severities=SEVERITIES_HIGH, channel_name=SLACK_CHANNEL_NAME, channel_id=get_env("SLACK_CHANNEL_ID"), guid=workflow_guid) + self.create_and_assert_workflow(update_workflow_body, EXPECTED_UPDATE_RESPONSE, update=True) + + Logger.logger.info("4. validate slack updated workflow") + self.validate_slack_workflow(UPDATED_WORKFLOW_NAME, SEVERITIES_HIGH, SLACK_CHANNEL_NAME) + + Logger.logger.info("5. delete slack workflow") + workflow_guid = self.return_workflow_guid(UPDATED_WORKFLOW_NAME) + self.delete_and_assert_workflow(workflow_guid=workflow_guid) + + Logger.logger.info("6. create teams workflow") + workflow_creation_body = self.build_teams_workflow_body(name=WORKFLOW_NAME, severities=SEVERITIES_CRITICAL, channel_name=TEAMS_CHANNEL_NAME, channel_id=get_env("TEAMS_CHANNEL_GUID"), webhook_url=get_env("WEBHOOK_URL")) + self.create_and_assert_workflow(workflow_creation_body, EXPECTED_CREATE_RESPONSE) + + Logger.logger.info("7. validate teams workflow created successfully") + self.validate_teams_workflow(WORKFLOW_NAME, SEVERITIES_CRITICAL, TEAMS_CHANNEL_NAME) + + Logger.logger.info("8. update teams workflow") + workflow_guid = self.return_workflow_guid(WORKFLOW_NAME) + update_workflow_body = self.build_teams_workflow_body(name=UPDATED_WORKFLOW_NAME, severities=SEVERITIES_HIGH, channel_name=TEAMS_CHANNEL_NAME, channel_id=get_env("TEAMS_CHANNEL_GUID"), webhook_url=get_env("WEBHOOK_URL"), guid=workflow_guid) + self.create_and_assert_workflow(update_workflow_body, EXPECTED_UPDATE_RESPONSE, update=True) + + Logger.logger.info("9. 
validate teams updated workflow") + self.validate_teams_workflow(UPDATED_WORKFLOW_NAME, SEVERITIES_HIGH, TEAMS_CHANNEL_NAME) + + Logger.logger.info("10. delete teams workflow") + workflow_guid = self.return_workflow_guid(UPDATED_WORKFLOW_NAME) + self.delete_and_assert_workflow(workflow_guid=workflow_guid) + + Logger.logger.info("11. cleanup") + return self.cleanup() + + + + def build_slack_workflow_body(self, name, severities, channel_name, channel_id, guid=None): + return { + "guid": guid, + "updatedTime": "", + "updatedBy": "", + "enabled": True, + "name": name, + "scope": [], + "conditions": [ + { + "category": "SecurityRisks", + "parameters": { + "severities": severities + } + } + ], + "notifications": [ + { + "provider": "slack", + "slackChannels": [ + { + "id": channel_id, + "name": channel_name + } + ] + } + ] + } + + def build_teams_workflow_body(self, name, severities, channel_name, channel_id, webhook_url, guid=None): + return { + "guid": guid, + "updatedTime": "", + "updatedBy": "", + "enabled": True, + "name": name, + "scope": [], + "conditions": [ + { + "category": "SecurityRisks", + "parameters": { + "severities": severities + } + } + ], + "notifications": [ + { + "provider": "teams", + "teamsChannels": [ + { + "guid": channel_id, + "name": channel_name, + "webhookURL": webhook_url + } + ] + } + ] + } + + def create_and_assert_workflow(self, workflow_body, expected_response, update=False): + if update: + workflow_res = self.backend.update_workflow(body=workflow_body) + else: + workflow_res = self.backend.create_workflow(body=workflow_body) + + + assert workflow_res == expected_response, f"Expected {expected_response}, but got {workflow_res['response']}" + return workflow_res + + def validate_slack_workflow(self, expected_name, expected_severities, expected_slack_channel): + workflows = self.backend.get_workflows() + assert workflows["total"]["value"] >= 1, f"Expected total value to be greater or equal to 1, but got {workflows['total']['value']}" + + + 
found = False + for workflow in workflows["response"]: + if workflow["name"] == expected_name: + severities = workflow["conditions"][0]["parameters"]["severities"] + assert severities == expected_severities, f"Expected severities {expected_severities} but got {severities}" + + slack_channel = workflow["notifications"][0]["slackChannels"][0]["name"] + assert slack_channel == expected_slack_channel, f"Expected slack channel {expected_slack_channel} but got {slack_channel}" + + found = True + break + + assert found, f"Workflow with name {expected_name} not found" + + def validate_teams_workflow(self, expected_name, expected_severities, expected_teams_channel): + workflows = self.backend.get_workflows() + assert workflows["total"]["value"] >= 1, f"Expected total value to be greater or equal to 1, but got {workflows['total']['value']}" + + found = False + for workflow in workflows["response"]: + if workflow["name"] == expected_name: + severities = workflow["conditions"][0]["parameters"]["severities"] + assert severities == expected_severities, f"Expected severities {expected_severities} but got {severities}" + + teams_channel = workflow["notifications"][0]["teamsChannels"][0]["name"] + assert teams_channel == expected_teams_channel, f"Expected teams channel {expected_teams_channel} but got {teams_channel}" + + found = True + break + + assert found, f"Workflow with name {expected_name} not found" + + + + def delete_and_assert_workflow(self, workflow_guid): + workflow_delete_res = self.backend.delete_workflow(workflow_guid) + assert workflow_delete_res == "Workflow deleted", f"Expected 'Workflow deleted', but got {workflow_delete_res['response']}" + workflows = self.backend.get_workflows()["response"] + for workflow in workflows: + assert workflow["guid"] != workflow_guid, f"Expected workflow with guid {workflow_guid} to be deleted, but it still exists" + self.cleanup() + + def return_workflow_guid(self, workflow_name): + workflows = 
self.backend.get_workflows()["response"] + for workflow in workflows: + if workflow["name"] == workflow_name: + return workflow["guid"] + print(f"Workflow with name {workflow_name} not found") + return None + + + def cleanup(self, **kwargs): + return super().cleanup(**kwargs) \ No newline at end of file diff --git a/tests_scripts/workflows/jira_workflows.py b/tests_scripts/workflows/jira_workflows.py new file mode 100644 index 00000000..72b1cb7e --- /dev/null +++ b/tests_scripts/workflows/jira_workflows.py @@ -0,0 +1,325 @@ +from tests_scripts.workflows.workflows import Workflows +from tests_scripts.workflows.utils import ( + get_env, + NOTIFICATIONS_SVC_DELAY, + NOTIFICATIONS_SVC_DELAY_FIRST_SCAN, + EXPECTED_CREATE_RESPONSE, + JIRA_PROVIDER_NAME, + SECURITY_RISKS, + SECURITY_RISKS_ID, + VULNERABILITIES, + SEVERITIES_CRITICAL, + SEVERITIES_HIGH, + VULNERABILITIES_WORKFLOW_NAME, + SECURITY_RISKS_WORKFLOW_NAME, + COMPLIANCE_WORKFLOW_NAME, + COMPLIANCE +) +from systest_utils import Logger, TestUtil +import time +from infrastructure import KubectlWrapper +from systest_utils import Logger, statics, TestUtil + + + + +class WorkflowsJiraNotifications(Workflows): + def __init__(self, test_obj=None, backend=None, kubernetes_obj=None, test_driver=None): + super(Workflows, self).__init__(test_driver=test_driver, test_obj=test_obj, backend=backend, + kubernetes_obj=kubernetes_obj) + self.fw_name = None + self.cluster = None + self.wait_for_agg_to_end = False + + + def start(self): + """ + Agenda: + 1. Post custom framework + 2. Create new workflows + 3. Validate workflows created successfully + 4. Apply deployment + 5. Install kubescape with helm-chart + 6. Trigger first scan + 7. Apply second deployment + 8. Update custom framework + 9. Add SA to cluster-admin + 10. Trigger second scan + 11. Assert all messages sent + 12. 
Cleanup + """ + + assert self.backend is not None, f'the test {self.test_driver.test_name} must run with backend' + + self.cluster, namespace = self.setup(apply_services=False) + + Logger.logger.info("Stage 1: Post custom framework") + self.fw_name = "systest-fw-" + self.cluster + _, fw = self.post_custom_framework(framework_file="system-test-framework-high-comp.json", + cluster_name=self.cluster) + + + Logger.logger.info("Stage 2: Create new workflows") + workflow_body = self.build_securityRisk_workflow_body(name=SECURITY_RISKS_WORKFLOW_NAME, severities=SEVERITIES_CRITICAL, siteId=get_env("JIRA_SITE_ID"), projectId=get_env("JIRA_PROJECT_ID"), cluster=self.cluster, namespace=None, category=SECURITY_RISKS, securityRiskIDs=SECURITY_RISKS_ID, issueTypeId=get_env("JIRA_ISSUE_TYPE_ID")) + self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) + workflow_body = self.build_vulnerabilities_workflow_body(name=VULNERABILITIES_WORKFLOW_NAME, severities=SEVERITIES_HIGH, siteId=get_env("JIRA_SITE_ID"), projectId=get_env("JIRA_PROJECT_ID"), cluster=self.cluster, namespace=None, category=VULNERABILITIES, cvss=6, issueTypeId=get_env("JIRA_ISSUE_TYPE_ID")) + self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) + workflow_body = self.build_compliance_workflow_body(name=COMPLIANCE_WORKFLOW_NAME, siteId=get_env("JIRA_SITE_ID"), projectId=get_env("JIRA_PROJECT_ID"), cluster=self.cluster, namespace=None, category=COMPLIANCE, driftPercentage=15, issueTypeId=get_env("JIRA_ISSUE_TYPE_ID")) + self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) + before_test_message_ts = time.time() + + Logger.logger.info("Stage 3: Validate workflows created successfully") + self.validate_workflow(VULNERABILITIES_WORKFLOW_NAME, JIRA_PROVIDER_NAME) + self.validate_workflow(SECURITY_RISKS_WORKFLOW_NAME, JIRA_PROVIDER_NAME) + self.validate_workflow(COMPLIANCE_WORKFLOW_NAME, JIRA_PROVIDER_NAME) + + 
Logger.logger.info('Stage 4: Apply deployment') + workload_objs: list = self.apply_directory(path=self.test_obj["deployments"], namespace=namespace) + self.verify_all_pods_are_running(namespace=namespace, workload=workload_objs, timeout=240) + + Logger.logger.info('Stage 6: Install kubescape with helm-chart') + self.install_kubescape() + + + Logger.logger.info('Stage 7: Trigger first scan') + self.backend.create_kubescape_job_request(cluster_name=self.cluster, framework_list=[self.fw_name]) + TestUtil.sleep(NOTIFICATIONS_SVC_DELAY_FIRST_SCAN, "waiting for first scan to be saved in notification service") + + Logger.logger.info('Stage 8: Apply second deployment') + workload_objs: list = self.apply_directory(path=self.test_obj["deployments1"], namespace=namespace) + self.verify_all_pods_are_running(namespace=namespace, workload=workload_objs, timeout=240) + + + Logger.logger.info('Stage 9: Update custom framework') + self.put_custom_framework(framework_file="system-test-framework-low-comp.json", + framework_guid=fw['guid'], cluster_name=self.cluster) + + Logger.logger.info('Stage 10: Add SA to cluster-admin') + KubectlWrapper.add_new_service_account_to_cluster_admin(service_account="service-account", + namespace=namespace) + + Logger.logger.info('Stage 11: Trigger second scan') + self.backend.create_kubescape_job_request(cluster_name=self.cluster, framework_list=[self.fw_name]) + TestUtil.sleep(NOTIFICATIONS_SVC_DELAY, "waiting for first scan to be saved in notification service") + + Logger.logger.info('Stage 12: Assert all messages sent') + # // TODO: implement this function + + Logger.logger.info('Stage 13: Cleanup') + return self.cleanup() + + + + def cleanup(self, **kwargs): + self.delete_and_assert_workflow(self.return_workflow_guid(SECURITY_RISKS_WORKFLOW_NAME)) + self.delete_and_assert_workflow(self.return_workflow_guid(VULNERABILITIES_WORKFLOW_NAME)) + self.delete_and_assert_workflow(self.return_workflow_guid(COMPLIANCE_WORKFLOW_NAME)) + return 
super().cleanup(**kwargs) + + + def post_custom_framework(self, framework_file, cluster_name: str): + framework_name, ks_custom_fw = self.create_ks_custom_fw(cluster_name=cluster_name, + framework_file=framework_file) + report_fw, _ = self.wait_for_report(report_type=self.backend.post_custom_framework, fw_object=ks_custom_fw) + return ks_custom_fw, report_fw + + def put_custom_framework(self, framework_file, framework_guid: str, cluster_name: str): + framework_name, ks_custom_fw = self.create_ks_custom_fw(cluster_name=cluster_name, + framework_file=framework_file, + framework_guid=framework_guid) + report_fw, _ = self.wait_for_report(report_type=self.backend.put_custom_framework, fw_object=ks_custom_fw) + return ks_custom_fw, report_fw + + def assert_security_risks_message_sent(self, messages, cluster): + found = 0 + for message in messages: + message_string = str(message) + if "Risk:" in message_string and cluster in message_string: + found += 1 + assert found > 0, "expected to have at least one security risk message" + + def assert_vulnerability_message_sent(self, messages, cluster): + found = 0 + for message in messages: + message_string = str(message) + if "New Vulnerability found" in message_string and cluster in message_string: + found += 1 + assert found > 0, "expected to have at least one vulnerability message" + + def assert_misconfiguration_message_sent(self, messages, cluster): + found = 0 + for message in messages: + message_string = str(message) + if "Your compliance score has decreased by" in message_string and cluster in message_string: + found += 1 + assert found > 0, f"expected to have at least one new misconfiguration message, found {found}" + + + + # def assert_jira_ticket_created(self, begin_time, cluster): + + + + def install_kubescape(self, helm_kwargs: dict = None): + self.add_and_upgrade_armo_to_repo() + self.install_armo_helm_chart(helm_kwargs=helm_kwargs) + self.verify_running_pods(namespace=statics.CA_NAMESPACE_FROM_HELM_NAME) + + + def 
create_and_assert_workflow(self, workflow_body, expected_response, update=False): + if update: + workflow_res = self.backend.update_workflow(body=workflow_body) + else: + workflow_res = self.backend.create_workflow(body=workflow_body) + + + assert workflow_res == expected_response, f"Expected {expected_response}, but got {workflow_res['response']}" + return workflow_res + + def delete_and_assert_workflow(self, workflow_guid): + workflow_delete_res = self.backend.delete_workflow(workflow_guid) + assert workflow_delete_res == "Workflow deleted", f"Expected 'Workflow deleted', but got {workflow_delete_res['response']}" + workflows = self.backend.get_workflows()["response"] + for workflow in workflows: + assert workflow["guid"] != workflow_guid, f"Expected workflow with guid {workflow_guid} to be deleted, but it still exists" + self.cleanup() + + def return_workflow_guid(self, workflow_name): + workflows = self.backend.get_workflows()["response"] + for workflow in workflows: + if workflow["name"] == workflow_name: + return workflow["guid"] + print(f"Workflow with name {workflow_name} not found") + return None + + def build_securityRisk_workflow_body(self, name, severities, siteId, projectId, cluster, namespace, category, securityRiskIDs, issueTypeId, guid=None): + workflow_body = { + "guid": guid, + "updatedTime": "", + "updatedBy": "", + "enabled": True, + "name": name, + "scope": [ + { + "cluster": cluster, + "namespace": namespace + } + ], + "conditions": [ + { + "category": category, + "parameters": { + "severities": severities, + "securityRiskIDs": [securityRiskIDs] + + } + } + ], + "notifications": [ + { + "provider": "jira", + "jiraTicketIdentifiers": [ + { + "siteId": siteId, + "projectId": projectId, + "issueTypeId": issueTypeId, + "fields": {} + } + ] + } + ] + } + return workflow_body + + def build_vulnerabilities_workflow_body(self, name, severities, siteId, projectId, cluster, namespace, category, cvss, issueTypeId, guid=None): + workflow_body = { + 
"guid": guid, + "updatedTime": "", + "updatedBy": "", + "enabled": True, + "name": name, + "scope": [ + { + "cluster": cluster, + "namespace": namespace + } + ], + "conditions": [ + { + "category": category, + "parameters": { + "severities": severities, + "cvss": cvss, + "inUse": True, + "fixable": True + } + } + ], + "notifications": [ + { + "provider": "jira", + "jiraTicketIdentifiers": [ + { + "siteId": siteId, + "projectId": projectId, + "issueTypeId": issueTypeId, + "fields": {} + } + ] + } + ] + } + return workflow_body + + def build_compliance_workflow_body(self, name, siteId, projectId, cluster, namespace, category, driftPercentage, issueTypeId, guid=None): + workflow_body = { + "guid": guid, + "updatedTime": "", + "updatedBy": "", + "enabled": True, + "name": name, + "scope": [ + { + "cluster": cluster, + "namespace": namespace + } + ], + "conditions": [ + { + "category": category, + "parameters": { + "driftPercentage": driftPercentage + } + } + ], + "notifications": [ + { + "provider": "jira", + "jiraTicketIdentifiers": [ + { + "siteId": siteId, + "projectId": projectId, + "issueTypeId": issueTypeId, + "fields": {} + } + ] + } + ] + } + return workflow_body + + + def validate_workflow(self, expected_name, expected_provider): + workflows = self.backend.get_workflows() + assert workflows["total"]["value"] >= 1, f"Expected total value to be greater or equal to 1, but got {workflows['total']['value']}" + + found = False + for workflow in workflows["response"]: + if workflow["name"] == expected_name: + provider = workflow["notifications"][0]["provider"] + assert provider == expected_provider, f"Expected provider {expected_provider} but got {provider}" + found = True + break + + assert found, f"Workflow with name {expected_name} not found" + diff --git a/tests_scripts/workflows/slack_workflows.py b/tests_scripts/workflows/slack_workflows.py new file mode 100644 index 00000000..d963ba89 --- /dev/null +++ b/tests_scripts/workflows/slack_workflows.py @@ -0,0 
+1,330 @@ +from tests_scripts.workflows.workflows import Workflows + +from tests_scripts.workflows.utils import ( + get_env, + NOTIFICATIONS_SVC_DELAY, + NOTIFICATIONS_SVC_DELAY_FIRST_SCAN, + EXPECTED_CREATE_RESPONSE, + SLACK_CHANNEL_NAME, + SECURITY_RISKS, + SECURITY_RISKS_ID, + VULNERABILITIES, + SEVERITIES_CRITICAL, + SEVERITIES_HIGH, + VULNERABILITIES_WORKFLOW_NAME, + SECURITY_RISKS_WORKFLOW_NAME, + COMPLIANCE_WORKFLOW_NAME, + COMPLIANCE +) +from systest_utils import Logger, TestUtil +import time +from infrastructure import KubectlWrapper +from systest_utils import Logger, statics, TestUtil + + + +class WorkflowsSlackNotifications(Workflows): + + def __init__(self, test_obj=None, backend=None, kubernetes_obj=None, test_driver=None): + super(Workflows, self).__init__(test_driver=test_driver, test_obj=test_obj, backend=backend, + kubernetes_obj=kubernetes_obj) + self.fw_name = "systest-fw-colima" + self.cluster = None + self.wait_for_agg_to_end = False + + + def start(self): + """ + Agenda: + 1. Post custom framework + 2. Create new workflows + 3. Validate workflows created successfully + 4. Apply deployment + 5. Install kubescape with helm-chart + 6. Trigger first scan + 7. Apply second deployment + 8. Update custom framework + 9. Add SA to cluster-admin + 10. Trigger second scan + 11. Assert all messages sent + 12. 
Cleanup + """ + + assert self.backend is not None, f'the test {self.test_driver.test_name} must run with backend' + + self.cluster, namespace = self.setup(apply_services=False) + + Logger.logger.info("Stage 1: Post custom framework") + self.fw_name = "systest-fw-" + self.cluster + _, fw = self.post_custom_framework(framework_file="system-test-framework-high-comp.json", + cluster_name=self.cluster) + + + Logger.logger.info("Stage 2: Create new workflows") + workflow_body = self.build_securityRisk_workflow_body(name=SECURITY_RISKS_WORKFLOW_NAME, severities=SEVERITIES_CRITICAL, channel_name=SLACK_CHANNEL_NAME, channel_id=get_env("SLACK_CHANNEL_ID"), cluster=self.cluster, namespace=None, category=SECURITY_RISKS, securityRiskIDs=SECURITY_RISKS_ID) + self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) + workflow_body = self.build_vulnerabilities_workflow_body(name=VULNERABILITIES_WORKFLOW_NAME, severities=SEVERITIES_HIGH, channel_name=SLACK_CHANNEL_NAME, channel_id=get_env("SLACK_CHANNEL_ID"), cluster=self.cluster, namespace=None, category=VULNERABILITIES, cvss=6) + self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) + workflow_body = self.build_compliance_workflow_body(name=COMPLIANCE_WORKFLOW_NAME, channel_name=SLACK_CHANNEL_NAME, channel_id=get_env("SLACK_CHANNEL_ID"), cluster=self.cluster, namespace=None, category=COMPLIANCE, driftPercentage=15) + self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) + before_test_message_ts = time.time() + + Logger.logger.info("Stage 3: Validate workflows created successfully") + self.validate_workflow(VULNERABILITIES_WORKFLOW_NAME, SLACK_CHANNEL_NAME) + self.validate_workflow(SECURITY_RISKS_WORKFLOW_NAME, SLACK_CHANNEL_NAME) + self.validate_workflow(COMPLIANCE_WORKFLOW_NAME, SLACK_CHANNEL_NAME) + + Logger.logger.info('Stage 4: Apply deployment') + workload_objs: list = self.apply_directory(path=self.test_obj["deployments"], 
namespace=namespace) + self.verify_all_pods_are_running(namespace=namespace, workload=workload_objs, timeout=240) + + Logger.logger.info('Stage 6: Install kubescape with helm-chart') + self.install_kubescape() + + + Logger.logger.info('Stage 7: Trigger first scan') + self.backend.create_kubescape_job_request(cluster_name=self.cluster, framework_list=[self.fw_name]) + TestUtil.sleep(NOTIFICATIONS_SVC_DELAY_FIRST_SCAN, "waiting for first scan to be saved in notification service") + + Logger.logger.info('Stage 8: Apply second deployment') + workload_objs: list = self.apply_directory(path=self.test_obj["deployments1"], namespace=namespace) + self.verify_all_pods_are_running(namespace=namespace, workload=workload_objs, timeout=240) + + + Logger.logger.info('Stage 9: Update custom framework') + self.put_custom_framework(framework_file="system-test-framework-low-comp.json", + framework_guid=fw['guid'], cluster_name=self.cluster) + + Logger.logger.info('Stage 10: Add SA to cluster-admin') + KubectlWrapper.add_new_service_account_to_cluster_admin(service_account="service-account", + namespace=namespace) + + Logger.logger.info('Stage 11: Trigger second scan') + self.backend.create_kubescape_job_request(cluster_name=self.cluster, framework_list=[self.fw_name]) + TestUtil.sleep(NOTIFICATIONS_SVC_DELAY, "waiting for second scan to be saved in notification service") + + Logger.logger.info('Stage 12: Assert all messages sent') + self.assert_messages_sent(before_test_message_ts, self.cluster) + + Logger.logger.info('Stage 13: Cleanup') + return self.cleanup() + + + def cleanup(self, **kwargs): + self.delete_and_assert_workflow(self.return_workflow_guid(SECURITY_RISKS_WORKFLOW_NAME)) + self.delete_and_assert_workflow(self.return_workflow_guid(VULNERABILITIES_WORKFLOW_NAME)) + self.delete_and_assert_workflow(self.return_workflow_guid(COMPLIANCE_WORKFLOW_NAME)) + return super().cleanup(**kwargs) + + + def post_custom_framework(self, framework_file, cluster_name: str): + 
framework_name, ks_custom_fw = self.create_ks_custom_fw(cluster_name=cluster_name, + framework_file=framework_file) + report_fw, _ = self.wait_for_report(report_type=self.backend.post_custom_framework, fw_object=ks_custom_fw) + return ks_custom_fw, report_fw + + def put_custom_framework(self, framework_file, framework_guid: str, cluster_name: str): + framework_name, ks_custom_fw = self.create_ks_custom_fw(cluster_name=cluster_name, + framework_file=framework_file, + framework_guid=framework_guid) + report_fw, _ = self.wait_for_report(report_type=self.backend.put_custom_framework, fw_object=ks_custom_fw) + return ks_custom_fw, report_fw + + def assert_security_risks_message_sent(self, messages, cluster): + found = 0 + for message in messages: + message_string = str(message) + if "Risk:" in message_string and cluster in message_string: + found += 1 + assert found > 0, "expected to have at least one security risk message" + + def assert_vulnerability_message_sent(self, messages, cluster): + found = 0 + for message in messages: + message_string = str(message) + if "New Vulnerability found" in message_string and cluster: + found += 1 + assert found > 0, "expected to have at least one vulnerability message" + + def assert_misconfiguration_message_sent(self, messages, cluster): + found = 0 + for message in messages: + message_string = str(message) + if "Your compliance score has decreased by" in message_string and cluster in message_string: + found += 1 + assert found > 0, f"expected to have exactly one new misconfiguration message, found {found}" + + + + def assert_messages_sent(self, begin_time, cluster): + for i in range(5): + try: + messages = self.test_obj["getMessagesFunc"](begin_time) + found = str(messages).count(cluster) + assert found > 1, f"expected to have at least 1 messages, found {found}" + self.assert_security_risks_message_sent(messages, cluster) + self.assert_vulnerability_message_sent(messages, cluster) + 
self.assert_misconfiguration_message_sent(messages, cluster) + except AssertionError: + if i == 0: + raise + TestUtil.sleep(30, "waiting additional 30 seconds for messages to arrive") + + + def install_kubescape(self, helm_kwargs: dict = None): + self.add_and_upgrade_armo_to_repo() + self.install_armo_helm_chart(helm_kwargs=helm_kwargs) + self.verify_running_pods(namespace=statics.CA_NAMESPACE_FROM_HELM_NAME) + + + def create_and_assert_workflow(self, workflow_body, expected_response, update=False): + if update: + workflow_res = self.backend.update_workflow(body=workflow_body) + else: + workflow_res = self.backend.create_workflow(body=workflow_body) + + + assert workflow_res == expected_response, f"Expected {expected_response}, but got {workflow_res['response']}" + return workflow_res + + def delete_and_assert_workflow(self, workflow_guid): + workflow_delete_res = self.backend.delete_workflow(workflow_guid) + assert workflow_delete_res == "Workflow deleted", f"Expected 'Workflow deleted', but got {workflow_delete_res['response']}" + workflows = self.backend.get_workflows()["response"] + for workflow in workflows: + assert workflow["guid"] != workflow_guid, f"Expected workflow with guid {workflow_guid} to be deleted, but it still exists" + + + def return_workflow_guid(self, workflow_name): + workflows = self.backend.get_workflows()["response"] + for workflow in workflows: + if workflow["name"] == workflow_name: + return workflow["guid"] + print(f"Workflow with name {workflow_name} not found") + return None + + def build_securityRisk_workflow_body(self, name, severities, channel_name, channel_id, cluster, namespace, category, securityRiskIDs, guid=None): + workflow_body = { + "guid": guid, + "updatedTime": "", + "updatedBy": "", + "enabled": True, + "name": name, + "scope": [ + { + "cluster": cluster, + "namespace": namespace + } + ], + "conditions": [ + { + "category": category, + "parameters": { + "severities": severities, + "securityRiskIDs": [securityRiskIDs] + + 
} + } + ], + "notifications": [ + { + "provider": "slack", + "slackChannels": [ + { + "id": channel_id, + "name": channel_name + } + ] + } + ] + } + return workflow_body + + def build_vulnerabilities_workflow_body(self, name, severities, channel_name, channel_id, cluster, namespace, category, cvss, guid=None): + workflow_body = { + "guid": guid, + "updatedTime": "", + "updatedBy": "", + "enabled": True, + "name": name, + "scope": [ + { + "cluster": cluster, + "namespace": namespace + } + ], + "conditions": [ + { + "category": category, + "parameters": { + "severities": severities, + "cvss": cvss, + "inUse": True, + "fixable": True + } + } + ], + "notifications": [ + { + "provider": "slack", + "slackChannels": [ + { + "id": channel_id, + "name": channel_name + } + ] + } + ] + } + return workflow_body + + def build_compliance_workflow_body(self, name, channel_name, channel_id, cluster, namespace, category, driftPercentage, guid=None): + workflow_body = { + "guid": guid, + "updatedTime": "", + "updatedBy": "", + "enabled": True, + "name": name, + "scope": [ + { + "cluster": cluster, + "namespace": namespace + } + ], + "conditions": [ + { + "category": category, + "parameters": { + "driftPercentage": driftPercentage + } + } + ], + "notifications": [ + { + "provider": "slack", + "slackChannels": [ + { + "id": channel_id, + "name": channel_name + } + ] + } + ] + } + return workflow_body + + + def validate_workflow(self, expected_name, expected_slack_channel): + workflows = self.backend.get_workflows() + assert workflows["total"]["value"] >= 1, f"Expected total value to be greater or equal to 1, but got {workflows['total']['value']}" + + found = False + for workflow in workflows["response"]: + if workflow["name"] == expected_name: + slack_channel = workflow["notifications"][0]["slackChannels"][0]["name"] + assert slack_channel == expected_slack_channel, f"Expected slack channel {expected_slack_channel} but got {slack_channel}" + + found = True + break + + assert found, 
f"Workflow with name {expected_name} not found" \ No newline at end of file diff --git a/tests_scripts/workflows/teams_workflows.py b/tests_scripts/workflows/teams_workflows.py new file mode 100644 index 00000000..4a04af3b --- /dev/null +++ b/tests_scripts/workflows/teams_workflows.py @@ -0,0 +1,332 @@ +from tests_scripts.workflows.workflows import Workflows +from tests_scripts.workflows.utils import ( + get_env, + NOTIFICATIONS_SVC_DELAY, + NOTIFICATIONS_SVC_DELAY_FIRST_SCAN, + EXPECTED_CREATE_RESPONSE, + TEAMS_CHANNEL_NAME, + SECURITY_RISKS, + SECURITY_RISKS_ID, + VULNERABILITIES, + SEVERITIES_CRITICAL, + SEVERITIES_HIGH, + VULNERABILITIES_WORKFLOW_NAME, + SECURITY_RISKS_WORKFLOW_NAME, + COMPLIANCE_WORKFLOW_NAME, + COMPLIANCE +) +from systest_utils import Logger, TestUtil +import time +from infrastructure import KubectlWrapper +from systest_utils import Logger, statics, TestUtil + + + + +class WorkflowsTeamsNotifications(Workflows): + def __init__(self, test_obj=None, backend=None, kubernetes_obj=None, test_driver=None): + super(Workflows, self).__init__(test_driver=test_driver, test_obj=test_obj, backend=backend, + kubernetes_obj=kubernetes_obj) + self.fw_name = None + self.cluster = None + self.wait_for_agg_to_end = False + + + def start(self): + """ + Agenda: + 1. Post custom framework + 2. Create new workflows + 3. Validate workflows created successfully + 4. Apply deployment + 5. Install kubescape with helm-chart + 6. Trigger first scan + 7. Apply second deployment + 8. Update custom framework + 9. Add SA to cluster-admin + 10. Trigger second scan + 11. Assert all messages sent + 12. 
Cleanup + """ + + assert self.backend is not None, f'the test {self.test_driver.test_name} must run with backend' + + self.cluster, namespace = self.setup(apply_services=False) + + Logger.logger.info("Stage 1: Post custom framework") + self.fw_name = "systest-fw-" + self.cluster + _, fw = self.post_custom_framework(framework_file="system-test-framework-high-comp.json", + cluster_name=self.cluster) + + + Logger.logger.info("Stage 2: Create new workflows") + workflow_body = self.build_securityRisk_workflow_body(name=SECURITY_RISKS_WORKFLOW_NAME, severities=SEVERITIES_CRITICAL, channel_name=TEAMS_CHANNEL_NAME, channel_guid=get_env("TEAMS_CHANNEL_GUID"), cluster=self.cluster, namespace=None, category=SECURITY_RISKS, webhook_url=get_env("WEBHOOK_URL"), securityRiskIDs=SECURITY_RISKS_ID) + self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) + workflow_body = self.build_vulnerabilities_workflow_body(name=VULNERABILITIES_WORKFLOW_NAME, severities=SEVERITIES_HIGH, channel_name=TEAMS_CHANNEL_NAME, channel_guid=get_env("TEAMS_CHANNEL_GUID"), cluster=self.cluster, namespace=None, category=VULNERABILITIES, cvss=6, webhook_url=get_env("WEBHOOK_URL")) + self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) + workflow_body = self.build_compliance_workflow_body(name=COMPLIANCE_WORKFLOW_NAME, channel_name=TEAMS_CHANNEL_NAME, channel_guid=get_env("TEAMS_CHANNEL_GUID"), cluster=self.cluster, namespace=None, category=COMPLIANCE, driftPercentage=15, webhook_url=get_env("WEBHOOK_URL")) + self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) + before_test_message_ts = time.time() + + Logger.logger.info("Stage 3: Validate workflows created successfully") + self.validate_workflow(VULNERABILITIES_WORKFLOW_NAME, TEAMS_CHANNEL_NAME) + self.validate_workflow(SECURITY_RISKS_WORKFLOW_NAME, TEAMS_CHANNEL_NAME) + self.validate_workflow(COMPLIANCE_WORKFLOW_NAME, TEAMS_CHANNEL_NAME) + + 
Logger.logger.info('Stage 4: Apply deployment') + workload_objs: list = self.apply_directory(path=self.test_obj["deployments"], namespace=namespace) + self.verify_all_pods_are_running(namespace=namespace, workload=workload_objs, timeout=240) + + Logger.logger.info('Stage 6: Install kubescape with helm-chart') + self.install_kubescape() + + + Logger.logger.info('Stage 7: Trigger first scan') + self.backend.create_kubescape_job_request(cluster_name=self.cluster, framework_list=[self.fw_name]) + TestUtil.sleep(NOTIFICATIONS_SVC_DELAY_FIRST_SCAN, "waiting for first scan to be saved in notification service") + + Logger.logger.info('Stage 8: Apply second deployment') + workload_objs: list = self.apply_directory(path=self.test_obj["deployments1"], namespace=namespace) + self.verify_all_pods_are_running(namespace=namespace, workload=workload_objs, timeout=240) + + + Logger.logger.info('Stage 9: Update custom framework') + self.put_custom_framework(framework_file="system-test-framework-low-comp.json", + framework_guid=fw['guid'], cluster_name=self.cluster) + + Logger.logger.info('Stage 10: Add SA to cluster-admin') + KubectlWrapper.add_new_service_account_to_cluster_admin(service_account="service-account", + namespace=namespace) + + Logger.logger.info('Stage 11: Trigger second scan') + self.backend.create_kubescape_job_request(cluster_name=self.cluster, framework_list=[self.fw_name]) + TestUtil.sleep(NOTIFICATIONS_SVC_DELAY, "waiting for first scan to be saved in notification service") + + Logger.logger.info('Stage 12: Assert all messages sent') + self.assert_messages_sent(before_test_message_ts, self.cluster) + + Logger.logger.info('Stage 13: Cleanup') + return self.cleanup() + + + + def cleanup(self, **kwargs): + self.delete_and_assert_workflow(self.return_workflow_guid(SECURITY_RISKS_WORKFLOW_NAME)) + self.delete_and_assert_workflow(self.return_workflow_guid(VULNERABILITIES_WORKFLOW_NAME)) + 
self.delete_and_assert_workflow(self.return_workflow_guid(COMPLIANCE_WORKFLOW_NAME)) + return super().cleanup(**kwargs) + + + def post_custom_framework(self, framework_file, cluster_name: str): + framework_name, ks_custom_fw = self.create_ks_custom_fw(cluster_name=cluster_name, + framework_file=framework_file) + report_fw, _ = self.wait_for_report(report_type=self.backend.post_custom_framework, fw_object=ks_custom_fw) + return ks_custom_fw, report_fw + + def put_custom_framework(self, framework_file, framework_guid: str, cluster_name: str): + framework_name, ks_custom_fw = self.create_ks_custom_fw(cluster_name=cluster_name, + framework_file=framework_file, + framework_guid=framework_guid) + report_fw, _ = self.wait_for_report(report_type=self.backend.put_custom_framework, fw_object=ks_custom_fw) + return ks_custom_fw, report_fw + + def assert_security_risks_message_sent(self, messages, cluster): + found = 0 + for message in messages: + message_string = str(message) + if "Risk:" in message_string and cluster in message_string: + found += 1 + assert found > 0, "expected to have at least one security risk message" + + def assert_vulnerability_message_sent(self, messages, cluster): + found = 0 + for message in messages: + message_string = str(message) + if "New Vulnerability found" in message_string and cluster: + found += 1 + assert found > 0, "expected to have at least one vulnerability message" + + def assert_misconfiguration_message_sent(self, messages, cluster): + found = 0 + for message in messages: + message_string = str(message) + if "Your compliance score has decreased by" in message_string and cluster in message_string: + found += 1 + assert found > 0, f"expected to have exactly one new misconfiguration message, found {found}" + + + + def assert_messages_sent(self, begin_time, cluster): + for i in range(5): + try: + messages = self.test_obj["getMessagesFunc"](begin_time) + found = str(messages).count(cluster) + assert found > 1, f"expected to have at least 1 
messages, found {found}"
+                self.assert_security_risks_message_sent(messages, cluster)
+                self.assert_vulnerability_message_sent(messages, cluster)
+                self.assert_misconfiguration_message_sent(messages, cluster)
+            except AssertionError:
+                if i == 4:
+                    raise
+            TestUtil.sleep(30, "waiting additional 30 seconds for messages to arrive")
+
+
+    def install_kubescape(self, helm_kwargs: dict = None):
+        self.add_and_upgrade_armo_to_repo()
+        self.install_armo_helm_chart(helm_kwargs=helm_kwargs)
+        self.verify_running_pods(namespace=statics.CA_NAMESPACE_FROM_HELM_NAME)
+
+
+    def create_and_assert_workflow(self, workflow_body, expected_response, update=False):
+        if update:
+            workflow_res = self.backend.update_workflow(body=workflow_body)
+        else:
+            workflow_res = self.backend.create_workflow(body=workflow_body)
+
+
+        assert workflow_res == expected_response, f"Expected {expected_response}, but got {workflow_res['response']}"
+        return workflow_res
+
+    def delete_and_assert_workflow(self, workflow_guid):
+        workflow_delete_res = self.backend.delete_workflow(workflow_guid)
+        assert workflow_delete_res == "Workflow deleted", f"Expected 'Workflow deleted', but got {workflow_delete_res['response']}"
+        workflows = self.backend.get_workflows()["response"]
+        for workflow in workflows:
+            assert workflow["guid"] != workflow_guid, f"Expected workflow with guid {workflow_guid} to be deleted, but it still exists"
+        # NOTE: do not call self.cleanup() here; cleanup() calls delete_and_assert_workflow(), which would recurse infinitely
+
+    def return_workflow_guid(self, workflow_name):
+        workflows = self.backend.get_workflows()["response"]
+        for workflow in workflows:
+            if workflow["name"] == workflow_name:
+                return workflow["guid"]
+        print(f"Workflow with name {workflow_name} not found")
+        return None
+
+    def build_securityRisk_workflow_body(self, name, severities, channel_name, channel_guid, cluster, namespace, category, webhook_url, securityRiskIDs, guid=None):
+        workflow_body = {
+            "guid": guid,
+            "updatedTime": "",
+            "updatedBy": "",
+            "enabled": True,
+            "name": name,
+            "scope": [
+                {
+                    "cluster": 
cluster, + "namespace": namespace + } + ], + "conditions": [ + { + "category": category, + "parameters": { + "severities": severities, + "securityRiskIDs": [securityRiskIDs] + } + } + ], + "notifications": [ + { + "provider": "teams", + "teamsChannels": [ + { + "guid": channel_guid, + "name": channel_name, + "webhookURL": webhook_url + } + ] + } + ] + } + return workflow_body + + def build_vulnerabilities_workflow_body(self, name, severities, channel_name, channel_guid, cluster, namespace, category, cvss, webhook_url, guid=None): + workflow_body = { + "guid": guid, + "updatedTime": "", + "updatedBy": "", + "enabled": True, + "name": name, + "scope": [ + { + "cluster": cluster, + "namespace": namespace + } + ], + "conditions": [ + { + "category": category, + "parameters": { + "severities": severities, + "cvss": cvss, + "inUse": True, + "fixable": True + } + } + ], + "notifications": [ + { + "provider": "teams", + "teamsChannels": [ + { + "guid": channel_guid, + "name": channel_name, + "webhookURL": webhook_url + } + ] + } + ] + } + return workflow_body + + def build_compliance_workflow_body(self, name, channel_name, channel_guid, cluster, namespace, category, driftPercentage, webhook_url, guid=None): + workflow_body = { + "guid": guid, + "updatedTime": "", + "updatedBy": "", + "enabled": True, + "name": name, + "scope": [ + { + "cluster": cluster, + "namespace": namespace + } + ], + "conditions": [ + { + "category": category, + "parameters": { + "driftPercentage": driftPercentage + } + } + ], + "notifications": [ + { + "provider": "teams", + "teamsChannels": [ + { + "guid": channel_guid, + "name": channel_name, + "webhookURL": webhook_url + } + ] + } + ] + } + return workflow_body + + + def validate_workflow(self, expected_name, expected_teams_channel): + workflows = self.backend.get_workflows() + assert workflows["total"]["value"] >= 1, f"Expected total value to be greater or equal to 1, but got {workflows['total']['value']}" + + found = False + for workflow in 
workflows["response"]: + if workflow["name"] == expected_name: + teams_channel = workflow["notifications"][0]["teamsChannels"][0]["name"] + assert teams_channel == expected_teams_channel, f"Expected Teams channel {expected_teams_channel} but got {teams_channel}" + + found = True + break + + assert found, f"Workflow with name {expected_name} not found" diff --git a/tests_scripts/workflows/utils.py b/tests_scripts/workflows/utils.py new file mode 100644 index 00000000..7e35364f --- /dev/null +++ b/tests_scripts/workflows/utils.py @@ -0,0 +1,105 @@ +from slack_sdk import WebClient +import requests +from datetime import datetime +from configurations.system.tests_cases.structures import TestConfiguration +from systest_utils import Logger +import os + + + + + +# tests constants +NOTIFICATIONS_SVC_DELAY_FIRST_SCAN = 7 * 60 +NOTIFICATIONS_SVC_DELAY = 7 * 60 + +# severity levels +SEVERITIES_CRITICAL = ["Critical"] +SEVERITIES_HIGH = ["High"] + +# workflow names +WORKFLOW_NAME = "system_test_workflow" +UPDATED_WORKFLOW_NAME = "system_test_workflow_updated" +SECURITY_RISKS_WORKFLOW_NAME = "security_risks_workflow" +VULNERABILITIES_WORKFLOW_NAME = "vulnerabilities_workflow" +COMPLIANCE_WORKFLOW_NAME = "compliance_workflow" + +# channel provider names +SLACK_CHANNEL_NAME = "system_tests_slack" +TEAMS_CHANNEL_NAME = "system_tests_teams" +JIRA_PROVIDER_NAME = "jira" + + +# expected responses +EXPECTED_CREATE_RESPONSE = "Workflow created" +EXPECTED_UPDATE_RESPONSE = "Workflow updated" +EXPECTED_DELETE_RESPONSE = "Workflow deleted" + +# categories +SECURITY_RISKS = "SecurityRisks" +VULNERABILITIES = "Vulnerability" +COMPLIANCE = "Compliance" +SECURITY_RISKS_ID = "R_0017" + + +def get_access_token(): + url = "https://login.microsoftonline.com/50a70646-52e3-4e46-911e-6ca1b46afba3/oauth2/v2.0/token" + headers = { + 'Content-Type': 'application/x-www-form-urlencoded' + } + body = { + 'grant_type': 'client_credentials', + 'client_id': get_env("MS_TEAMS_CLIENT_ID"), + 'client_secret': 
get_env("MS_TEAMS_CLIENT_SECRET"),
+        'scope': 'https://graph.microsoft.com/.default'
+    }
+    response = requests.post(url, headers=headers, data=body)
+    return response.json().get('access_token')
+
+
+def get_messages_from_teams_channel(before_test):
+    before_test_utc = datetime.utcfromtimestamp(before_test).isoformat() + "Z"
+    endpoint = f'https://graph.microsoft.com/v1.0/teams/{get_env("TEAMS_ID")}/channels/{get_env("CHANNEL_ID")}' \
+               f'/messages/delta?$filter=lastModifiedDateTime gt {before_test_utc}'
+    headers = {
+        'Authorization': 'Bearer ' + get_access_token(),
+        'Accept': 'application/json',
+        'Content-Type': 'application/json'
+    }
+    response = requests.get(endpoint, headers=headers)
+    return response.json().get('value', [])
+
+
+def get_messages_from_slack_channel(before_test):
+    formatted_time = format(before_test, ".6f")
+    Logger.logger.info('Attempting to read messages from slack before timestamp ' + formatted_time)
+    client = WebClient(token=get_env("SLACK_SYSTEM_TEST_TOKEN"))
+    result = client.conversations_history(channel=f'{get_env("SLACK_CHANNEL_ID")}', oldest=formatted_time)
+    if result is not None and isinstance(result.data, dict) and 'messages' in result.data:
+        return result.data['messages']
+    else:
+        Logger.logger.info("No 'messages' key found in the result.")
+        return []
+
+def enrich_slack_alert_channel(data):
+    data["channel"]["context"]["channel"]["id"] = get_env("SLACK_CHANNEL_ID")
+
+def enrich_teams_alert_channel(data):
+    data["channel"]["context"]["webhook"]["id"] = get_env("CHANNEL_WEBHOOK")
+
+
+
+def mask_value(value):
+    if len(value) <= 6:
+        return "***"
+    return value[:3] + '*' * (len(value) - 6) + value[-3:]
+
+
+def get_env(env_var_name):
+    value = os.getenv(env_var_name)
+    if value is not None:
+        masked_value = mask_value(value)
+        Logger.logger.info(f"Environment variable '{env_var_name}' retrieved with value: {masked_value}")
+    else:
+        Logger.logger.info(f"Environment variable '{env_var_name}' not found.")
+    return value
\ No
newline at end of file diff --git a/tests_scripts/workflows/workflows.py b/tests_scripts/workflows/workflows.py index cba1ec73..87fe60aa 100644 --- a/tests_scripts/workflows/workflows.py +++ b/tests_scripts/workflows/workflows.py @@ -1,17 +1,14 @@ import json -import time - - from configurations.system.tests_cases.structures import TestConfiguration -from systest_utils import statics, Logger +from systest_utils import Logger from tests_scripts.helm.base_helm import BaseHelm class Workflows(BaseHelm): - def __init__(self, test_obj=None, backend=None, test_driver=None): - super().__init__(test_driver=test_driver, test_obj=test_obj, backend=backend) - self.test_obj: TestConfiguration = test_obj + def __init__(self, test_obj=None, backend=None, test_driver=None, kubernetes_obj=None): + super().__init__(test_driver=test_driver, test_obj=test_obj, backend=backend, kubernetes_obj=kubernetes_obj) + def active_workflow(self): @@ -24,51 +21,3 @@ def active_workflow(self): assert response["workflowsEnabled"] == True, f"workflowsEnabled is False" assert response["workflowsConverted"] == True, f"workflowsConverted is False" Logger.logger.info(f"active_workflow response: {response}") - - -class WorkflowsSlack(Workflows): - def __init__(self, test_obj=None, backend=None, test_driver=None): - super().__init__(test_driver=test_driver, test_obj=test_obj, backend=backend) - self.test_obj: TestConfiguration = test_obj - - def copy_slack_token(self): - # copy the slack token - res = self.backend.copy_slack_token(self.test_tenant_id) - response = json.loads(res.text) - Logger.logger.info(f"copy_slack_token response: {response}") - - def start(self): - super().active_workflow() - - # # copy the slack token only if we are under a test tenant - # if self.test_tenant_id != "": - # self.copy_slack_token() - - # ******************** - # COMPLETE TEST HERE - - # ******************** - - return self.cleanup() - - def cleanup(self, **kwargs): - return super().cleanup(**kwargs) - - -class 
WorkflowsTeams(Workflows): - def __init__(self, test_obj=None, backend=None, test_driver=None): - super().__init__(test_driver=test_driver, test_obj=test_obj, backend=backend) - self.test_obj: TestConfiguration = test_obj - - def start(self): - super().active_workflow() - - # ******************** - # COMPLETE TEST HERE - - # ******************** - - return self.cleanup() - - def cleanup(self, **kwargs): - return super().cleanup(**kwargs) From f66b3d8a9a332ed164f66da4908343edcc0e2933 Mon Sep 17 00:00:00 2001 From: jnathangreeg Date: Mon, 25 Nov 2024 09:42:19 +0200 Subject: [PATCH 2/5] after CR Signed-off-by: jnathangreeg --- .../slack-alert-channel.json | 51 ------------------ .../teams-alert-channel.json | 52 ------------------- tests_scripts/workflows/jira_workflows.py | 47 +---------------- 3 files changed, 2 insertions(+), 148 deletions(-) delete mode 100644 configurations/workflows_notifications/slack-alert-channel.json delete mode 100644 configurations/workflows_notifications/teams-alert-channel.json diff --git a/configurations/workflows_notifications/slack-alert-channel.json b/configurations/workflows_notifications/slack-alert-channel.json deleted file mode 100644 index 3af9171f..00000000 --- a/configurations/workflows_notifications/slack-alert-channel.json +++ /dev/null @@ -1,51 +0,0 @@ -{ - "channel": { - "name": "My Slack Channel", - "provider": "slack", - "context": { - "channel": { - "id": "abcd1234" - } - } - }, - "notifications": [ - { - "notificationType": "securityRiskPush:newSecurityRisk", - "disabled": false, - "parameters": { - "minSeverity": 300 - } - }, - { - "notificationType": "push:newClusterAdmin", - "disabled": false - }, - { - "notificationType": "containerScanPush:newVulnerability", - "disabled": false, - "parameters": { - "minSeverity": 500 - } - }, - { - "notificationType": "containerScanPush:vulnerabilityNewFix", - "disabled": false, - "parameters": { - "minSeverity": 500 - } - }, - { - "notificationType": "push:complianceDrift", - 
"disabled": false, - "parameters": { - "driftPercentage": 15 - } - } - ], - "scope": [ - { - "cluster": "cluster", - "namespaces": ["alerts"] - } - ] -} \ No newline at end of file diff --git a/configurations/workflows_notifications/teams-alert-channel.json b/configurations/workflows_notifications/teams-alert-channel.json deleted file mode 100644 index 346143d3..00000000 --- a/configurations/workflows_notifications/teams-alert-channel.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "channel": { - "name": "My Teams Channel", - "provider": "teams", - "context": { - "webhook": { - "name": "webhook", - "id": "https://teams/mywebhook" - } - } - }, - "notifications": [ - { - "notificationType": "securityRiskPush:newSecurityRisk", - "disabled": false, - "parameters": { - "minSeverity": 300 - } - }, - { - "notificationType": "push:newClusterAdmin", - "disabled": false - }, - { - "notificationType": "containerScanPush:newVulnerability", - "disabled": false, - "parameters": { - "minSeverity": 500 - } - }, - { - "notificationType": "containerScanPush:vulnerabilityNewFix", - "disabled": false, - "parameters": { - "minSeverity": 500 - } - }, - { - "notificationType": "push:complianceDrift", - "disabled": false, - "parameters": { - "driftPercentage": 15 - } - } - ], - "scope": [ - { - "cluster": "cluster", - "namespaces": ["alerts"] - } - ] -} \ No newline at end of file diff --git a/tests_scripts/workflows/jira_workflows.py b/tests_scripts/workflows/jira_workflows.py index 72b1cb7e..62b9f41c 100644 --- a/tests_scripts/workflows/jira_workflows.py +++ b/tests_scripts/workflows/jira_workflows.py @@ -11,9 +11,7 @@ SEVERITIES_CRITICAL, SEVERITIES_HIGH, VULNERABILITIES_WORKFLOW_NAME, - SECURITY_RISKS_WORKFLOW_NAME, - COMPLIANCE_WORKFLOW_NAME, - COMPLIANCE + SECURITY_RISKS_WORKFLOW_NAME ) from systest_utils import Logger, TestUtil import time @@ -64,14 +62,11 @@ def start(self): self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) workflow_body = 
self.build_vulnerabilities_workflow_body(name=VULNERABILITIES_WORKFLOW_NAME, severities=SEVERITIES_HIGH, siteId=get_env("JIRA_SITE_ID"), projectId=get_env("JIRA_PROJECT_ID"), cluster=self.cluster, namespace=None, category=VULNERABILITIES, cvss=6, issueTypeId=get_env("JIRA_ISSUE_TYPE_ID")) self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) - workflow_body = self.build_compliance_workflow_body(name=COMPLIANCE_WORKFLOW_NAME, siteId=get_env("JIRA_SITE_ID"), projectId=get_env("JIRA_PROJECT_ID"), cluster=self.cluster, namespace=None, category=COMPLIANCE, driftPercentage=15, issueTypeId=get_env("JIRA_ISSUE_TYPE_ID")) - self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) before_test_message_ts = time.time() Logger.logger.info("Stage 3: Validate workflows created successfully") self.validate_workflow(VULNERABILITIES_WORKFLOW_NAME, JIRA_PROVIDER_NAME) self.validate_workflow(SECURITY_RISKS_WORKFLOW_NAME, JIRA_PROVIDER_NAME) - self.validate_workflow(COMPLIANCE_WORKFLOW_NAME, JIRA_PROVIDER_NAME) Logger.logger.info('Stage 4: Apply deployment') workload_objs: list = self.apply_directory(path=self.test_obj["deployments"], namespace=namespace) @@ -113,7 +108,6 @@ def start(self): def cleanup(self, **kwargs): self.delete_and_assert_workflow(self.return_workflow_guid(SECURITY_RISKS_WORKFLOW_NAME)) self.delete_and_assert_workflow(self.return_workflow_guid(VULNERABILITIES_WORKFLOW_NAME)) - self.delete_and_assert_workflow(self.return_workflow_guid(COMPLIANCE_WORKFLOW_NAME)) return super().cleanup(**kwargs) @@ -271,44 +265,7 @@ def build_vulnerabilities_workflow_body(self, name, severities, siteId, projectI } return workflow_body - def build_compliance_workflow_body(self, name, siteId, projectId, cluster, namespace, category, driftPercentage, issueTypeId, guid=None): - workflow_body = { - "guid": guid, - "updatedTime": "", - "updatedBy": "", - "enabled": True, - "name": name, - "scope": [ - { - "cluster": cluster, - 
"namespace": namespace - } - ], - "conditions": [ - { - "category": category, - "parameters": { - "driftPercentage": driftPercentage - } - } - ], - "notifications": [ - { - "provider": "jira", - "jiraTicketIdentifiers": [ - { - "siteId": siteId, - "projectId": projectId, - "issueTypeId": issueTypeId, - "fields": {} - } - ] - } - ] - } - return workflow_body - - + def validate_workflow(self, expected_name, expected_provider): workflows = self.backend.get_workflows() assert workflows["total"]["value"] >= 1, f"Expected total value to be greater or equal to 1, but got {workflows['total']['value']}" From 8adeec8e20875b55b44138d439c613afee1dfc62 Mon Sep 17 00:00:00 2001 From: jnathangreeg Date: Mon, 25 Nov 2024 17:32:08 +0200 Subject: [PATCH 3/5] add jira test Signed-off-by: jnathangreeg --- .../system/tests_cases/workflows_tests.py | 21 ++-- infrastructure/backend_api.py | 4 +- tests_scripts/workflows/conf_workflows.py | 8 +- tests_scripts/workflows/jira_workflows.py | 106 +++++++----------- tests_scripts/workflows/slack_workflows.py | 7 +- tests_scripts/workflows/teams_workflows.py | 4 +- tests_scripts/workflows/workflows.py | 1 + 7 files changed, 64 insertions(+), 87 deletions(-) diff --git a/configurations/system/tests_cases/workflows_tests.py b/configurations/system/tests_cases/workflows_tests.py index 12f6fee1..261cdc30 100644 --- a/configurations/system/tests_cases/workflows_tests.py +++ b/configurations/system/tests_cases/workflows_tests.py @@ -40,18 +40,15 @@ def teams_notifications_workflows(): create_test_tenant = False ) - # @staticmethod - # def jira_notifications_workflows(): - # from tests_scripts.workflows.jira_workflows import WorkflowsJiraNotifications - # return TestConfiguration( - # name=inspect.currentframe().f_code.co_name, - # test_obj=WorkflowsJiraNotifications, - # deployments=join(DEFAULT_WORKFLOWS_DEPLOYMENT_PATH, "http"), - # deployments1=join(DEFAULT_WORKFLOWS_DEPLOYMENT_PATH, "http1"), - # # getMessagesFunc=, // TODO: implement this 
function - # # enrichAlertChannelFunc=, // TODO: implement this function - # create_test_tenant = False - # ) + @staticmethod + def jira_notifications_workflows(): + from tests_scripts.workflows.jira_workflows import WorkflowsJiraNotifications + return TestConfiguration( + name=inspect.currentframe().f_code.co_name, + test_obj=WorkflowsJiraNotifications, + deployments=join(DEFAULT_WORKFLOWS_DEPLOYMENT_PATH, "http"), + create_test_tenant = False + ) @staticmethod def workflows_configurations(): diff --git a/infrastructure/backend_api.py b/infrastructure/backend_api.py index 36c5141a..86cc7cfb 100644 --- a/infrastructure/backend_api.py +++ b/infrastructure/backend_api.py @@ -2430,8 +2430,8 @@ def get_security_risks_list(self, cluster_name=None, namespace=None, security_ri raise Exception( 'Error accessing dashboard. Request: get_security_risks_list "%s" (code: %d, message: %s)' % ( self.customer, r.status_code, r.text)) - return r - + return r.text + def get_security_risks_severities(self, cluster_name=None, namespace=None, security_risk_ids=[]): params = {"customerGUID": self.selected_tenant_id} diff --git a/tests_scripts/workflows/conf_workflows.py b/tests_scripts/workflows/conf_workflows.py index fab68ef6..2672022f 100644 --- a/tests_scripts/workflows/conf_workflows.py +++ b/tests_scripts/workflows/conf_workflows.py @@ -10,11 +10,12 @@ from configurations.system.tests_cases.structures import TestConfiguration from systest_utils import Logger -from tests_scripts.base_test import BaseTest +from tests_scripts.workflows.workflows import Workflows -class WorkflowConfigurations(BaseTest): + +class WorkflowConfigurations(Workflows): """ Check workflow - list, create, update, delete """ @@ -33,6 +34,7 @@ def start(self): 6. cleanup """ assert self.backend is not None, f'The test {self.test_driver.test_name} must run with backend' + # self.active_workflow(self.test_tenant_id) Logger.logger.info("1. 
create slack workflow") workflow_creation_body = self.build_slack_workflow_body(name=WORKFLOW_NAME, severities=SEVERITIES_CRITICAL, channel_name=SLACK_CHANNEL_NAME, channel_id=get_env("SLACK_CHANNEL_ID")) @@ -191,7 +193,7 @@ def delete_and_assert_workflow(self, workflow_guid): workflows = self.backend.get_workflows()["response"] for workflow in workflows: assert workflow["guid"] != workflow_guid, f"Expected workflow with guid {workflow_guid} to be deleted, but it still exists" - self.cleanup() + def return_workflow_guid(self, workflow_name): workflows = self.backend.get_workflows()["response"] diff --git a/tests_scripts/workflows/jira_workflows.py b/tests_scripts/workflows/jira_workflows.py index 62b9f41c..7a3ff940 100644 --- a/tests_scripts/workflows/jira_workflows.py +++ b/tests_scripts/workflows/jira_workflows.py @@ -1,7 +1,6 @@ from tests_scripts.workflows.workflows import Workflows from tests_scripts.workflows.utils import ( get_env, - NOTIFICATIONS_SVC_DELAY, NOTIFICATIONS_SVC_DELAY_FIRST_SCAN, EXPECTED_CREATE_RESPONSE, JIRA_PROVIDER_NAME, @@ -11,10 +10,12 @@ SEVERITIES_CRITICAL, SEVERITIES_HIGH, VULNERABILITIES_WORKFLOW_NAME, - SECURITY_RISKS_WORKFLOW_NAME + SECURITY_RISKS_WORKFLOW_NAME, + SECURITY_RISKS_ID ) from systest_utils import Logger, TestUtil import time +import json from infrastructure import KubectlWrapper from systest_utils import Logger, statics, TestUtil @@ -33,81 +34,53 @@ def __init__(self, test_obj=None, backend=None, kubernetes_obj=None, test_driver def start(self): """ Agenda: - 1. Post custom framework - 2. Create new workflows - 3. Validate workflows created successfully - 4. Apply deployment - 5. Install kubescape with helm-chart - 6. Trigger first scan - 7. Apply second deployment - 8. Update custom framework - 9. Add SA to cluster-admin - 10. Trigger second scan - 11. Assert all messages sent - 12. Cleanup + 1. Create new workflows + 2. Validate workflows created successfully + 3. Apply deployment + 4. 
Install kubescape with helm-chart + 5. Trigger first scan + 6. Assert jira ticket was created + 7. Cleanup """ assert self.backend is not None, f'the test {self.test_driver.test_name} must run with backend' - self.cluster, namespace = self.setup(apply_services=False) - - Logger.logger.info("Stage 1: Post custom framework") - self.fw_name = "systest-fw-" + self.cluster - _, fw = self.post_custom_framework(framework_file="system-test-framework-high-comp.json", - cluster_name=self.cluster) - - - Logger.logger.info("Stage 2: Create new workflows") + # self.backend.active_workflow(self.test_tenant_id) + + Logger.logger.info("Stage 1: Create new workflows") workflow_body = self.build_securityRisk_workflow_body(name=SECURITY_RISKS_WORKFLOW_NAME, severities=SEVERITIES_CRITICAL, siteId=get_env("JIRA_SITE_ID"), projectId=get_env("JIRA_PROJECT_ID"), cluster=self.cluster, namespace=None, category=SECURITY_RISKS, securityRiskIDs=SECURITY_RISKS_ID, issueTypeId=get_env("JIRA_ISSUE_TYPE_ID")) self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) - workflow_body = self.build_vulnerabilities_workflow_body(name=VULNERABILITIES_WORKFLOW_NAME, severities=SEVERITIES_HIGH, siteId=get_env("JIRA_SITE_ID"), projectId=get_env("JIRA_PROJECT_ID"), cluster=self.cluster, namespace=None, category=VULNERABILITIES, cvss=6, issueTypeId=get_env("JIRA_ISSUE_TYPE_ID")) - self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) - before_test_message_ts = time.time() + # workflow_body = self.build_vulnerabilities_workflow_body(name=VULNERABILITIES_WORKFLOW_NAME, severities=SEVERITIES_HIGH, siteId=get_env("JIRA_SITE_ID"), projectId=get_env("JIRA_PROJECT_ID"), cluster=self.cluster, namespace=None, category=VULNERABILITIES, cvss=6, issueTypeId=get_env("JIRA_ISSUE_TYPE_ID")) + # self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) - Logger.logger.info("Stage 3: Validate workflows created successfully") - 
self.validate_workflow(VULNERABILITIES_WORKFLOW_NAME, JIRA_PROVIDER_NAME) + Logger.logger.info("Stage 2: Validate workflows created successfully") self.validate_workflow(SECURITY_RISKS_WORKFLOW_NAME, JIRA_PROVIDER_NAME) + # self.validate_workflow(VULNERABILITIES_WORKFLOW_NAME, JIRA_PROVIDER_NAME) - Logger.logger.info('Stage 4: Apply deployment') + Logger.logger.info('Stage 3: Apply deployment') workload_objs: list = self.apply_directory(path=self.test_obj["deployments"], namespace=namespace) self.verify_all_pods_are_running(namespace=namespace, workload=workload_objs, timeout=240) - Logger.logger.info('Stage 6: Install kubescape with helm-chart') + Logger.logger.info('Stage 4: Install kubescape with helm-chart') self.install_kubescape() - - Logger.logger.info('Stage 7: Trigger first scan') + Logger.logger.info('Stage 5: Trigger first scan') self.backend.create_kubescape_job_request(cluster_name=self.cluster, framework_list=[self.fw_name]) TestUtil.sleep(NOTIFICATIONS_SVC_DELAY_FIRST_SCAN, "waiting for first scan to be saved in notification service") - - Logger.logger.info('Stage 8: Apply second deployment') - workload_objs: list = self.apply_directory(path=self.test_obj["deployments1"], namespace=namespace) - self.verify_all_pods_are_running(namespace=namespace, workload=workload_objs, timeout=240) - - - Logger.logger.info('Stage 9: Update custom framework') - self.put_custom_framework(framework_file="system-test-framework-low-comp.json", - framework_guid=fw['guid'], cluster_name=self.cluster) - - Logger.logger.info('Stage 10: Add SA to cluster-admin') - KubectlWrapper.add_new_service_account_to_cluster_admin(service_account="service-account", - namespace=namespace) - - Logger.logger.info('Stage 11: Trigger second scan') - self.backend.create_kubescape_job_request(cluster_name=self.cluster, framework_list=[self.fw_name]) - TestUtil.sleep(NOTIFICATIONS_SVC_DELAY, "waiting for first scan to be saved in notification service") - - Logger.logger.info('Stage 12: Assert 
all messages sent') - # // TODO: implement this function + + Logger.logger.info('Stage 6: Assert jira ticket was created') + r = self.backend.get_security_risks_list(cluster_name=self.cluster, security_risk_ids=[SECURITY_RISKS_ID]) + self.assert_security_risks_jira_ticket_created(response=r) - Logger.logger.info('Stage 13: Cleanup') + Logger.logger.info('Stage 7: Cleanup') return self.cleanup() def cleanup(self, **kwargs): self.delete_and_assert_workflow(self.return_workflow_guid(SECURITY_RISKS_WORKFLOW_NAME)) - self.delete_and_assert_workflow(self.return_workflow_guid(VULNERABILITIES_WORKFLOW_NAME)) + # self.delete_and_assert_workflow(self.return_workflow_guid(VULNERABILITIES_WORKFLOW_NAME)) + # self.backend.delete_tenant(self.test_tenant_id) return super().cleanup(**kwargs) @@ -124,14 +97,20 @@ def put_custom_framework(self, framework_file, framework_guid: str, cluster_name report_fw, _ = self.wait_for_report(report_type=self.backend.put_custom_framework, fw_object=ks_custom_fw) return ks_custom_fw, report_fw - def assert_security_risks_message_sent(self, messages, cluster): - found = 0 - for message in messages: - message_string = str(message) - if "Risk:" in message_string and cluster in message_string: - found += 1 - assert found > 0, "expected to have at least one security risk message" + def assert_security_risks_jira_ticket_created(self, response): + try: + response_json = json.loads(response) + except json.JSONDecodeError as e: + raise AssertionError(f"Response is not valid JSON: {e}") + risks = response_json.get("response", []) + assert len(risks) > 0, "No security risks found in the response" + + for risk in risks: + tickets = risk.get("tickets", []) + assert len(tickets) > 0, f"No tickets associated with security risk {risk.get('securityRiskID', 'Unknown')}" + + def assert_vulnerability_message_sent(self, messages, cluster): found = 0 for message in messages: @@ -150,10 +129,6 @@ def assert_misconfiguration_message_sent(self, messages, cluster): - # def 
assert_jira_ticket_created(self, begin_time, cluster): - - - def install_kubescape(self, helm_kwargs: dict = None): self.add_and_upgrade_armo_to_repo() self.install_armo_helm_chart(helm_kwargs=helm_kwargs) @@ -176,7 +151,6 @@ def delete_and_assert_workflow(self, workflow_guid): workflows = self.backend.get_workflows()["response"] for workflow in workflows: assert workflow["guid"] != workflow_guid, f"Expected workflow with guid {workflow_guid} to be deleted, but it still exists" - self.cleanup() def return_workflow_guid(self, workflow_name): workflows = self.backend.get_workflows()["response"] diff --git a/tests_scripts/workflows/slack_workflows.py b/tests_scripts/workflows/slack_workflows.py index d963ba89..a0ad606e 100644 --- a/tests_scripts/workflows/slack_workflows.py +++ b/tests_scripts/workflows/slack_workflows.py @@ -28,7 +28,7 @@ class WorkflowsSlackNotifications(Workflows): def __init__(self, test_obj=None, backend=None, kubernetes_obj=None, test_driver=None): super(Workflows, self).__init__(test_driver=test_driver, test_obj=test_obj, backend=backend, kubernetes_obj=kubernetes_obj) - self.fw_name = "systest-fw-colima" + self.fw_name = None self.cluster = None self.wait_for_agg_to_end = False @@ -50,8 +50,10 @@ def start(self): 12. 
Cleanup """ - assert self.backend is not None, f'the test {self.test_driver.test_name} must run with backend' + assert self.backend is not None, f'the test {self.test_driver.test_name} must run with backend' + # self.active_workflow(self.test_tenant_id) + # self.backend.copy_slack_token(tenantID=self.test_tenant_id) self.cluster, namespace = self.setup(apply_services=False) Logger.logger.info("Stage 1: Post custom framework") @@ -114,6 +116,7 @@ def cleanup(self, **kwargs): self.delete_and_assert_workflow(self.return_workflow_guid(SECURITY_RISKS_WORKFLOW_NAME)) self.delete_and_assert_workflow(self.return_workflow_guid(VULNERABILITIES_WORKFLOW_NAME)) self.delete_and_assert_workflow(self.return_workflow_guid(COMPLIANCE_WORKFLOW_NAME)) + # self.backend.delete_tenant(self.test_tenant_id) return super().cleanup(**kwargs) diff --git a/tests_scripts/workflows/teams_workflows.py b/tests_scripts/workflows/teams_workflows.py index 4a04af3b..5953098e 100644 --- a/tests_scripts/workflows/teams_workflows.py +++ b/tests_scripts/workflows/teams_workflows.py @@ -50,8 +50,8 @@ def start(self): """ assert self.backend is not None, f'the test {self.test_driver.test_name} must run with backend' - self.cluster, namespace = self.setup(apply_services=False) + # self.active_workflow(self.test_tenant_id) Logger.logger.info("Stage 1: Post custom framework") self.fw_name = "systest-fw-" + self.cluster @@ -114,6 +114,7 @@ def cleanup(self, **kwargs): self.delete_and_assert_workflow(self.return_workflow_guid(SECURITY_RISKS_WORKFLOW_NAME)) self.delete_and_assert_workflow(self.return_workflow_guid(VULNERABILITIES_WORKFLOW_NAME)) self.delete_and_assert_workflow(self.return_workflow_guid(COMPLIANCE_WORKFLOW_NAME)) + # self.backend.delete_tenant(self.test_tenant_id) return super().cleanup(**kwargs) @@ -193,7 +194,6 @@ def delete_and_assert_workflow(self, workflow_guid): workflows = self.backend.get_workflows()["response"] for workflow in workflows: assert workflow["guid"] != workflow_guid, 
f"Expected workflow with guid {workflow_guid} to be deleted, but it still exists" - self.cleanup() def return_workflow_guid(self, workflow_name): workflows = self.backend.get_workflows()["response"] diff --git a/tests_scripts/workflows/workflows.py b/tests_scripts/workflows/workflows.py index 87fe60aa..cc25756c 100644 --- a/tests_scripts/workflows/workflows.py +++ b/tests_scripts/workflows/workflows.py @@ -9,6 +9,7 @@ class Workflows(BaseHelm): def __init__(self, test_obj=None, backend=None, test_driver=None, kubernetes_obj=None): super().__init__(test_driver=test_driver, test_obj=test_obj, backend=backend, kubernetes_obj=kubernetes_obj) + def active_workflow(self): From d16433d2a3fcdc0c020a0159e43081c2c838979e Mon Sep 17 00:00:00 2001 From: jnathangreeg Date: Thu, 28 Nov 2024 11:29:23 +0200 Subject: [PATCH 4/5] finished version of workflows tests Signed-off-by: jnathangreeg --- infrastructure/backend_api.py | 2 +- system_test_mapping.json | 50 ++++++++++---------- tests_scripts/workflows/conf_workflows.py | 1 - tests_scripts/workflows/jira_workflows.py | 54 ++++++++++++++++++---- tests_scripts/workflows/slack_workflows.py | 3 -- tests_scripts/workflows/teams_workflows.py | 2 - 6 files changed, 70 insertions(+), 42 deletions(-) diff --git a/infrastructure/backend_api.py b/infrastructure/backend_api.py index 86cc7cfb..dd2a2c98 100644 --- a/infrastructure/backend_api.py +++ b/infrastructure/backend_api.py @@ -2367,7 +2367,7 @@ def get_vulns_v2(self, body: dict, expected_results: int = 0, scope: str = None) if scope: params = {"scope": scope} return self.post_list_request(API_VULNERABILITY_V2, body, expected_results, params=params) - + def get_vuln_v2_details(self, body: dict): return self.post_details_request(API_VULNERABILITY_V2, body) diff --git a/system_test_mapping.json b/system_test_mapping.json index 6567547c..4db72a4b 100644 --- a/system_test_mapping.json +++ b/system_test_mapping.json @@ -1379,7 +1379,7 @@ "users-notification-service" ], "description": 
"testing teams alert channels with compliance and vulnerabilities notifications", - "skip_on_environment": "production,production-us", + "skip_on_environment": "production,production-us,staging", "owner": "" }, "slack_alerts": { @@ -1393,7 +1393,7 @@ "users-notification-service" ], "description": "testing slack alert channels with compliance and vulnerabilities notifications", - "skip_on_environment": "production,production-us,custom", + "skip_on_environment": "production,production-us,custom,staging", "owner": "refaelm@armosec.io" }, "sr_r_0035_attack_chain": { @@ -1629,13 +1629,13 @@ "Backend" ], "target_repositories": [ - "users-notification-service-dummy", - "config-service-dummy", - "cadashboardbe-dummy", - "event-ingester-service-dummy" + "users-notification-service", + "config-service", + "cadashboardbe", + "event-ingester-service" ], - "description": "Checks slack workflows", - "skip_on_environment": "", + "description": "Checks workflows slack notifications", + "skip_on_environment": "production,production-us", "owner": "jonathang@armosec.io" }, "teams_notifications_workflows": { @@ -1643,13 +1643,13 @@ "Backend" ], "target_repositories": [ - "users-notification-service-dummy", - "config-service-dummy", - "cadashboardbe-dummy", - "event-ingester-service-dummy" + "users-notification-service", + "config-service", + "cadashboardbe", + "event-ingester-service" ], - "description": "Checks teams workflows", - "skip_on_environment": "", + "description": "Checks workflows teams notifications", + "skip_on_environment": "production,production-us", "owner": "jonathang@armosec.io" }, "jira_notifications_workflows": { @@ -1657,13 +1657,13 @@ "Backend" ], "target_repositories": [ - "users-notification-service-dummy", - "config-service-dummy", - "cadashboardbe-dummy", - "event-ingester-service-dummy" + "users-notification-service", + "config-service", + "cadashboardbe", + "event-ingester-service" ], - "description": "Checks teams workflows", - "skip_on_environment": "", 
+ "description": "Checks workflows jira notifications", + "skip_on_environment": "production,production-us", "owner": "jonathang@armosec.io" }, "workflows_configurations": { @@ -1671,13 +1671,13 @@ "Backend" ], "target_repositories": [ - "users-notification-service-dummy", - "config-service-dummy", - "cadashboardbe-dummy", - "event-ingester-service-dummy" + "users-notification-service", + "config-service", + "cadashboardbe", + "event-ingester-service" ], "description": "Checks workflows configurations", - "skip_on_environment": "", + "skip_on_environment": "production,production-us", "owner": "jonathang@armosec.io" } } \ No newline at end of file diff --git a/tests_scripts/workflows/conf_workflows.py b/tests_scripts/workflows/conf_workflows.py index 2672022f..697059c0 100644 --- a/tests_scripts/workflows/conf_workflows.py +++ b/tests_scripts/workflows/conf_workflows.py @@ -34,7 +34,6 @@ def start(self): 6. cleanup """ assert self.backend is not None, f'The test {self.test_driver.test_name} must run with backend' - # self.active_workflow(self.test_tenant_id) Logger.logger.info("1. 
create slack workflow") workflow_creation_body = self.build_slack_workflow_body(name=WORKFLOW_NAME, severities=SEVERITIES_CRITICAL, channel_name=SLACK_CHANNEL_NAME, channel_id=get_env("SLACK_CHANNEL_ID")) diff --git a/tests_scripts/workflows/jira_workflows.py b/tests_scripts/workflows/jira_workflows.py index 7a3ff940..f7e3aa53 100644 --- a/tests_scripts/workflows/jira_workflows.py +++ b/tests_scripts/workflows/jira_workflows.py @@ -45,17 +45,17 @@ def start(self): assert self.backend is not None, f'the test {self.test_driver.test_name} must run with backend' self.cluster, namespace = self.setup(apply_services=False) - # self.backend.active_workflow(self.test_tenant_id) + Logger.logger.info("Stage 1: Create new workflows") workflow_body = self.build_securityRisk_workflow_body(name=SECURITY_RISKS_WORKFLOW_NAME, severities=SEVERITIES_CRITICAL, siteId=get_env("JIRA_SITE_ID"), projectId=get_env("JIRA_PROJECT_ID"), cluster=self.cluster, namespace=None, category=SECURITY_RISKS, securityRiskIDs=SECURITY_RISKS_ID, issueTypeId=get_env("JIRA_ISSUE_TYPE_ID")) self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) - # workflow_body = self.build_vulnerabilities_workflow_body(name=VULNERABILITIES_WORKFLOW_NAME, severities=SEVERITIES_HIGH, siteId=get_env("JIRA_SITE_ID"), projectId=get_env("JIRA_PROJECT_ID"), cluster=self.cluster, namespace=None, category=VULNERABILITIES, cvss=6, issueTypeId=get_env("JIRA_ISSUE_TYPE_ID")) - # self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) + workflow_body = self.build_vulnerabilities_workflow_body(name=VULNERABILITIES_WORKFLOW_NAME, severities=SEVERITIES_HIGH, siteId=get_env("JIRA_SITE_ID"), projectId=get_env("JIRA_PROJECT_ID"), cluster=self.cluster, namespace=None, category=VULNERABILITIES, cvss=6, issueTypeId=get_env("JIRA_ISSUE_TYPE_ID")) + self.create_and_assert_workflow(workflow_body, EXPECTED_CREATE_RESPONSE, update=False) Logger.logger.info("Stage 2: Validate workflows 
created successfully") self.validate_workflow(SECURITY_RISKS_WORKFLOW_NAME, JIRA_PROVIDER_NAME) - # self.validate_workflow(VULNERABILITIES_WORKFLOW_NAME, JIRA_PROVIDER_NAME) + self.validate_workflow(VULNERABILITIES_WORKFLOW_NAME, JIRA_PROVIDER_NAME) Logger.logger.info('Stage 3: Apply deployment') workload_objs: list = self.apply_directory(path=self.test_obj["deployments"], namespace=namespace) @@ -68,21 +68,40 @@ def start(self): self.backend.create_kubescape_job_request(cluster_name=self.cluster, framework_list=[self.fw_name]) TestUtil.sleep(NOTIFICATIONS_SVC_DELAY_FIRST_SCAN, "waiting for first scan to be saved in notification service") - Logger.logger.info('Stage 6: Assert jira ticket was created') - r = self.backend.get_security_risks_list(cluster_name=self.cluster, security_risk_ids=[SECURITY_RISKS_ID]) - self.assert_security_risks_jira_ticket_created(response=r) + Logger.logger.info('Stage 6: Assert jira tickets was created') + self.assert_jira_tickets_was_created(self.cluster, SECURITY_RISKS_ID) Logger.logger.info('Stage 7: Cleanup') return self.cleanup() - def cleanup(self, **kwargs): self.delete_and_assert_workflow(self.return_workflow_guid(SECURITY_RISKS_WORKFLOW_NAME)) - # self.delete_and_assert_workflow(self.return_workflow_guid(VULNERABILITIES_WORKFLOW_NAME)) - # self.backend.delete_tenant(self.test_tenant_id) + self.delete_and_assert_workflow(self.return_workflow_guid(VULNERABILITIES_WORKFLOW_NAME)) return super().cleanup(**kwargs) + def assert_jira_tickets_was_created(self, cluster_name, security_risk_ids, ): + r = self.backend.get_security_risks_list(cluster_name=cluster_name, security_risk_ids=security_risk_ids) + self.assert_security_risks_jira_ticket_created(response=r) + body = { + "fields": { + "severity": "" + }, + "innerFilters": [ + { + "severity": "High", + "cluster": self.cluster, + "cvssInfo.baseScore": "6|greater", + "isRelevant": "Yes" + } + ], + "countFields": True + } + r2 = self.backend.get_vulns_v2(body=body, expected_results=1, 
scope=None)
+        self.assert_vulnerability_jira_ticket_created(response=r2)
+
+
 
 
     def post_custom_framework(self, framework_file, cluster_name: str):
         framework_name, ks_custom_fw = self.create_ks_custom_fw(cluster_name=cluster_name,
@@ -110,6 +129,21 @@ def assert_security_risks_jira_ticket_created(self, response):
             tickets = risk.get("tickets", [])
             assert len(tickets) > 0, f"No tickets associated with security risk {risk.get('securityRiskID', 'Unknown')}"
 
+    def assert_vulnerability_jira_ticket_created(self, response):
+        try:
+            response_json = json.loads(response)
+        except json.JSONDecodeError as e:
+            raise AssertionError(f"Response is not valid JSON: {e}")
+
+        vulnerabilities = response_json.get("response", [])
+        assert len(vulnerabilities) > 0, "No vulnerability found in the response"
+
+        for vulnerability in vulnerabilities:
+            tickets = vulnerability.get("tickets", [])
+            assert len(tickets) > 0, f"No tickets associated with security risk {vulnerability.get('securityRiskID', 'Unknown')}"
+
+
 
 
     def assert_vulnerability_message_sent(self, messages, cluster):
         found = 0
diff --git a/tests_scripts/workflows/slack_workflows.py b/tests_scripts/workflows/slack_workflows.py
index a0ad606e..da9bc239 100644
--- a/tests_scripts/workflows/slack_workflows.py
+++ b/tests_scripts/workflows/slack_workflows.py
@@ -52,8 +52,6 @@ def start(self):
 
         assert self.backend is not None, f'the test {self.test_driver.test_name} must run with backend'
 
-        # self.active_workflow(self.test_tenant_id)
-        # self.backend.copy_slack_token(tenantID=self.test_tenant_id)
         self.cluster, namespace = self.setup(apply_services=False)
 
         Logger.logger.info("Stage 1: Post custom framework")
@@ -116,7 +114,6 @@ def cleanup(self, **kwargs):
         self.delete_and_assert_workflow(self.return_workflow_guid(SECURITY_RISKS_WORKFLOW_NAME))
         self.delete_and_assert_workflow(self.return_workflow_guid(VULNERABILITIES_WORKFLOW_NAME))
         self.delete_and_assert_workflow(self.return_workflow_guid(COMPLIANCE_WORKFLOW_NAME))
-        # 
self.backend.delete_tenant(self.test_tenant_id) return super().cleanup(**kwargs) diff --git a/tests_scripts/workflows/teams_workflows.py b/tests_scripts/workflows/teams_workflows.py index 5953098e..62805bda 100644 --- a/tests_scripts/workflows/teams_workflows.py +++ b/tests_scripts/workflows/teams_workflows.py @@ -51,7 +51,6 @@ def start(self): assert self.backend is not None, f'the test {self.test_driver.test_name} must run with backend' self.cluster, namespace = self.setup(apply_services=False) - # self.active_workflow(self.test_tenant_id) Logger.logger.info("Stage 1: Post custom framework") self.fw_name = "systest-fw-" + self.cluster @@ -114,7 +113,6 @@ def cleanup(self, **kwargs): self.delete_and_assert_workflow(self.return_workflow_guid(SECURITY_RISKS_WORKFLOW_NAME)) self.delete_and_assert_workflow(self.return_workflow_guid(VULNERABILITIES_WORKFLOW_NAME)) self.delete_and_assert_workflow(self.return_workflow_guid(COMPLIANCE_WORKFLOW_NAME)) - # self.backend.delete_tenant(self.test_tenant_id) return super().cleanup(**kwargs) From fb123e2c73c5a6caba545f5c7255214574a3f952 Mon Sep 17 00:00:00 2001 From: jnathangreeg Date: Thu, 28 Nov 2024 15:01:35 +0200 Subject: [PATCH 5/5] change skip_on_environment Signed-off-by: jnathangreeg --- system_test_mapping.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system_test_mapping.json b/system_test_mapping.json index 4db72a4b..2bfc0519 100644 --- a/system_test_mapping.json +++ b/system_test_mapping.json @@ -1379,7 +1379,7 @@ "users-notification-service" ], "description": "testing teams alert channels with compliance and vulnerabilities notifications", - "skip_on_environment": "production,production-us,staging", + "skip_on_environment": "staging", "owner": "" }, "slack_alerts": { @@ -1393,7 +1393,7 @@ "users-notification-service" ], "description": "testing slack alert channels with compliance and vulnerabilities notifications", - "skip_on_environment": "production,production-us,custom,staging", + 
"skip_on_environment": "staging", "owner": "refaelm@armosec.io" }, "sr_r_0035_attack_chain": {