Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

adding kdr slack and teams tests #436

Merged
merged 6 commits into from
Aug 5, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 30 additions & 3 deletions configurations/system/tests_cases/runtime_tests.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
import inspect
from .structures import KubescapeConfiguration

from tests_scripts.runtime.alerts import enrich_slack_alert_notifications, enrich_teams_alert_notifications
from tests_scripts.users_notifications.alert_notifications import enrich_slack_alert_channel, enrich_teams_alert_channel, get_messages_from_slack_channel, get_messages_from_teams_channel
from .structures import KubescapeConfiguration, TestConfiguration
from os.path import join
from systest_utils.statics import DEFAULT_DEPLOYMENT_PATH, DEFAULT_SERVICE_PATH, DEFAULT_CONFIGMAP_PATH
from systest_utils.statics import DEFAULT_DEPLOYMENT_PATH



Expand All @@ -24,4 +27,28 @@ def kdr_runtime_policies_configurations():
name=inspect.currentframe().f_code.co_name,
test_obj=RuntimePoliciesConfigurations,
create_test_tenant=True,
)
)

@staticmethod
def kdr_teams_alerts():
    """Test configuration: KDR runtime incidents are delivered to a Teams channel."""
    # Imported lazily to avoid a module-level import cycle with the test scripts.
    from tests_scripts.runtime.alerts import IncidentsAlerts

    # The configuration name mirrors this function's name.
    config_name = inspect.currentframe().f_code.co_name
    return TestConfiguration(
        name=config_name,
        test_obj=IncidentsAlerts,
        deployments=join(DEFAULT_DEPLOYMENT_PATH, "redis_sleep_long"),
        getMessagesFunc=get_messages_from_teams_channel,
        enrichAlertChannelFunc=enrich_teams_alert_notifications,
    )

@staticmethod
def kdr_slack_alerts():
    """Test configuration: KDR runtime incidents are delivered to a Slack channel."""
    # Imported lazily to avoid a module-level import cycle with the test scripts.
    from tests_scripts.runtime.alerts import IncidentsAlerts

    # The configuration name mirrors this function's name.
    config_name = inspect.currentframe().f_code.co_name
    return TestConfiguration(
        name=config_name,
        test_obj=IncidentsAlerts,
        deployments=join(DEFAULT_DEPLOYMENT_PATH, "redis_sleep_long"),
        getMessagesFunc=get_messages_from_slack_channel,
        enrichAlertChannelFunc=enrich_slack_alert_notifications,
    )

26 changes: 26 additions & 0 deletions system_test_mapping.json
Original file line number Diff line number Diff line change
Expand Up @@ -1502,6 +1502,32 @@
"skip_on_environment": "",
"owner": ""
},
"kdr_slack_alerts": {
"target": [
"Backend"
],
"target_repositories": [
"cadashboardbe",
"event-ingester-service",
"config-service"
],
"description": "Test that KDR incidents are sent to Slack",
"skip_on_environment": "",
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

R U sure you want them to run on PROD?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

For now yes

"owner": "[email protected]"
},
"kdr_teams_alerts": {
"target": [
"Backend"
],
"target_repositories": [
"cadashboardbe",
"event-ingester-service",
"config-service"
],
"description": "Test that KDR incidents are sent to Teams",
"skip_on_environment": "",
"owner": "[email protected]"
},
"sr_ac_scan_status": {
"target": [
"In cluster",
Expand Down
4 changes: 4 additions & 0 deletions systest_utils/statics.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,10 @@
# notifications
DEFAULT_NOTIFICATIONS_PATHS = os.path.abspath(os.path.join('configurations', 'notifications'))
DEFAULT_NOTIFICATIONS_DEPLOYMENT_PATH = os.path.join(DEFAULT_NOTIFICATIONS_PATHS, 'deployments')
DEFAULT_NOTIFICATIONS_JOB_PATH = os.path.join(DEFAULT_NOTIFICATIONS_PATHS, 'jobs')

# kdr
DEFAULT_KDR_DEPLOYMENT_PATH = os.path.join(DEFAULT_K8S_PATHS, 'deployments')

DEFAULT_DEPLOY_INTEGRATIONS_PATH = os.path.join(DEFAULT_K8S_PATHS, 'integrations')
DEFAULT_INTEGRATIONS_PATH = os.path.abspath(os.path.join('configurations', 'integrations'))
Expand Down
30 changes: 30 additions & 0 deletions tests_scripts/helm/base_helm.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,24 @@
from tests_scripts.kubernetes.base_k8s import BaseK8S
import signal
import psutil
from kubernetes.dynamic import ResourceField
import json



DEFAULT_BRANCH = "release"

HTTPD_PROXY_CRT_PATH = os.path.join(statics.DEFAULT_HELM_PROXY_PATH, "httpd-proxy.crt")
HTTPD_PROXY_KEY_PATH = os.path.join(statics.DEFAULT_HELM_PROXY_PATH, "httpd-proxy.key")


class ResourceFieldEncoder(json.JSONEncoder):
    """JSON encoder that serializes kubernetes dynamic-client ResourceField
    objects by dumping their ``__dict__``; everything else is delegated to
    the stock encoder (which raises TypeError for unknown types)."""

    def default(self, obj):
        if not isinstance(obj, ResourceField):
            return super().default(obj)
        return obj.__dict__


class BaseHelm(BaseK8S):
def __init__(self, test_obj=None, backend=None, kubernetes_obj=None, test_driver=None, with_proxy=False):
super(BaseHelm, self).__init__(test_driver=test_driver, test_obj=test_obj, backend=backend,
Expand Down Expand Up @@ -183,6 +194,25 @@ def get_posture_frameworks(self, report_guid, framework_name: str = ""):
framework_name=framework_name, report_guid=report_guid)
return c_panel_info


def verify_application_profiles(self, wlids: list, namespace):
    """Assert every workload in *wlids* has a completed ApplicationProfile CRD.

    Lists the spdx.softwarecomposition.kubescape.io/v1beta1 ApplicationProfile
    resources in *namespace* via the dynamic client and checks that:
      1. at least ``len(wlids)`` profiles exist,
      2. each wlid appears in some profile's 'kubescape.io/wlid' annotation,
      3. no profile has a 'kubescape.io/status' annotation other than 'completed'.

    Raises AssertionError on any failed check; designed to be polled via
    ``wait_for_report`` until the profiles finalize.
    """
    Logger.logger.info("Get application profiles")
    k8s_data = self.kubernetes_obj.get_dynamic_client("spdx.softwarecomposition.kubescape.io/v1beta1",
                                                      "ApplicationProfile").get(namespace=namespace).items
    assert k8s_data is not None, "Failed to get application profiles"
    # More profiles than wlids is acceptable — other workloads in the
    # namespace may also have profiles.
    assert len(k8s_data) >= len(wlids), f"Failed to get all application profiles {len(k8s_data)}"
    Logger.logger.info(f"Application profiles are presented {len(k8s_data)}")
    ap_wlids = [i.metadata.annotations['kubescape.io/wlid'] for i in k8s_data]
    for i in wlids:
        assert i in ap_wlids, f"Failed to get application profile for {i}"
    # kubescape.io/status: completed, kubescape.io/completion: complete
    # i.metadata.annotations['kubescape.io/completion'] != 'complete' or
    not_complete_application_profiles = [i for i in k8s_data if
                                         i.metadata.annotations['kubescape.io/status'] != 'completed']

    assert len(
        not_complete_application_profiles) == 0, f"Application profiles are not complete {json.dumps([i.metadata for i in not_complete_application_profiles], cls=ResourceFieldEncoder)}"

# ---------------------- helm ------------------------
@staticmethod
def add_and_upgrade_armo_to_repo():
Expand Down
176 changes: 176 additions & 0 deletions tests_scripts/runtime/alerts.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,176 @@


import json
from random import random
import time
from configurations.system.tests_cases.structures import TestConfiguration
from infrastructure.kubectl_wrapper import KubectlWrapper
from systest_utils.systests_utilities import TestUtil
from systest_utils.tests_logger import Logger
from tests_scripts.base_test import BaseTest
from tests_scripts.runtime.policies import POLICY_CREATED_RESPONSE, RuntimePoliciesConfigurations
from tests_scripts.users_notifications.alert_notifications import TEST_NAMESPACE, AlertNotifications, get_env


class IncidentsAlerts(AlertNotifications, RuntimePoliciesConfigurations):
    """End-to-end test: a runtime incident raised in the cluster produces an
    alert message on the configured channel (Slack or Teams, selected by the
    test configuration's getMessagesFunc / enrichAlertChannelFunc).

    Combines AlertNotifications (channel message retrieval) with
    RuntimePoliciesConfigurations (runtime-policy helpers such as
    ``validate_new_policy``).
    """

    def __init__(self, test_obj: TestConfiguration = None, backend=None, test_driver=None):
        super(IncidentsAlerts, self).__init__(test_obj=test_obj, backend=backend, test_driver=test_driver)

        # Helm values for the kubescape installation: disable all scanning
        # capabilities except the runtime-detection path the test exercises.
        self.helm_kwargs = {
            "capabilities.configurationScan": "disable",
            "capabilities.continuousScan": "disable",
            "capabilities.nodeScan": "disable",
            "capabilities.vulnerabilityScan": "disable",
            "grypeOfflineDB.enabled": "false",
            # not clear why
            "capabilities.relevancy": "enable",
            # enable application profile, malware and runtime detection
            "capabilities.runtimeObservability": "enable",
            "capabilities.malwareDetection": "enable",
            "capabilities.runtimeDetection": "enable",
            "capabilities.nodeProfileService": "enable",
            "alertCRD.installDefault": True,
            "alertCRD.scopeClustered": True,
            # short learning period
            "nodeAgent.config.maxLearningPeriod": "60s",
            "nodeAgent.config.learningPeriod": "50s",
            "nodeAgent.config.updatePeriod": "30s",
            "nodeAgent.config.nodeProfileInterval": "1m",
            # "nodeAgent.image.repository": "docker.io/amitschendel/node-agent",
            # "nodeAgent.image.tag": "v0.0.5",
        }
        # Per-test overrides from the test configuration take precedence.
        test_helm_kwargs = self.test_obj.get_arg("helm_kwargs")
        if test_helm_kwargs:
            self.helm_kwargs.update(test_helm_kwargs)

        self.fw_name = None
        self.cluster = None
        self.wait_for_agg_to_end = False
        # guids of runtime policies created by this test; removed in cleanup()
        self.test_policy_guids = []

    def start(self):
        """
        agenda:
        1. get runtime incidents rulesets
        2. enrich the new runtime policy with alert notifications
        3. create new runtime policy
        4. Install kubescape
        5. apply the deployment that will generate the incident
        6. wait for the runtime incidents to be generated
        7. verify messages were sent
        """
        assert self.backend is not None, f'the test {self.test_driver.test_name} must run with backend'

        self.cluster, namespace = self.setup(apply_services=False)

        # Timestamp taken before the test acts: message polling later only
        # looks at messages sent after this point.
        before_test_message_ts = time.time()

        Logger.logger.info("1. get runtime incidents rulesets")
        res = self.backend.get_runtime_incidents_rulesets()
        incident_rulesets = json.loads(res.text)

        # Only the managed "Anomaly" ruleset is attached to the new policy.
        incident_rulesets_guids = [rule["guid"] for rule in incident_rulesets["response"] if rule["name"] == "Anomaly"]

        # Update the name field
        new_runtime_policy_body = {
            # NOTE(review): f-string has no placeholder; plain concatenation with the cluster name
            "name": f"Malware-new-systest-" + self.cluster,
            "description": "Default Malware RuleSet System Test",
            "enabled": True,
            "scope": {},
            "ruleSetType": "Managed",
            "managedRuleSetIDs": incident_rulesets_guids,
            "notifications": [],
            "actions": []
        }

        Logger.logger.info("2. enrich the new runtime policy with alert notifications")
        # Fills new_runtime_policy_body["notifications"] for Slack or Teams,
        # depending on the test configuration.
        self.test_obj["enrichAlertChannelFunc"](new_runtime_policy_body)

        Logger.logger.info("3. create new runtime policy")
        new_policy_guid = self.validate_new_policy(new_runtime_policy_body)
        # Remember the guid so cleanup() can delete the policy.
        self.test_policy_guids.append(new_policy_guid)

        Logger.logger.info('4. Install kubescape')
        self.install_kubescape(helm_kwargs=self.helm_kwargs)

        Logger.logger.info('5. apply the deployment that will generate the incident')
        workload_objs: list = self.apply_directory(path=self.test_obj["deployments"], namespace=namespace)
        self.verify_all_pods_are_running(namespace=namespace, workload=workload_objs, timeout=240)

        wlids = self.get_wlid(workload=workload_objs, namespace=namespace, cluster=self.cluster)
        if isinstance(wlids, str):
            wlids = [wlids]

        Logger.logger.info('6. wait for the runtime incidents to be generated')
        self.wait_for_report(self.verify_running_pods, sleep_interval=5, timeout=180, namespace=namespace)

        Logger.logger.info(
            f'workloads are running, waiting for application profile finalizing before exec into pod {wlids}')
        self.wait_for_report(self.verify_application_profiles, wlids=wlids, namespace=namespace)
        time.sleep(6)
        # Exec into the pod after the profile completes — presumably this
        # anomalous exec is what triggers the runtime incident; confirm.
        self.exec_pod(wlid=wlids[0], command="ls -l /tmp")

        Logger.logger.info('7. verify messages were sent')
        res = self.wait_for_report(self.assert_all_messages_sent, begin_time=before_test_message_ts, cluster=self.cluster)
        return self.cleanup()

    def cleanup(self):
        """Delete every runtime policy created by this test, then run the base cleanup."""
        for policy_guid in self.test_policy_guids:
            body = {
                "innerFilters": [
                    {
                        "guid": policy_guid,
                    }
                ]
            }
            self.backend.delete_runtime_policies(body)
        return super().cleanup()

    def assert_all_messages_sent(self, begin_time, cluster):
        """Fetch channel messages since *begin_time* and assert at least one
        mentions *cluster* and matches the runtime-incident pattern.

        Raises AssertionError when no matching message is found; intended to
        be polled via ``wait_for_report``.
        """
        messages = self.test_obj["getMessagesFunc"](begin_time)
        found = str(messages).count(cluster)
        assert found > 0, f"expected to have at least 1 message, found {found}"
        assert_runtime_incident_message_sent(messages, cluster)



def assert_runtime_incident_message_sent(messages, cluster):
    """Fail unless at least one message looks like a runtime incident for *cluster*.

    A message qualifies when its string form contains the incident banner
    "New threat found", the cluster name, and "redis" (the workload these
    tests deploy to trigger the incident).
    """
    Logger.logger.info(f"total messages found: {len(messages)}, looking for runtime incident messages")
    if messages:
        Logger.logger.info(f"first message: {messages[0]}")

    required_tokens = ("New threat found", cluster, "redis")
    matches = sum(
        1 for message in messages
        if all(token in str(message) for token in required_tokens)
    )
    assert matches > 0, "expected to have at least one runtime incident message"


def enrich_teams_alert_notifications(data):
    """Set (in place) a Teams webhook notification target on a runtime-policy body.

    Replaces data["notifications"] with a single Teams provider entry whose
    webhook URL comes from the CHANNEL_WEBHOOK environment variable.
    """
    webhook_url = get_env("CHANNEL_WEBHOOK")
    data["notifications"] = [
        {"provider": "teams", "teamsWebhookURL": webhook_url},
    ]


def enrich_slack_alert_notifications(data):
    """Set (in place) a Slack channel notification target on a runtime-policy body.

    Replaces data["notifications"] with a single Slack provider entry; the
    channel id comes from the SLACK_CHANNEL_ID environment variable.
    """
    channel = {
        "channelID": get_env("SLACK_CHANNEL_ID"),
        "channelName": "dev-system-tests",
    }
    data["notifications"] = [
        {"provider": "slack", "slackChannel": channel},
    ]
24 changes: 0 additions & 24 deletions tests_scripts/runtime/incidents.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
import json
import time

from kubernetes.dynamic import ResourceField

from configurations.system.tests_cases.structures import TestConfiguration
from systest_utils import statics, Logger
Expand All @@ -11,11 +10,6 @@
__RESPONSE_FIELD__ = "response"


class ResourceFieldEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, ResourceField):
return obj.__dict__
return json.JSONEncoder.default(self, obj)


class Incidents(BaseHelm):
Expand Down Expand Up @@ -347,23 +341,5 @@ def verify_incident_in_backend_list(self, cluster, namespace, incident_name):
assert len(incs) > 0, f"Failed to get incidents list {json.dumps(incs)}"
return incs

def verify_application_profiles(self, wlids: list, namespace):
Logger.logger.info("Get application profiles")
k8s_data = self.kubernetes_obj.get_dynamic_client("spdx.softwarecomposition.kubescape.io/v1beta1",
"ApplicationProfile").get(namespace=namespace).items
assert k8s_data is not None, "Failed to get application profiles"
assert len(k8s_data) >= len(wlids), f"Failed to get all application profiles {len(k8s_data)}"
Logger.logger.info(f"Application profiles are presented {len(k8s_data)}")
ap_wlids = [i.metadata.annotations['kubescape.io/wlid'] for i in k8s_data]
for i in wlids:
assert i in ap_wlids, f"Failed to get application profile for {i}"
# kubescape.io/status: completed, kubescape.io/completion: complete
# i.metadata.annotations['kubescape.io/completion'] != 'complete' or
not_complete_application_profiles = [i for i in k8s_data if
i.metadata.annotations['kubescape.io/status'] != 'completed']

assert len(
not_complete_application_profiles) == 0, f"Application profiles are not complete {json.dumps([i.metadata for i in not_complete_application_profiles], cls=ResourceFieldEncoder)}"

def cleanup(self, **kwargs):
return super().cleanup(**kwargs)
4 changes: 2 additions & 2 deletions tests_scripts/runtime/policies.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,8 +170,8 @@ def validate_new_policy(self, body):

res = self.backend.get_runtime_policies_list(new_generated_runtime_policy_body)
incident_policies = json.loads(res.text)["response"]
props_to_check = ["name", "scope", "ruleSetType", "managedRuleSetIDs", "notifications", "actions"]
assert len(incident_policies) == 1, f"failed to get new runtime policy, expected 1 but got {len(incident_policies)}, got result {incident_policies}"
props_to_check = ["name", "scope", "ruleSetType", "managedRuleSetIDs", "actions"]
assert len(incident_policies) > 0, f"failed to get new runtime policy, expected more than 1 but got {len(incident_policies)}, got result {incident_policies}"

for prop in props_to_check:
assert incident_policies[0][prop] == body[prop], f"failed to get new runtime policy, expected '{prop}' {body[prop]} but got {incident_policies[0][prop]}, got result {incident_policies}"
Expand Down
Loading
Loading