From 0325e2af02302f7f3d823b55bfc1c0121c1ba67c Mon Sep 17 00:00:00 2001 From: Rares Mardare Date: Wed, 17 Jul 2024 12:16:27 +0300 Subject: [PATCH 01/24] Label error handling tweaks (#4687) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # What this PR does These changes make it so that we no longer show an error notification when you first add an empty pair `key/value` due to the undefined key. Other than that, the handling remains the same, with some refactorings as we don't need the try catch clauses. Would be nice in the future to find a way not to duplicate the usage of those 2 functions that fetch the key and the value and share them somehow 🤔 --- .../ColumnsSelectorWrapper/ColumnsModal.tsx | 3 +- .../ColumnsSelectorWrapper.tsx | 3 +- .../IntegrationLabelsForm.tsx | 28 ++++--------------- .../src/containers/Labels/Labels.tsx | 26 ++++------------- .../RouteLabelsDisplay/RouteLabelsDisplay.tsx | 21 ++++---------- .../src/models/alertgroup/alertgroup.ts | 4 +-- grafana-plugin/src/models/label/label.ts | 3 ++ grafana-plugin/src/utils/consts.ts | 1 + 8 files changed, 27 insertions(+), 62 deletions(-) diff --git a/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsModal.tsx b/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsModal.tsx index 7ca7d07ddc..dead50d3c4 100644 --- a/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsModal.tsx +++ b/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsModal.tsx @@ -26,6 +26,7 @@ import { ApiSchemas } from 'network/oncall-api/api.types'; import { components } from 'network/oncall-api/autogenerated-api.types'; import { useStore } from 'state/useStore'; import { UserActions } from 'utils/authorization/authorization'; +import { PROCESSING_REQUEST_ERROR } from 'utils/consts'; import { WrapWithGlobalNotification } from 'utils/decorators'; import { useDebouncedCallback, useIsLoading } from 'utils/hooks'; import { pluralize } from 'utils/utils'; @@ -142,7 
+143,7 @@ export const ColumnsModal: React.FC = observer( variant="primary" onClick={WrapWithGlobalNotification(onAddNewColumns, { success: 'New column has been added to the list.', - failure: 'There was an error processing your request. Please try again.', + failure: PROCESSING_REQUEST_ERROR, })} > {isLoading ? : 'Add'} diff --git a/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsSelectorWrapper.tsx b/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsSelectorWrapper.tsx index fe7ccc9d1a..eb346615b9 100644 --- a/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsSelectorWrapper.tsx +++ b/grafana-plugin/src/containers/ColumnsSelectorWrapper/ColumnsSelectorWrapper.tsx @@ -13,6 +13,7 @@ import { ActionKey } from 'models/loader/action-keys'; import { ApiSchemas } from 'network/oncall-api/api.types'; import { useStore } from 'state/useStore'; import { UserActions } from 'utils/authorization/authorization'; +import { PROCESSING_REQUEST_ERROR } from 'utils/consts'; import { WrapAutoLoadingState, WrapWithGlobalNotification } from 'utils/decorators'; import { useIsLoading } from 'utils/hooks'; @@ -81,7 +82,7 @@ export const ColumnsSelectorWrapper: React.FC = obs onClick={WrapAutoLoadingState( WrapWithGlobalNotification(onColumnRemovalClick, { success: 'Column has been removed from the list.', - failure: 'There was an error processing your request. 
Please try again', + failure: PROCESSING_REQUEST_ERROR, }), ActionKey.REMOVE_COLUMN_FROM_ALERT_GROUP )} diff --git a/grafana-plugin/src/containers/IntegrationLabelsForm/IntegrationLabelsForm.tsx b/grafana-plugin/src/containers/IntegrationLabelsForm/IntegrationLabelsForm.tsx index bee1cb6227..e5dd7b26e0 100644 --- a/grafana-plugin/src/containers/IntegrationLabelsForm/IntegrationLabelsForm.tsx +++ b/grafana-plugin/src/containers/IntegrationLabelsForm/IntegrationLabelsForm.tsx @@ -284,32 +284,16 @@ const CustomLabels = (props: CustomLabelsProps) => { }; const onLoadKeys = async (search?: string) => { - let result = undefined; - - try { - result = await labelsStore.loadKeys(search); - } catch (error) { - openErrorNotification('There was an error processing your request. Please try again'); - } - - const groups = splitToGroups(result); - - return groups; + const result = await labelsStore.loadKeys(search); + return splitToGroups(result); }; const onLoadValuesForKey = async (key: string, search?: string) => { - let result = undefined; - - try { - const { values } = await labelsStore.loadValuesForKey(key, search); - result = values; - } catch (error) { - openErrorNotification('There was an error processing your request. Please try again'); + if (!key) { + return []; } - - const groups = splitToGroups(result); - - return groups; + const { values } = await labelsStore.loadValuesForKey(key, search); + return splitToGroups(values); }; return ( diff --git a/grafana-plugin/src/containers/Labels/Labels.tsx b/grafana-plugin/src/containers/Labels/Labels.tsx index d7a76ff812..02625f461b 100644 --- a/grafana-plugin/src/containers/Labels/Labels.tsx +++ b/grafana-plugin/src/containers/Labels/Labels.tsx @@ -49,30 +49,16 @@ const _Labels = observer( ); const onLoadKeys = async (search?: string) => { - let result = undefined; - try { - result = await labelsStore.loadKeys(search); - } catch (error) { - openErrorNotification('There was an error processing your request. 
Please try again'); - } - - const groups = splitToGroups(result); - - return groups; + const result = await labelsStore.loadKeys(search); + return splitToGroups(result); }; const onLoadValuesForKey = async (key: string, search?: string) => { - let result = undefined; - try { - const { values } = await labelsStore.loadValuesForKey(key, search); - result = values; - } catch (error) { - openErrorNotification('There was an error processing your request. Please try again'); + if (!key) { + return []; } - - const groups = splitToGroups(result); - - return groups; + const { values } = await labelsStore.loadValuesForKey(key, search); + return splitToGroups(values); }; const isValid = () => { diff --git a/grafana-plugin/src/containers/RouteLabelsDisplay/RouteLabelsDisplay.tsx b/grafana-plugin/src/containers/RouteLabelsDisplay/RouteLabelsDisplay.tsx index 17743e8ea9..cd3c64fcd5 100644 --- a/grafana-plugin/src/containers/RouteLabelsDisplay/RouteLabelsDisplay.tsx +++ b/grafana-plugin/src/containers/RouteLabelsDisplay/RouteLabelsDisplay.tsx @@ -32,28 +32,17 @@ export const RouteLabelsDisplay: React.FC = ({ labels, }; const onLoadKeys = async (search?: string) => { - let result = undefined; - - try { - result = await labelsStore.loadKeys(search); - } catch (error) { - openErrorNotification('There was an error processing your request. Please try again'); - } - + const result = await labelsStore.loadKeys(search); return splitToGroups(result); }; const onLoadValuesForKey = async (key: string, search?: string) => { - let result = undefined; - - try { - const { values } = await labelsStore.loadValuesForKey(key, search); - result = values; - } catch (error) { - openErrorNotification('There was an error processing your request. 
Please try again'); + if (!key) { + return []; } - return splitToGroups(result); + const { values } = await labelsStore.loadValuesForKey(key, search); + return splitToGroups(values); }; return ( diff --git a/grafana-plugin/src/models/alertgroup/alertgroup.ts b/grafana-plugin/src/models/alertgroup/alertgroup.ts index aefde6e6d5..ca4c11c52c 100644 --- a/grafana-plugin/src/models/alertgroup/alertgroup.ts +++ b/grafana-plugin/src/models/alertgroup/alertgroup.ts @@ -9,7 +9,7 @@ import { onCallApi } from 'network/oncall-api/http-client'; import { RootStore } from 'state/rootStore'; import { SelectOption } from 'state/types'; import { LocationHelper } from 'utils/LocationHelper'; -import { GENERIC_ERROR, PAGE } from 'utils/consts'; +import { GENERIC_ERROR, PAGE, PROCESSING_REQUEST_ERROR } from 'utils/consts'; import { AutoLoadingState, WithGlobalNotification } from 'utils/decorators'; import { AlertGroupHelper } from './alertgroup.helpers'; @@ -136,7 +136,7 @@ export class AlertGroupStore { @AutoLoadingState(ActionKey.REMOVE_COLUMN_FROM_ALERT_GROUP) @WithGlobalNotification({ success: 'Column has been removed from the list.', - failure: 'There was an error processing your request. 
Please try again', + failure: PROCESSING_REQUEST_ERROR, }) async removeTableColumn( columnToBeRemoved: AlertGroupColumn, diff --git a/grafana-plugin/src/models/label/label.ts b/grafana-plugin/src/models/label/label.ts index 0ca89ce35d..7559632ac8 100644 --- a/grafana-plugin/src/models/label/label.ts +++ b/grafana-plugin/src/models/label/label.ts @@ -6,6 +6,7 @@ import { ApiSchemas } from 'network/oncall-api/api.types'; import { components } from 'network/oncall-api/autogenerated-api.types'; import { onCallApi } from 'network/oncall-api/http-client'; import { RootStore } from 'state/rootStore'; +import { PROCESSING_REQUEST_ERROR } from 'utils/consts'; import { WithGlobalNotification } from 'utils/decorators'; export class LabelStore extends BaseStore { @@ -17,6 +18,7 @@ export class LabelStore extends BaseStore { this.path = '/labels/'; } + @WithGlobalNotification({ failure: PROCESSING_REQUEST_ERROR }) @action.bound public async loadKeys(search = ''): Promise> { const { data } = await onCallApi().GET('/labels/keys/', undefined); @@ -26,6 +28,7 @@ export class LabelStore extends BaseStore { return filtered; } + @WithGlobalNotification({ failure: PROCESSING_REQUEST_ERROR }) @action.bound async loadValuesForKey( key: ApiSchemas['LabelKey']['id'], diff --git a/grafana-plugin/src/utils/consts.ts b/grafana-plugin/src/utils/consts.ts index a091378e28..99b6a19a8b 100644 --- a/grafana-plugin/src/utils/consts.ts +++ b/grafana-plugin/src/utils/consts.ts @@ -97,5 +97,6 @@ export enum OnCallAGStatus { } export const GENERIC_ERROR = 'An error has occurred. Please try again'; +export const PROCESSING_REQUEST_ERROR = 'There was an error processing your request. 
Please try again'; export const INTEGRATION_SERVICENOW = 'servicenow'; From 65a794af0ab0bce5a41aa6b179eb711fbdabf1f0 Mon Sep 17 00:00:00 2001 From: Vadim Stepanov Date: Wed, 17 Jul 2024 14:44:16 +0100 Subject: [PATCH 02/24] PR template: "Closes" -> "Related to" (#4693) Make it so that you need to explicitly use the "Closes" keyword when opening PRs (as per [this sprint retro comment](https://miro.com/app/board/uXjVKzhhfgE=/?moveToWidget=3458764594901560846&cot=14)) --- .github/pull_request_template.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 4424f0123b..717dc48e72 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -2,10 +2,11 @@ ## Which issue(s) this PR closes -Closes [issue link here] +Related to [issue link here] From 49d1127698409abeb7458f32c9093ed12f0522dd Mon Sep 17 00:00:00 2001 From: Yulya Artyukhina Date: Wed, 17 Jul 2024 19:00:26 +0200 Subject: [PATCH 03/24] Fix `send_bundled_notification` task (#4696) # What this PR does Fix scheduling `perform_notification` from `send_bundled_notification` task - leftover after resolving merge conflict ## Checklist - [ ] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] Added the relevant release notes label (see labels prefixed w/ `release:`). These labels dictate how your PR will show up in the autogenerated release notes. 
--- engine/apps/alerts/tasks/notify_user.py | 1 + 1 file changed, 1 insertion(+) diff --git a/engine/apps/alerts/tasks/notify_user.py b/engine/apps/alerts/tasks/notify_user.py index bbcec36d0b..8258da1956 100644 --- a/engine/apps/alerts/tasks/notify_user.py +++ b/engine/apps/alerts/tasks/notify_user.py @@ -626,6 +626,7 @@ def send_bundled_notification(user_notification_bundle_id: int): schedule_perform_notification_task, log_record_notification_triggered.pk, log_record_notification_triggered.alert_group_id, + False, ) ) notifications.delete() From 4e03732fb108e21450b9e9415ba565d31b3e49ff Mon Sep 17 00:00:00 2001 From: Yulya Artyukhina Date: Wed, 17 Jul 2024 19:01:04 +0200 Subject: [PATCH 04/24] Drop stale metrics cache (#4695) # What this PR does based on this PR - https://github.com/grafana/oncall/pull/4517 ## Which issue(s) this PR closes Related to https://github.com/grafana/oncall/issues/4455 ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] Added the relevant release notes label (see labels prefixed w/ `release:`). These labels dictate how your PR will show up in the autogenerated release notes. 
--- .../metrics_exporter/metrics_collectors.py | 10 ++++ .../apps/metrics_exporter/tests/conftest.py | 54 +++++++++++++++++++ .../tests/test_metrics_collectors.py | 37 +++++++++++++ 3 files changed, 101 insertions(+) diff --git a/engine/apps/metrics_exporter/metrics_collectors.py b/engine/apps/metrics_exporter/metrics_collectors.py index 282e57180d..720cdea9c6 100644 --- a/engine/apps/metrics_exporter/metrics_collectors.py +++ b/engine/apps/metrics_exporter/metrics_collectors.py @@ -1,3 +1,4 @@ +import logging import re import typing @@ -28,6 +29,7 @@ application_metrics_registry = CollectorRegistry() +logger = logging.getLogger(__name__) # _RE_BASE_PATTERN allows for optional curly-brackets around the metric name as in some cases this may occur # see common.cache.ensure_cache_key_allocates_to_the_same_hash_slot for more details regarding this @@ -92,6 +94,10 @@ def _get_alert_groups_total_metric(self, org_ids): ) for org_key, ag_states in org_ag_states.items(): for _, integration_data in ag_states.items(): + if "services" not in integration_data: + logger.warning(f"Deleting stale metrics cache for {org_key}") + cache.delete(org_key) + break # Labels values should have the same order as _integration_labels_with_state labels_values = [ integration_data["integration_name"], # integration @@ -125,6 +131,10 @@ def _get_response_time_metric(self, org_ids): ) for org_key, ag_response_time in org_ag_response_times.items(): for _, integration_data in ag_response_time.items(): + if "services" not in integration_data: + logger.warning(f"Deleting stale metrics cache for {org_key}") + cache.delete(org_key) + break # Labels values should have the same order as _integration_labels labels_values = [ integration_data["integration_name"], # integration diff --git a/engine/apps/metrics_exporter/tests/conftest.py b/engine/apps/metrics_exporter/tests/conftest.py index 07dda37d49..b3204bce79 100644 --- a/engine/apps/metrics_exporter/tests/conftest.py +++ 
b/engine/apps/metrics_exporter/tests/conftest.py @@ -113,6 +113,60 @@ def _mock_cache_get_many(keys, *args, **kwargs): monkeypatch.setattr(cache, "get_many", _mock_cache_get_many) +@pytest.fixture() +def mock_cache_get_old_metrics_for_collector(monkeypatch): + def _mock_cache_get(key, *args, **kwargs): + if ALERT_GROUPS_TOTAL in key: + key = ALERT_GROUPS_TOTAL + elif ALERT_GROUPS_RESPONSE_TIME in key: + key = ALERT_GROUPS_RESPONSE_TIME + elif USER_WAS_NOTIFIED_OF_ALERT_GROUPS in key: + key = USER_WAS_NOTIFIED_OF_ALERT_GROUPS + test_metrics = { + ALERT_GROUPS_TOTAL: { + 1: { + "integration_name": "Test metrics integration", + "team_name": "Test team", + "team_id": 1, + "org_id": 1, + "slug": "Test stack", + "id": 1, + "firing": 2, + "silenced": 4, + "acknowledged": 3, + "resolved": 5, + }, + }, + ALERT_GROUPS_RESPONSE_TIME: { + 1: { + "integration_name": "Test metrics integration", + "team_name": "Test team", + "team_id": 1, + "org_id": 1, + "slug": "Test stack", + "id": 1, + "response_time": [2, 10, 200, 650], + }, + }, + USER_WAS_NOTIFIED_OF_ALERT_GROUPS: { + 1: { + "org_id": 1, + "slug": "Test stack", + "id": 1, + "user_username": "Alex", + "counter": 4, + } + }, + } + return test_metrics.get(key) + + def _mock_cache_get_many(keys, *args, **kwargs): + return {key: _mock_cache_get(key) for key in keys if _mock_cache_get(key)} + + monkeypatch.setattr(cache, "get", _mock_cache_get) + monkeypatch.setattr(cache, "get_many", _mock_cache_get_many) + + @pytest.fixture() def mock_get_metrics_cache(monkeypatch): def _mock_cache_get(key, *args, **kwargs): diff --git a/engine/apps/metrics_exporter/tests/test_metrics_collectors.py b/engine/apps/metrics_exporter/tests/test_metrics_collectors.py index 24b54a5c04..640ac57bc7 100644 --- a/engine/apps/metrics_exporter/tests/test_metrics_collectors.py +++ b/engine/apps/metrics_exporter/tests/test_metrics_collectors.py @@ -1,6 +1,7 @@ from unittest.mock import patch import pytest +from django.core.cache import cache from django.test 
import override_settings from prometheus_client import CollectorRegistry, generate_latest @@ -11,6 +12,7 @@ NO_SERVICE_VALUE, USER_WAS_NOTIFIED_OF_ALERT_GROUPS, ) +from apps.metrics_exporter.helpers import get_metric_alert_groups_response_time_key, get_metric_alert_groups_total_key from apps.metrics_exporter.metrics_collectors import ApplicationMetricsCollector from apps.metrics_exporter.tests.conftest import METRICS_TEST_SERVICE_NAME @@ -75,3 +77,38 @@ def get_expected_labels(service_name=NO_SERVICE_VALUE, **kwargs): # Since there is no recalculation timer for test org in cache, start_calculate_and_cache_metrics must be called assert mocked_start_calculate_and_cache_metrics.called test_metrics_registry.unregister(collector) + + +@patch("apps.metrics_exporter.metrics_collectors.get_organization_ids", return_value=[1]) +@patch("apps.metrics_exporter.metrics_collectors.start_calculate_and_cache_metrics.apply_async") +@pytest.mark.django_db +def test_application_metrics_collector_with_old_metrics_without_services( + mocked_org_ids, mocked_start_calculate_and_cache_metrics, mock_cache_get_old_metrics_for_collector +): + """Test that ApplicationMetricsCollector generates expected metrics from cache""" + + org_id = 1 + collector = ApplicationMetricsCollector() + test_metrics_registry = CollectorRegistry() + test_metrics_registry.register(collector) + for metric in test_metrics_registry.collect(): + if metric.name == ALERT_GROUPS_TOTAL: + alert_groups_total_metrics_cache = cache.get(get_metric_alert_groups_total_key(org_id)) + assert alert_groups_total_metrics_cache and "services" not in alert_groups_total_metrics_cache[1] + assert len(metric.samples) == 0 + elif metric.name == ALERT_GROUPS_RESPONSE_TIME: + alert_groups_response_time_metrics_cache = cache.get(get_metric_alert_groups_response_time_key(org_id)) + assert ( + alert_groups_response_time_metrics_cache + and "services" not in alert_groups_response_time_metrics_cache[1] + ) + assert len(metric.samples) == 0 + 
elif metric.name == USER_WAS_NOTIFIED_OF_ALERT_GROUPS: + # metric with labels for each notified user + assert len(metric.samples) == 1 + result = generate_latest(test_metrics_registry).decode("utf-8") + assert result is not None + assert mocked_org_ids.called + # Since there is no recalculation timer for test org in cache, start_calculate_and_cache_metrics must be called + assert mocked_start_calculate_and_cache_metrics.called + test_metrics_registry.unregister(collector) From dd84aa15c6c9209da87f61c7901eaf46150a3082 Mon Sep 17 00:00:00 2001 From: Innokentii Konstantinov Date: Thu, 18 Jul 2024 13:14:52 +0800 Subject: [PATCH 05/24] Add missing app_type argument to uninstall request (#4700) --- engine/apps/chatops_proxy/client.py | 8 +++----- .../chatops_proxy/register_oncall_tenant.py | 4 ++-- engine/apps/chatops_proxy/utils.py | 20 ++++++++++--------- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/engine/apps/chatops_proxy/client.py b/engine/apps/chatops_proxy/client.py index 1a0b63bcb1..1c78a21312 100644 --- a/engine/apps/chatops_proxy/client.py +++ b/engine/apps/chatops_proxy/client.py @@ -6,7 +6,7 @@ import requests from django.conf import settings -SERVICE_TYPE_ONCALL = "oncall" +APP_TYPE_ONCALL = "oncall" PROVIDER_TYPE_SLACK = "slack" @@ -172,16 +172,14 @@ def get_oauth_installation( return OAuthInstallation(**response.json()["oauth_installation"]), response def delete_oauth_installation( - self, - stack_id: int, - provider_type: str, - grafana_user_id: int, + self, stack_id: int, provider_type: str, grafana_user_id: int, app_type: str ) -> tuple[bool, requests.models.Response]: url = f"{self.api_base_url}/oauth_installations/uninstall" d = { "stack_id": stack_id, "provider_type": provider_type, "grafana_user_id": grafana_user_id, + "app_type": app_type, } response = requests.post(url=url, json=d, headers=self._headers) self._check_response(response) diff --git a/engine/apps/chatops_proxy/register_oncall_tenant.py 
b/engine/apps/chatops_proxy/register_oncall_tenant.py index 5aaa45c414..c0daeb5641 100644 --- a/engine/apps/chatops_proxy/register_oncall_tenant.py +++ b/engine/apps/chatops_proxy/register_oncall_tenant.py @@ -1,7 +1,7 @@ # register_oncall_tenant moved to separate file from engine/apps/chatops_proxy/utils.py to avoid circular imports. from django.conf import settings -from apps.chatops_proxy.client import SERVICE_TYPE_ONCALL, ChatopsProxyAPIClient +from apps.chatops_proxy.client import APP_TYPE_ONCALL, ChatopsProxyAPIClient def register_oncall_tenant(org): @@ -12,7 +12,7 @@ def register_oncall_tenant(org): client.register_tenant( str(org.uuid), settings.ONCALL_BACKEND_REGION, - SERVICE_TYPE_ONCALL, + APP_TYPE_ONCALL, org.stack_id, org.stack_slug, ) diff --git a/engine/apps/chatops_proxy/utils.py b/engine/apps/chatops_proxy/utils.py index 06e6f14ec3..a106297787 100644 --- a/engine/apps/chatops_proxy/utils.py +++ b/engine/apps/chatops_proxy/utils.py @@ -6,7 +6,7 @@ from django.conf import settings -from .client import PROVIDER_TYPE_SLACK, SERVICE_TYPE_ONCALL, ChatopsProxyAPIClient, ChatopsProxyAPIException +from .client import APP_TYPE_ONCALL, PROVIDER_TYPE_SLACK, ChatopsProxyAPIClient, ChatopsProxyAPIException from .register_oncall_tenant import register_oncall_tenant from .tasks import ( link_slack_team_async, @@ -30,7 +30,7 @@ def get_installation_link_from_chatops_proxy(user) -> typing.Optional[str]: org.stack_id, user.user_id, org.web_link, - SERVICE_TYPE_ONCALL, + APP_TYPE_ONCALL, ) return link except ChatopsProxyAPIException as api_exc: @@ -63,7 +63,7 @@ def register_oncall_tenant_with_async_fallback(org): kwargs={ "service_tenant_id": str(org.uuid), "cluster_slug": settings.ONCALL_BACKEND_REGION, - "service_type": SERVICE_TYPE_ONCALL, + "service_type": APP_TYPE_ONCALL, "stack_id": org.stack_id, "stack_slug": org.stack_slug, "org_id": org.id, @@ -80,7 +80,7 @@ def unregister_oncall_tenant(service_tenant_id: str, cluster_slug: str): kwargs={ 
"service_tenant_id": service_tenant_id, "cluster_slug": cluster_slug, - "service_type": SERVICE_TYPE_ONCALL, + "service_type": APP_TYPE_ONCALL, }, countdown=2, ) @@ -97,7 +97,7 @@ def can_link_slack_team( """ client = ChatopsProxyAPIClient(settings.ONCALL_GATEWAY_URL, settings.ONCALL_GATEWAY_API_TOKEN) try: - response = client.can_slack_link(service_tenant_id, cluster_slug, slack_team_id, SERVICE_TYPE_ONCALL) + response = client.can_slack_link(service_tenant_id, cluster_slug, slack_team_id, APP_TYPE_ONCALL) return response.status_code == 200 except Exception as e: logger.error( @@ -111,7 +111,7 @@ def can_link_slack_team( def link_slack_team(service_tenant_id: str, slack_team_id: str): client = ChatopsProxyAPIClient(settings.ONCALL_GATEWAY_URL, settings.ONCALL_GATEWAY_API_TOKEN) try: - client.link_slack_team(service_tenant_id, slack_team_id, SERVICE_TYPE_ONCALL) + client.link_slack_team(service_tenant_id, slack_team_id, APP_TYPE_ONCALL) except Exception as e: logger.error( f'msg="Failed to link slack team: {e}"' @@ -121,7 +121,7 @@ def link_slack_team(service_tenant_id: str, slack_team_id: str): kwargs={ "service_tenant_id": service_tenant_id, "slack_team_id": slack_team_id, - "service_type": SERVICE_TYPE_ONCALL, + "service_type": APP_TYPE_ONCALL, }, countdown=2, ) @@ -132,7 +132,7 @@ def unlink_slack_team(service_tenant_id: str, slack_team_id: str): kwargs={ "service_tenant_id": service_tenant_id, "slack_team_id": slack_team_id, - "service_type": SERVICE_TYPE_ONCALL, + "service_type": APP_TYPE_ONCALL, } ) @@ -144,7 +144,9 @@ def uninstall_slack(stack_id: int, grafana_user_id: int) -> bool: """ client = ChatopsProxyAPIClient(settings.ONCALL_GATEWAY_URL, settings.ONCALL_GATEWAY_API_TOKEN) try: - removed, response = client.delete_oauth_installation(stack_id, PROVIDER_TYPE_SLACK, grafana_user_id) + removed, response = client.delete_oauth_installation( + stack_id, PROVIDER_TYPE_SLACK, grafana_user_id, APP_TYPE_ONCALL + ) except ChatopsProxyAPIException as api_exc: if 
api_exc.status == 404: return True From 7777a6003a13b0a7ffe398863a302c20865c72c9 Mon Sep 17 00:00:00 2001 From: Matias Bordese Date: Thu, 18 Jul 2024 11:15:48 -0300 Subject: [PATCH 06/24] Handle multiple unpage event logs when getting paged users (#4698) Related to https://github.com/grafana/oncall-private/issues/2816 --- engine/apps/alerts/models/alert_group.py | 6 ++++-- engine/apps/alerts/tests/test_alert_group.py | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/engine/apps/alerts/models/alert_group.py b/engine/apps/alerts/models/alert_group.py index 8eea0c0e23..3429361cd4 100644 --- a/engine/apps/alerts/models/alert_group.py +++ b/engine/apps/alerts/models/alert_group.py @@ -554,7 +554,7 @@ def get_paged_users(self) -> typing.List[PagedUser]: log_records = self.log_records.filter( type__in=(AlertGroupLogRecord.TYPE_DIRECT_PAGING, AlertGroupLogRecord.TYPE_UNPAGE_USER) - ) + ).order_by("created_at") for log_record in log_records: # filter paging events, track still active escalations @@ -592,7 +592,9 @@ def get_paged_users(self) -> typing.List[PagedUser]: } else: # user was unpaged at some point, remove them - del users[user_id] + # there could be multiple unpage log records if API was hit several times + if user_id in users: + del users[user_id] return list(users.values()) diff --git a/engine/apps/alerts/tests/test_alert_group.py b/engine/apps/alerts/tests/test_alert_group.py index caecaa8e6d..ae2aeceedd 100644 --- a/engine/apps/alerts/tests/test_alert_group.py +++ b/engine/apps/alerts/tests/test_alert_group.py @@ -589,6 +589,24 @@ def _make_log_record(alert_group, user, log_type, important=False): assert len(paged_users) == 1 assert alert_group.get_paged_users()[0]["pk"] == user.public_primary_key + # user was paged and then paged again, then unpaged - they should not show up + alert_group = make_alert_group(alert_receive_channel) + _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING) + 
_make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING) + _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_UNPAGE_USER) + + paged_users = alert_group.get_paged_users() + assert len(paged_users) == 0 + + # adding extra unpage events should not break things + _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_UNPAGE_USER) + _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_UNPAGE_USER) + _make_log_record(alert_group, user, AlertGroupLogRecord.TYPE_DIRECT_PAGING) + + paged_users = alert_group.get_paged_users() + assert len(paged_users) == 1 + assert alert_group.get_paged_users()[0]["pk"] == user.public_primary_key + @patch("apps.alerts.models.AlertGroup.start_unsilence_task", return_value=None) @pytest.mark.django_db From 1c339645431caa897ea7db3eff2f6e32eb1494bc Mon Sep 17 00:00:00 2001 From: Matias Bordese Date: Thu, 18 Jul 2024 11:15:58 -0300 Subject: [PATCH 07/24] Update schedule events internal API to return default priority level (#4697) No need to change 0 to `None` in priority level when returning schedule events (related to this [thread](https://raintank-corp.slack.com/archives/C0229FD3CE9/p1721239227358309?thread_ts=1721148066.857589&cid=C0229FD3CE9)) --- engine/apps/api/tests/test_oncall_shift.py | 2 +- engine/apps/api/tests/test_schedules.py | 4 ++-- .../apps/schedules/models/on_call_schedule.py | 2 +- .../schedules/tests/test_on_call_schedule.py | 22 +++++++++---------- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/engine/apps/api/tests/test_oncall_shift.py b/engine/apps/api/tests/test_oncall_shift.py index 3e47100902..9ebaa9f6f1 100644 --- a/engine/apps/api/tests/test_oncall_shift.py +++ b/engine/apps/api/tests/test_oncall_shift.py @@ -1778,7 +1778,7 @@ def test_on_call_shift_preview_without_users( "is_override": False, "is_empty": True, "is_gap": False, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [], "source": "web", diff --git 
a/engine/apps/api/tests/test_schedules.py b/engine/apps/api/tests/test_schedules.py index 7755ad1f10..e0477ec003 100644 --- a/engine/apps/api/tests/test_schedules.py +++ b/engine/apps/api/tests/test_schedules.py @@ -1201,7 +1201,7 @@ def test_filter_events_overrides( } ], "missing_users": [], - "priority_level": None, + "priority_level": 0, "source": "api", "calendar_type": OnCallSchedule.OVERRIDES, "is_empty": False, @@ -1307,7 +1307,7 @@ def test_filter_events_final_schedule( "end": start_date + timezone.timedelta(hours=start + duration), "is_gap": is_gap, "is_override": is_override, - "priority_level": priority, + "priority_level": priority or 0, "start": start_date + timezone.timedelta(hours=start), "user": user, } diff --git a/engine/apps/schedules/models/on_call_schedule.py b/engine/apps/schedules/models/on_call_schedule.py index 5dcd83e462..78636333f9 100644 --- a/engine/apps/schedules/models/on_call_schedule.py +++ b/engine/apps/schedules/models/on_call_schedule.py @@ -412,7 +412,7 @@ def filter_events( for user in shift["users"] ], "missing_users": shift["missing_users"], - "priority_level": shift["priority"] if shift["priority"] != 0 else None, + "priority_level": shift["priority"] or 0, "source": shift["source"], "calendar_type": shift["calendar_type"], "is_empty": len(shift["users"]) == 0 and not is_gap, diff --git a/engine/apps/schedules/tests/test_on_call_schedule.py b/engine/apps/schedules/tests/test_on_call_schedule.py index 6baa33fdf8..4e249d67c6 100644 --- a/engine/apps/schedules/tests/test_on_call_schedule.py +++ b/engine/apps/schedules/tests/test_on_call_schedule.py @@ -120,7 +120,7 @@ def test_filter_events(make_organization, make_user_for_organization, make_sched "is_override": True, "is_empty": False, "is_gap": False, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [ { @@ -178,7 +178,7 @@ def test_filter_events_include_gaps(make_organization, make_user_for_organizatio "is_override": False, "is_empty": False, 
"is_gap": True, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [], "shift": {"pk": None}, @@ -213,7 +213,7 @@ def test_filter_events_include_gaps(make_organization, make_user_for_organizatio "is_override": False, "is_empty": False, "is_gap": True, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [], "shift": {"pk": None}, @@ -263,7 +263,7 @@ def test_filter_events_include_shift_info( "is_override": False, "is_empty": False, "is_gap": True, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [], "shift": {"pk": None}, @@ -302,7 +302,7 @@ def test_filter_events_include_shift_info( "is_override": False, "is_empty": False, "is_gap": True, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [], "shift": {"pk": None}, @@ -512,7 +512,7 @@ def test_final_schedule_events( "end": start_date + timezone.timedelta(hours=start + duration), "is_gap": is_gap, "is_override": is_override, - "priority_level": priority, + "priority_level": priority or 0, "start": start_date + timezone.timedelta(hours=start), "user": user, "shift": ( @@ -599,7 +599,7 @@ def test_final_schedule_override_no_priority_shift( "calendar_type": 1 if is_override else 0, "end": start_date + timezone.timedelta(hours=start + duration), "is_override": is_override, - "priority_level": priority, + "priority_level": priority or 0, "start": start_date + timezone.timedelta(hours=start, milliseconds=1 if start == 0 else 0), "user": user, } @@ -679,7 +679,7 @@ def test_final_schedule_override_split( "calendar_type": 1 if is_override else 0, "end": start_date + timezone.timedelta(hours=start + duration), "is_override": is_override, - "priority_level": priority, + "priority_level": priority or 0, "start": start_date + timezone.timedelta(hours=start, milliseconds=1 if start == 0 else 0), "user": user, } @@ -1051,7 +1051,7 @@ def test_preview_shift_no_user(make_organization, make_user_for_organization, ma 
"is_override": False, "is_empty": True, "is_gap": False, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [], "shift": {"pk": new_shift.public_primary_key}, @@ -1130,7 +1130,7 @@ def test_preview_override_shift(make_organization, make_user_for_organization, m "is_override": True, "is_empty": False, "is_gap": False, - "priority_level": None, + "priority_level": 0, "missing_users": [], "users": [ { @@ -1156,7 +1156,7 @@ def test_preview_override_shift(make_organization, make_user_for_organization, m expected_events = [ { "end": start_date + timezone.timedelta(hours=start + duration), - "priority_level": priority, + "priority_level": priority or 0, "start": start_date + timezone.timedelta(hours=start, milliseconds=1 if start == 0 else 0), "user": user, "is_override": is_override, From 8d82b078d3c803c93354d961334ada2580afcaec Mon Sep 17 00:00:00 2001 From: Dominik Broj Date: Fri, 19 Jul 2024 07:47:34 +0200 Subject: [PATCH 08/24] Cleanup and split tiltfile by profiles (#4691) # What this PR does Make it possible to select what resources are installed in Tilt by doing `ONCALL_PROFILES=grafana,plugin,backend,tests tilt up` ## Which issue(s) this PR closes Related to https://github.com/grafana/irm/issues/3 ## Checklist - [ ] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] Added the relevant release notes label (see labels prefixed w/ `release:`). These labels dictate how your PR will show up in the autogenerated release notes. 
--- .github/workflows/e2e-tests.yml | 2 +- .tilt/backend/Tiltfile | 16 +++ .tilt/deps/Tiltfile | 11 ++ .tilt/plugin/Tiltfile | 26 ++++ .tilt/tests/Tiltfile | 80 ++++++++++++ Tiltfile | 219 +++++++++----------------------- dev/README.md | 8 ++ 7 files changed, 204 insertions(+), 158 deletions(-) create mode 100644 .tilt/backend/Tiltfile create mode 100644 .tilt/deps/Tiltfile create mode 100644 .tilt/plugin/Tiltfile create mode 100644 .tilt/tests/Tiltfile diff --git a/.github/workflows/e2e-tests.yml b/.github/workflows/e2e-tests.yml index fcc9e03069..675d11ec5d 100644 --- a/.github/workflows/e2e-tests.yml +++ b/.github/workflows/e2e-tests.yml @@ -150,7 +150,7 @@ jobs: if: inputs.run-expensive-tests shell: bash env: - E2E_TESTS_CMD: "cd grafana-plugin && yarn test:e2e-expensive" + E2E_TESTS_CMD: "cd ../../grafana-plugin && yarn test:e2e-expensive" GRAFANA_VERSION: ${{ inputs.grafana_version }} GRAFANA_ADMIN_USERNAME: "irm" GRAFANA_ADMIN_PASSWORD: "irm" diff --git a/.tilt/backend/Tiltfile b/.tilt/backend/Tiltfile new file mode 100644 index 0000000000..52493436c8 --- /dev/null +++ b/.tilt/backend/Tiltfile @@ -0,0 +1,16 @@ +label = "OnCall.Backend" + +k8s_resource( + workload="celery", + resource_deps=["mariadb", "redis-master"], + labels=[label], +) + +k8s_resource( + workload="engine", + port_forwards=8080, + resource_deps=["mariadb", "redis-master"], + labels=[label], +) + +k8s_resource(workload="engine-migrate", labels=[label]) \ No newline at end of file diff --git a/.tilt/deps/Tiltfile b/.tilt/deps/Tiltfile new file mode 100644 index 0000000000..acd4e44eef --- /dev/null +++ b/.tilt/deps/Tiltfile @@ -0,0 +1,11 @@ +label = "OnCall.Deps" + +k8s_resource(workload="redis-master", labels=[label]) + +k8s_resource(workload="prometheus-server", labels=[label]) + +k8s_resource( + workload="mariadb", + port_forwards='3307:3306', # : + labels=[label], +) \ No newline at end of file diff --git a/.tilt/plugin/Tiltfile b/.tilt/plugin/Tiltfile new file mode 100644 index 
0000000000..858ecffd74 --- /dev/null +++ b/.tilt/plugin/Tiltfile @@ -0,0 +1,26 @@ +label = "OnCall.Plugin" + +is_ci=config.tilt_subcommand == "ci" +grafana_plugin_dir="../../grafana-plugin" + +# On CI dependencies are installed separately so we just build prod bundle to be consumed by Grafana dev server +if is_ci: + local_resource( + "build-ui", + labels=[label], + dir=grafana_plugin_dir, + cmd="yarn build", + allow_parallel=True, + ) + +# Locally we install dependencies and we run watch mode +if not is_ci: + local_resource( + "build-ui", + labels=[label], + dir=grafana_plugin_dir, + cmd="yarn install", + serve_dir=grafana_plugin_dir, + serve_cmd="yarn watch", + allow_parallel=True, + ) \ No newline at end of file diff --git a/.tilt/tests/Tiltfile b/.tilt/tests/Tiltfile new file mode 100644 index 0000000000..7fec693baf --- /dev/null +++ b/.tilt/tests/Tiltfile @@ -0,0 +1,80 @@ +label = "OnCall.AllTests" + +load('ext://uibutton', 'cmd_button', 'location', 'text_input', 'bool_input') + +e2e_tests_cmd=os.getenv("E2E_TESTS_CMD", "cd ../../grafana-plugin && yarn test:e2e") +is_ci=config.tilt_subcommand == "ci" + +local_resource( + "e2e-tests", + labels=[label], + cmd=e2e_tests_cmd, + trigger_mode=TRIGGER_MODE_MANUAL, + auto_init=is_ci, + resource_deps=["build-ui", "grafana", "grafana-oncall-app-provisioning-configmap", "engine", "celery"] +) + +cmd_button( + name="E2E Tests - headless run", + argv=["sh", "-c", "yarn --cwd ./grafana-plugin test:e2e $STOP_ON_FIRST_FAILURE $TESTS_FILTER"], + text="Restart headless run", + resource="e2e-tests", + icon_name="replay", + inputs=[ + text_input("BROWSERS", "Browsers (e.g. \"chromium,firefox,webkit\")", "chromium", "chromium,firefox,webkit"), + text_input("TESTS_FILTER", "Test filter (e.g. 
\"timezones.test quality.test\")", "", "Test file names to run"), + bool_input("STOP_ON_FIRST_FAILURE", "Stop on first failure", True, "-x", ""), + ] +) + +cmd_button( + name="E2E Tests - open watch mode", + argv=["sh", "-c", "yarn --cwd grafana-plugin test:e2e:watch"], + text="Open watch mode", + resource="e2e-tests", + icon_name="visibility", +) + +cmd_button( + name="E2E Tests - show report", + argv=["sh", "-c", "yarn --cwd grafana-plugin playwright show-report"], + text="Show last HTML report", + resource="e2e-tests", + icon_name="assignment", +) + +cmd_button( + name="E2E Tests - stop current run", + argv=["sh", "-c", "kill -9 $(pgrep -f test:e2e)"], + text="Stop", + resource="e2e-tests", + icon_name="dangerous", +) + +# Inspired by https://github.com/grafana/slo/blob/main/Tiltfile#L72 +pod_engine_pytest_script = ''' +set -eu +# get engine k8s pod name from tilt resource name +POD_NAME="$(tilt get kubernetesdiscovery "engine" -ojsonpath='{.status.pods[0].name}')" +kubectl exec "$POD_NAME" -- pytest . $STOP_ON_FIRST_FAILURE $TESTS_FILTER +''' +local_resource( + "pytest-tests", + labels=[label], + cmd=['sh', '-c', pod_engine_pytest_script], + trigger_mode=TRIGGER_MODE_MANUAL, + auto_init=False, + resource_deps=["engine"] +) + +cmd_button( + name="pytest Tests - headless run", + argv=['sh', '-c', pod_engine_pytest_script], + text="Run pytest", + resource="pytest-tests", + icon_name="replay", + inputs=[ + text_input("TESTS_FILTER", "pytest optional arguments (e.g. 
\"apps/webhooks/tests/test_webhook.py::test_build_url_private_raises\")", "", "Test file names to run"), + bool_input("STOP_ON_FIRST_FAILURE", "Stop on first failure", True, "-x", ""), + ] +) \ No newline at end of file diff --git a/Tiltfile b/Tiltfile index df0e53fa29..ab4a1f64f8 100644 --- a/Tiltfile +++ b/Tiltfile @@ -1,9 +1,7 @@ load('ext://uibutton', 'cmd_button', 'location', 'text_input', 'bool_input') +load("ext://configmap", "configmap_create") + running_under_parent_tiltfile = os.getenv("TILT_PARENT", "false") == "true" -# The user/pass that you will login to Grafana with -grafana_admin_user_pass = os.getenv("GRAFANA_ADMIN_USER_PASS", "oncall") -grafana_version = os.getenv("GRAFANA_VERSION", "latest") -e2e_tests_cmd=os.getenv("E2E_TESTS_CMD", "cd grafana-plugin && yarn test:e2e") twilio_values=[ "oncall.twilio.accountSid=" + os.getenv("TWILIO_ACCOUNT_SID", ""), "oncall.twilio.authToken=" + os.getenv("TWILIO_AUTH_TOKEN", ""), @@ -16,25 +14,11 @@ HELM_PREFIX = "oncall-dev" # Use docker registery generated by ctlptl (dev/kind-config.yaml) DOCKER_REGISTRY = "localhost:63628/" -if not running_under_parent_tiltfile: - # Load the custom Grafana extensions - v1alpha1.extension_repo( - name="grafana-tilt-extensions", - ref="v1.2.0", - url="https://github.com/grafana/tilt-extensions", - ) -v1alpha1.extension( - name="grafana", repo_name="grafana-tilt-extensions", repo_path="grafana" -) - -load("ext://grafana", "grafana") -load("ext://configmap", "configmap_create") load("ext://docker_build_sub", "docker_build_sub") # Tell ops-devenv/Tiltifle where our plugin.json file lives plugin_file = os.path.abspath("grafana-plugin/src/plugin.json") - def plugin_json(): return plugin_file @@ -59,108 +43,15 @@ docker_build_sub( ], ) -# On CI dependencies are installed separately so we just build prod bundle to be consumed by Grafana dev server -if is_ci: - local_resource( - "build-ui", - labels=["OnCallUI"], - dir="grafana-plugin", - cmd="yarn build", - allow_parallel=True, - ) - 
-# Locally we install dependencies and we run watch mode -if not is_ci: - local_resource( - "build-ui", - labels=["OnCallUI"], - dir="grafana-plugin", - cmd="yarn install", - serve_dir="grafana-plugin", - serve_cmd="yarn watch", - allow_parallel=True, - ) - -local_resource( - "e2e-tests", - labels=["allTests"], - cmd=e2e_tests_cmd, - trigger_mode=TRIGGER_MODE_MANUAL, - auto_init=is_ci, - resource_deps=["build-ui", "grafana", "grafana-oncall-app-provisioning-configmap", "engine", "celery"] -) - -cmd_button( - name="E2E Tests - headless run", - argv=["sh", "-c", "yarn --cwd ./grafana-plugin test:e2e $STOP_ON_FIRST_FAILURE $TESTS_FILTER"], - text="Restart headless run", - resource="e2e-tests", - icon_name="replay", - inputs=[ - text_input("BROWSERS", "Browsers (e.g. \"chromium,firefox,webkit\")", "chromium", "chromium,firefox,webkit"), - text_input("TESTS_FILTER", "Test filter (e.g. \"timezones.test quality.test\")", "", "Test file names to run"), - bool_input("STOP_ON_FIRST_FAILURE", "Stop on first failure", True, "-x", ""), - ] -) - -cmd_button( - name="E2E Tests - open watch mode", - argv=["sh", "-c", "yarn --cwd grafana-plugin test:e2e:watch"], - text="Open watch mode", - resource="e2e-tests", - icon_name="visibility", -) - -cmd_button( - name="E2E Tests - show report", - argv=["sh", "-c", "yarn --cwd grafana-plugin playwright show-report"], - text="Show last HTML report", - resource="e2e-tests", - icon_name="assignment", -) - -cmd_button( - name="E2E Tests - stop current run", - argv=["sh", "-c", "kill -9 $(pgrep -f test:e2e)"], - text="Stop", - resource="e2e-tests", - icon_name="dangerous", -) -# Inspired by https://github.com/grafana/slo/blob/main/Tiltfile#L72 -pod_engine_pytest_script = ''' -set -eu -# get engine k8s pod name from tilt resource name -POD_NAME="$(tilt get kubernetesdiscovery "engine" -ojsonpath='{.status.pods[0].name}')" -kubectl exec "$POD_NAME" -- pytest . 
$STOP_ON_FIRST_FAILURE $TESTS_FILTER -''' -local_resource( - "pytest-tests", - labels=["allTests"], - cmd=['sh', '-c', pod_engine_pytest_script], - trigger_mode=TRIGGER_MODE_MANUAL, - auto_init=False, - resource_deps=["engine"] -) - -cmd_button( - name="pytest Tests - headless run", - argv=['sh', '-c', pod_engine_pytest_script], - text="Run pytest", - resource="pytest-tests", - icon_name="replay", - inputs=[ - text_input("TESTS_FILTER", "pytest optional arguments (e.g. \"apps/webhooks/tests/test_webhook.py::test_build_url_private_raises\")", "", "Test file names to run"), - bool_input("STOP_ON_FIRST_FAILURE", "Stop on first failure", True, "-x", ""), - ] -) +def load_oncall_helm(): + helm_oncall_values = ["./dev/helm-local.yml", "./dev/helm-local.dev.yml"] + if is_ci: + helm_oncall_values = helm_oncall_values + ["./.github/helm-ci.yml"] + yaml = helm("helm/oncall", name=HELM_PREFIX, values=helm_oncall_values, set=twilio_values, namespace="default") + k8s_yaml(yaml) -helm_oncall_values = ["./dev/helm-local.yml", "./dev/helm-local.dev.yml"] -if is_ci: - helm_oncall_values = helm_oncall_values + ["./.github/helm-ci.yml"] -yaml = helm("helm/oncall", name=HELM_PREFIX, values=helm_oncall_values, set=twilio_values, namespace="default") - -k8s_yaml(yaml) +# --- GRAFANA START ---- # Generate and load the grafana deploy yaml configmap_create( @@ -169,49 +60,63 @@ configmap_create( from_file="dev/grafana/provisioning/plugins/grafana-oncall-app-provisioning.yaml", ) -k8s_resource( - objects=["grafana-oncall-app-provisioning:configmap"], - new_name="grafana-oncall-app-provisioning-configmap", - resource_deps=["build-ui"], - labels=["Grafana"], -) - -# Use separate grafana helm chart if not running_under_parent_tiltfile: - grafana( - grafana_version=grafana_version, - context="grafana-plugin", - plugin_files=["grafana-plugin/src/plugin.json"], - namespace="default", - deps=["grafana-oncall-app-provisioning-configmap", "build-ui"], - extra_env={ - "GF_SECURITY_ADMIN_PASSWORD": 
"oncall", - "GF_SECURITY_ADMIN_USER": "oncall", - "GF_AUTH_ANONYMOUS_ENABLED": "false", - }, + # Load the custom Grafana extensions + v1alpha1.extension_repo( + name="grafana-tilt-extensions", + ref="v1.2.0", + url="https://github.com/grafana/tilt-extensions", ) - -k8s_resource( - workload="celery", - resource_deps=["mariadb", "redis-master"], - labels=["OnCallBackend"], -) -k8s_resource( - workload="engine", - port_forwards=8080, - resource_deps=["mariadb", "redis-master"], - labels=["OnCallBackend"], -) -k8s_resource(workload="engine-migrate", labels=["OnCallBackend"]) - -k8s_resource(workload="redis-master", labels=["OnCallDeps"]) -k8s_resource(workload="prometheus-server", labels=["OnCallDeps"]) -k8s_resource( - workload="mariadb", - port_forwards='3307:3306', # : - labels=["OnCallDeps"], +v1alpha1.extension( + name="grafana", repo_name="grafana-tilt-extensions", repo_path="grafana" ) +load("ext://grafana", "grafana") + +def load_grafana(): + # The user/pass that you will login to Grafana with + grafana_admin_user_pass = os.getenv("GRAFANA_ADMIN_USER_PASS", "oncall") + grafana_version = os.getenv("GRAFANA_VERSION", "latest") + + + k8s_resource( + objects=["grafana-oncall-app-provisioning:configmap"], + new_name="grafana-oncall-app-provisioning-configmap", + resource_deps=["build-ui"], + labels=["Grafana"], + ) + # Use separate grafana helm chart + if not running_under_parent_tiltfile: + grafana( + grafana_version=grafana_version, + context="grafana-plugin", + plugin_files=["grafana-plugin/src/plugin.json"], + namespace="default", + deps=["grafana-oncall-app-provisioning-configmap", "build-ui"], + extra_env={ + "GF_SECURITY_ADMIN_PASSWORD": "oncall", + "GF_SECURITY_ADMIN_USER": "oncall", + "GF_AUTH_ANONYMOUS_ENABLED": "false", + }, + ) +# --- GRAFANA END ---- + + +def get_profiles(): + profiles = os.getenv('ONCALL_PROFILES', 'grafana,plugin,backend,tests') + return profiles.split(',') +profiles = get_profiles() + +if 'grafana' in profiles: + load_grafana() +if 
'plugin' in profiles: + include(".tilt/plugin/Tiltfile") +if 'backend' in profiles: + load_oncall_helm() + include(".tilt/backend/Tiltfile") + include(".tilt/deps/Tiltfile") +if 'tests' in profiles: + include(".tilt/tests/Tiltfile") # name all tilt resources after the k8s object namespace + name def resource_name(id): diff --git a/dev/README.md b/dev/README.md index c6f82c5abf..7b0b308f4e 100644 --- a/dev/README.md +++ b/dev/README.md @@ -36,6 +36,14 @@ Related: [How to develop integrations](/engine/config_integrations/README.md) value: "True" ``` + You can also choose set of resources that will be installed in your local cluster, e.g.: + + ```bash + ONCALL_PROFILES=grafana,plugin,backend tilt up + ``` + + Available profiles are: `grafana,plugin,backend,tests`, by default all the profiles are enabled. + 3. Wait until all resources are green and open (user: oncall, password: oncall) 4. Modify source code, backend and frontend will be hot reloaded From 1465db36e55852bcd4a6cf5e30d389e688c53cea Mon Sep 17 00:00:00 2001 From: Innokentii Konstantinov Date: Fri, 19 Jul 2024 18:08:52 +0800 Subject: [PATCH 09/24] Support stack header from chatops-proxy (#4578) This PR supports new flow of selecting org to run command in a slack workspace if several stacks are using it. In this case user selects default stack to run commands or pass a --stack flag. Both handled by chatops-proxy which injects selected stack as a header. On a side note - I found one ScenarioStep with incompatible set of arguments with parent class. 
I didn't fixed it, just left TODO https://github.com/grafana/oncall/pull/4578/files#diff-e323b5f38ed665f73d5da3fa0575958ed54ab587f6521b4cd87e1491b5430f8bR364 Related to https://github.com/grafana/oncall-gateway/issues/256 --------- Co-authored-by: Vadim Stepanov --- .../slack/scenarios/alertgroup_appearance.py | 7 +- .../slack/scenarios/alertgroup_timeline.py | 2 + .../apps/slack/scenarios/declare_incident.py | 4 +- .../apps/slack/scenarios/distribute_alerts.py | 40 ++++--- .../slack/scenarios/invited_to_channel.py | 4 +- .../apps/slack/scenarios/manage_responders.py | 15 ++- .../scenarios/notified_user_not_in_channel.py | 4 +- engine/apps/slack/scenarios/onboarding.py | 7 +- engine/apps/slack/scenarios/paging.py | 70 +++++++++--- engine/apps/slack/scenarios/profile_update.py | 4 +- .../apps/slack/scenarios/resolution_note.py | 11 +- engine/apps/slack/scenarios/scenario_step.py | 11 ++ engine/apps/slack/scenarios/schedules.py | 4 +- .../slack/scenarios/shift_swap_requests.py | 4 +- engine/apps/slack/scenarios/slack_channel.py | 13 ++- .../scenarios/slack_channel_integration.py | 4 +- .../apps/slack/scenarios/slack_usergroup.py | 7 +- .../tests/test_interactive_api_endpoint.py | 64 +++++++++-- .../tests/test_scenario_steps/test_paging.py | 107 ++++++++++++++---- engine/apps/slack/views.py | 19 +++- 20 files changed, 319 insertions(+), 82 deletions(-) diff --git a/engine/apps/slack/scenarios/alertgroup_appearance.py b/engine/apps/slack/scenarios/alertgroup_appearance.py index 431e1a211b..b07bd85b5e 100644 --- a/engine/apps/slack/scenarios/alertgroup_appearance.py +++ b/engine/apps/slack/scenarios/alertgroup_appearance.py @@ -18,6 +18,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization class OpenAlertAppearanceDialogStep(AlertGroupActionsMixin, scenario_step.ScenarioStep): @@ -27,7 +28,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", 
slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -75,7 +77,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: from apps.alerts.models import AlertGroup diff --git a/engine/apps/slack/scenarios/alertgroup_timeline.py b/engine/apps/slack/scenarios/alertgroup_timeline.py index 87976b493c..08f74b8802 100644 --- a/engine/apps/slack/scenarios/alertgroup_timeline.py +++ b/engine/apps/slack/scenarios/alertgroup_timeline.py @@ -13,6 +13,7 @@ PayloadType, ScenarioRoute, ) +from apps.user_management.models import Organization from .step_mixins import AlertGroupActionsMixin @@ -28,6 +29,7 @@ def process_scenario( slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", payload: EventPayload, + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): diff --git a/engine/apps/slack/scenarios/declare_incident.py b/engine/apps/slack/scenarios/declare_incident.py index 7a7589df5b..cf50e20d35 100644 --- a/engine/apps/slack/scenarios/declare_incident.py +++ b/engine/apps/slack/scenarios/declare_incident.py @@ -5,6 +5,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization class DeclareIncidentStep(scenario_step.ScenarioStep): @@ -12,7 +13,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: 
typing.Optional["Organization"] = None, ) -> None: """ Slack sends a POST request to the backend upon clicking a button with a redirect link to Incident. diff --git a/engine/apps/slack/scenarios/distribute_alerts.py b/engine/apps/slack/scenarios/distribute_alerts.py index 6eb5586067..c4fb8890e6 100644 --- a/engine/apps/slack/scenarios/distribute_alerts.py +++ b/engine/apps/slack/scenarios/distribute_alerts.py @@ -50,6 +50,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization ATTACH_TO_ALERT_GROUPS_LIMIT = 20 @@ -222,7 +223,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: pass @@ -239,7 +241,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: from apps.user_management.models import User @@ -275,7 +278,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -304,7 +308,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -324,7 +329,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: 
"SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -490,7 +496,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: # submit selection in modal window if payload["type"] == PayloadType.VIEW_SUBMISSION: @@ -542,7 +549,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -567,7 +575,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -594,7 +603,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: ResolutionNoteModalStep = scenario_step.ScenarioStep.get_step("resolution_note", "ResolutionNoteModalStep") @@ -635,7 +645,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not 
self.is_authorized(alert_group): @@ -655,7 +666,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -675,7 +687,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -736,7 +749,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: from apps.alerts.models import AlertGroup diff --git a/engine/apps/slack/scenarios/invited_to_channel.py b/engine/apps/slack/scenarios/invited_to_channel.py index e56ab9f41e..acdd5fecb6 100644 --- a/engine/apps/slack/scenarios/invited_to_channel.py +++ b/engine/apps/slack/scenarios/invited_to_channel.py @@ -9,6 +9,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) @@ -19,7 +20,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: if payload["event"]["user"] == slack_team_identity.bot_user_id: channel_id = payload["event"]["channel"] diff --git a/engine/apps/slack/scenarios/manage_responders.py 
b/engine/apps/slack/scenarios/manage_responders.py index 63b8bd7fe1..8068475a09 100644 --- a/engine/apps/slack/scenarios/manage_responders.py +++ b/engine/apps/slack/scenarios/manage_responders.py @@ -18,7 +18,8 @@ if typing.TYPE_CHECKING: from apps.alerts.models import AlertGroup from apps.slack.models import SlackTeamIdentity, SlackUserIdentity - from apps.user_management.models import User + from apps.user_management.models import Organization, User + MANAGE_RESPONDERS_USER_SELECT_ID = "responders_user_select" @@ -35,7 +36,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = self.get_alert_group(slack_team_identity, payload) if not self.is_authorized(alert_group): @@ -53,7 +55,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = _get_alert_group_from_payload(payload) selected_user = _get_selected_user_from_payload(payload) @@ -99,7 +102,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = _get_alert_group_from_payload(payload) selected_user = _get_selected_user_from_payload(payload) @@ -132,7 +136,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: alert_group = _get_alert_group_from_payload(payload) selected_user = _get_selected_user_from_payload(payload) diff --git 
a/engine/apps/slack/scenarios/notified_user_not_in_channel.py b/engine/apps/slack/scenarios/notified_user_not_in_channel.py index d3f045d498..f1af583135 100644 --- a/engine/apps/slack/scenarios/notified_user_not_in_channel.py +++ b/engine/apps/slack/scenarios/notified_user_not_in_channel.py @@ -6,6 +6,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization logger = logging.getLogger(__name__) @@ -20,7 +21,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: logger.info("Gracefully handle NotifiedUserNotInChannelStep. Do nothing.") pass diff --git a/engine/apps/slack/scenarios/onboarding.py b/engine/apps/slack/scenarios/onboarding.py index d84dc1ef5d..9ca5305b1a 100644 --- a/engine/apps/slack/scenarios/onboarding.py +++ b/engine/apps/slack/scenarios/onboarding.py @@ -6,6 +6,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization logger = logging.getLogger(__name__) @@ -19,7 +20,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: logger.info("InOpenStep, doing nothing.") @@ -29,7 +31,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: pass diff --git a/engine/apps/slack/scenarios/paging.py b/engine/apps/slack/scenarios/paging.py index b9191234e1..ee38dc3664 100644 --- a/engine/apps/slack/scenarios/paging.py +++ 
b/engine/apps/slack/scenarios/paging.py @@ -121,7 +121,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: input_id_prefix = _generate_input_id_prefix() @@ -136,6 +137,11 @@ def process_scenario( "submit_routing_uid": FinishDirectPaging.routing_uid(), DataKey.USERS: {}, } + # We have access to predefined org only in StartDirectPaging, since it's a slash command. + # Chatops-Proxy adds a special header to slash commands payload to define the organization. + # Other Paging steps are triggered by buttons and actions, + # so we don't have access to predefined org and use private metadata instead. + private_metadata = _inject_predefined_org_to_private_metadata(predefined_org, private_metadata) initial_payload = {"view": {"private_metadata": json.dumps(private_metadata)}} view = render_dialog(slack_user_identity, slack_team_identity, initial_payload, initial=True) self._slack_client.views_open( @@ -153,13 +159,15 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: message = _get_message_from_payload(payload) private_metadata = json.loads(payload["view"]["private_metadata"]) + predefined_org = _get_predefined_org_from_private_metadata(private_metadata, slack_team_identity) channel_id = private_metadata["channel_id"] input_id_prefix = private_metadata["input_id_prefix"] - selected_organization = _get_selected_org_from_payload( + selected_organization = predefined_org or _get_selected_org_from_payload( payload, input_id_prefix, slack_team_identity, slack_user_identity ) @@ -245,7 +253,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: 
EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: updated_payload = reset_items(payload) view = render_dialog(slack_user_identity, slack_team_identity, updated_payload) @@ -263,7 +272,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: view = render_dialog(slack_user_identity, slack_team_identity, payload) self._slack_client.views_update( @@ -283,7 +293,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: private_metadata = json.loads(payload["view"]["private_metadata"]) selected_user = _get_selected_user_from_payload(payload, private_metadata["input_id_prefix"]) @@ -336,7 +347,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: policy, key, user_pk = self._parse_action(payload) @@ -361,7 +373,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: metadata = json.loads(payload["view"]["private_metadata"]) @@ -409,7 +422,7 @@ def render_dialog( private_metadata = json.loads(payload["view"]["private_metadata"]) submit_routing_uid = private_metadata.get("submit_routing_uid") - # Get organizations available to user + predefined_org = _get_predefined_org_from_private_metadata(private_metadata, slack_team_identity) available_organizations = 
_get_available_organizations(slack_team_identity, slack_user_identity) if initial: @@ -417,15 +430,17 @@ def render_dialog( new_input_id_prefix = _generate_input_id_prefix() new_private_metadata = private_metadata new_private_metadata["input_id_prefix"] = new_input_id_prefix - selected_organization = available_organizations.first() + selected_organization = predefined_org if predefined_org else available_organizations.first() is_team_selected, selected_team = False, None else: # setup form using data/state old_input_id_prefix, new_input_id_prefix, new_private_metadata = _get_and_change_input_id_prefix_from_metadata( private_metadata ) - selected_organization = _get_selected_org_from_payload( - payload, old_input_id_prefix, slack_team_identity, slack_user_identity + selected_organization = ( + predefined_org + if predefined_org + else _get_selected_org_from_payload(payload, old_input_id_prefix, slack_team_identity, slack_user_identity) ) is_team_selected, selected_team = _get_selected_team_from_payload(payload, old_input_id_prefix) @@ -441,8 +456,9 @@ def render_dialog( blocks.append(_get_message_input(payload)) - # Add organization select if more than one organization available for user + # Add organization select if org is not defined on chatops-proxy (it should happen only in OSS) + # and user has access to multiple orgs. + if not predefined_org and len(available_organizations) > 1: organization_select = _get_organization_select( available_organizations, selected_organization, new_input_id_prefix ) @@ -570,6 +586,32 @@ def _get_selected_org_from_payload( return Organization.objects.filter(pk=selected_org_id).first() +def _inject_predefined_org_to_private_metadata( + predefined_org: typing.Optional["Organization"], private_metadata: dict +) -> dict: + """ + Injects predefined organization to private metadata. + Predefined org is org defined by chatops-proxy for slash commands. 
+ """ + if predefined_org: + private_metadata["organization_id"] = predefined_org.pk + return private_metadata + + +def _get_predefined_org_from_private_metadata( + private_metadata: dict, + slack_team_identity: "SlackTeamIdentity", +) -> typing.Optional["Organization"]: + """ + Returns organization from private metadata. + """ + org_id = private_metadata.get("organization_id") + if not org_id: + return None + + return slack_team_identity.organizations.filter(pk=org_id).first() + + def _get_team_select_blocks( slack_user_identity: "SlackUserIdentity", organization: "Organization", diff --git a/engine/apps/slack/scenarios/profile_update.py b/engine/apps/slack/scenarios/profile_update.py index 3c46d9baa0..fa7cd9838b 100644 --- a/engine/apps/slack/scenarios/profile_update.py +++ b/engine/apps/slack/scenarios/profile_update.py @@ -6,6 +6,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization class ProfileUpdateStep(scenario_step.ScenarioStep): @@ -13,7 +14,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: Any update in Slack Profile. 
diff --git a/engine/apps/slack/scenarios/resolution_note.py b/engine/apps/slack/scenarios/resolution_note.py index 67ddea664e..50f907e79b 100644 --- a/engine/apps/slack/scenarios/resolution_note.py +++ b/engine/apps/slack/scenarios/resolution_note.py @@ -38,6 +38,8 @@ if typing.TYPE_CHECKING: from apps.alerts.models import AlertGroup, ResolutionNote, ResolutionNoteSlackMessage from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization + logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) @@ -65,7 +67,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: from apps.alerts.models import ResolutionNote, ResolutionNoteSlackMessage from apps.slack.models import SlackMessage, SlackUserIdentity @@ -357,7 +360,8 @@ def process_scenario( slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", payload: EventPayload, - data: ScenarioData | None = None, + # TODO: data is incompatible override, parent class has a different set of arguments + data: ScenarioData | None = None, # type: ignore ) -> None: if data: # Argument "data" is used when step is called from other step, e.g. 
AddRemoveThreadMessageStep @@ -642,7 +646,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: from apps.alerts.models import AlertGroup, ResolutionNote, ResolutionNoteSlackMessage diff --git a/engine/apps/slack/scenarios/scenario_step.py b/engine/apps/slack/scenarios/scenario_step.py index 37fa2911b8..99811ebba2 100644 --- a/engine/apps/slack/scenarios/scenario_step.py +++ b/engine/apps/slack/scenarios/scenario_step.py @@ -48,7 +48,18 @@ def process_scenario( slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: + """ + process_scenario executes the logic of the step on slack interaction. + Args: + slack_user_identity: SlackUserIdentity who interacted with slack + slack_team_identity: Slack Workspace where interaction happened + payload: EventPayload from slack + predefined_org: + Organization where interaction happened. 
+ It's optionally defined by chatops-proxy for slash commands and should be used only in SlashCommands steps + """ pass @classmethod diff --git a/engine/apps/slack/scenarios/schedules.py b/engine/apps/slack/scenarios/schedules.py index 4762d90ce9..105b8c47cd 100644 --- a/engine/apps/slack/scenarios/schedules.py +++ b/engine/apps/slack/scenarios/schedules.py @@ -20,6 +20,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization class EditScheduleShiftNotifyStep(scenario_step.ScenarioStep): @@ -32,7 +33,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: action_type = payload["actions"][0]["type"] if action_type == BlockActionType.BUTTON: diff --git a/engine/apps/slack/scenarios/shift_swap_requests.py b/engine/apps/slack/scenarios/shift_swap_requests.py index d9c54bf198..d114f615b4 100644 --- a/engine/apps/slack/scenarios/shift_swap_requests.py +++ b/engine/apps/slack/scenarios/shift_swap_requests.py @@ -15,6 +15,7 @@ if typing.TYPE_CHECKING: from apps.schedules.models import ShiftSwapRequest from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) @@ -197,7 +198,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: from apps.schedules import exceptions from apps.schedules.models import ShiftSwapRequest diff --git a/engine/apps/slack/scenarios/slack_channel.py b/engine/apps/slack/scenarios/slack_channel.py index 882815e128..b9d8aa1955 100644 --- 
a/engine/apps/slack/scenarios/slack_channel.py +++ b/engine/apps/slack/scenarios/slack_channel.py @@ -9,6 +9,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization class SlackChannelCreatedOrRenamedEventStep(scenario_step.ScenarioStep): @@ -16,7 +17,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: Create or rename channel @@ -41,7 +43,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: Delete channel @@ -63,7 +66,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: Archive channel @@ -84,7 +88,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: UnArchive channel diff --git a/engine/apps/slack/scenarios/slack_channel_integration.py b/engine/apps/slack/scenarios/slack_channel_integration.py index a4d1f63760..a0bbc13bae 100644 --- a/engine/apps/slack/scenarios/slack_channel_integration.py +++ b/engine/apps/slack/scenarios/slack_channel_integration.py @@ -7,6 +7,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization logger = 
logging.getLogger(__name__) logger.setLevel(logging.DEBUG) @@ -17,7 +18,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: Any new message in channel. diff --git a/engine/apps/slack/scenarios/slack_usergroup.py b/engine/apps/slack/scenarios/slack_usergroup.py index 865ec688e4..79f409fdab 100644 --- a/engine/apps/slack/scenarios/slack_usergroup.py +++ b/engine/apps/slack/scenarios/slack_usergroup.py @@ -7,6 +7,7 @@ if typing.TYPE_CHECKING: from apps.slack.models import SlackTeamIdentity, SlackUserIdentity + from apps.user_management.models import Organization class SlackUserGroupEventStep(scenario_step.ScenarioStep): @@ -14,7 +15,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: creation user groups or changes in user groups except its members. @@ -45,7 +47,8 @@ def process_scenario( self, slack_user_identity: "SlackUserIdentity", slack_team_identity: "SlackTeamIdentity", - payload: EventPayload, + payload: "EventPayload", + predefined_org: typing.Optional["Organization"] = None, ) -> None: """ Triggered by action: changed members in user group. 
diff --git a/engine/apps/slack/tests/test_interactive_api_endpoint.py b/engine/apps/slack/tests/test_interactive_api_endpoint.py index 2c117f78bd..a8be48c3a2 100644 --- a/engine/apps/slack/tests/test_interactive_api_endpoint.py +++ b/engine/apps/slack/tests/test_interactive_api_endpoint.py @@ -29,15 +29,18 @@ SLACK_USER_ID = "iurtiurituritu" -def _make_request(payload): +def _make_request(payload, predefined_org=None): + headers = { + "HTTP_X_SLACK_SIGNATURE": "asdfasdf", + "HTTP_X_SLACK_REQUEST_TIMESTAMP": "xxcxcvx", + } + if predefined_org: + headers["HTTP_X_CHATOPS_STACK_ID"] = predefined_org.stack_id return APIClient().post( "/slack/interactive_api_endpoint/", format="json", data=payload, - **{ - "HTTP_X_SLACK_SIGNATURE": "asdfasdf", - "HTTP_X_SLACK_REQUEST_TIMESTAMP": "xxcxcvx", - }, + **headers, ) @@ -312,7 +315,52 @@ def test_grafana_escalate( response = _make_request(payload) assert response.status_code == status.HTTP_200_OK - mock_process_scenario.assert_called_once_with(slack_user_identity, slack_team_identity, payload) + mock_process_scenario.assert_called_once_with( + slack_user_identity, slack_team_identity, payload, predefined_org=None + ) + + +@patch("apps.slack.views.SlackEventApiEndpointView.verify_signature", return_value=True) +@patch.object(StartDirectPaging, "process_scenario") +@pytest.mark.django_db +def test_grafana_escalate_with_org_from_chatops_proxy_defines_org( + mock_process_scenario, + _mock_verify_signature, + make_organization, + make_slack_user_identity, + make_user, + slack_team_identity, +): + """ + Check StartDirectPaging.process_scenario gets called when a user types /grafana escalate. + UnifiedSlackApp commands are prefixed with /grafana. 
+ """ + organization = make_organization(slack_team_identity=slack_team_identity) + slack_user_identity = make_slack_user_identity(slack_team_identity=slack_team_identity, slack_id=SLACK_USER_ID) + make_user(organization=organization, slack_user_identity=slack_user_identity) + + payload = { + "token": "gIkuvaNzQIHg97ATvDxqgjtO", + "team_id": slack_team_identity.slack_id, + "team_domain": "example", + "enterprise_id": "E0001", + "enterprise_name": "Globular%20Construct%20Inc", + "channel_id": "C2147483705", + "channel_name": "test", + "user_id": slack_user_identity.slack_id, + "user_name": "Steve", + "command": "/grafana", + "text": "escalate", + "response_url": "https://hooks.slack.com/commands/1234/5678", + "trigger_id": "13345224609.738474920.8088930838d88f008e0", + "api": "api_value", + } + response = _make_request(payload, predefined_org=organization) + + assert response.status_code == status.HTTP_200_OK + mock_process_scenario.assert_called_once_with( + slack_user_identity, slack_team_identity, payload, predefined_org=organization + ) @patch("apps.slack.views.SlackEventApiEndpointView.verify_signature", return_value=True) @@ -353,4 +401,6 @@ def test_escalate( response = _make_request(payload) assert response.status_code == status.HTTP_200_OK - mock_process_scenario.assert_called_once_with(slack_user_identity, slack_team_identity, payload) + mock_process_scenario.assert_called_once_with( + slack_user_identity, slack_team_identity, payload, predefined_org=None + ) diff --git a/engine/apps/slack/tests/test_scenario_steps/test_paging.py b/engine/apps/slack/tests/test_scenario_steps/test_paging.py index a46ce6abdf..0e80ac322b 100644 --- a/engine/apps/slack/tests/test_scenario_steps/test_paging.py +++ b/engine/apps/slack/tests/test_scenario_steps/test_paging.py @@ -27,26 +27,43 @@ from apps.user_management.models import Organization -def make_slack_payload(organization, team=None, user=None, current_users=None, actions=None): +def make_paging_view_slack_payload( + 
selected_org=None, predefined_org=None, team=None, user=None, current_users=None, actions=None +): + """ + Helper function to create a payload for paging view. + Args: + selected_org: selected organization + predefined_org: predefined organization parsed from chatops-proxy headers + team: selected team object. + user: selected user object. + current_users: Dictionary of current users. + actions: List of actions. + """ + organization = selected_org or predefined_org + if organization is None: + raise Exception("either selected or predefined org must be defined") + private_metadata = { + "input_id_prefix": "", + "channel_id": "123", + "submit_routing_uid": "FinishStepUID", + DataKey.USERS: current_users or {}, + } + if predefined_org: + private_metadata["organization_id"] = str(predefined_org.pk) payload = { "channel_id": "123", "trigger_id": "111", "view": { "id": "view-id", "private_metadata": make_private_metadata( - { - "input_id_prefix": "", - "channel_id": "123", - "submit_routing_uid": "FinishStepUID", - DataKey.USERS: current_users or {}, - }, - organization, - ), + "private_metadata": make_private_metadata(private_metadata, organization), "state": { "values": { DIRECT_PAGING_ORG_SELECT_ID: { OnPagingOrgChange.routing_uid(): { - "selected_option": {"value": make_value({"id": organization.pk}, organization)} + "selected_option": { + "value": make_value({"id": organization.pk if selected_org else None}, organization) + } } }, DIRECT_PAGING_TEAM_SELECT_ID: { @@ -84,6 +101,50 @@ def test_initial_state( assert metadata[DataKey.USERS] == {} + +@pytest.mark.django_db +def test_org_predefined( + make_organization_and_user_with_slack_identities, +): + """ + See get_org_from_chatops_proxy_header function. 
+ """ + org, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() + payload = {"channel_id": "123", "trigger_id": "111"} + + step = StartDirectPaging(slack_team_identity, user=user) + with patch.object(step._slack_client, "views_open") as mock_slack_api_call: + step.process_scenario(slack_user_identity, slack_team_identity, payload, predefined_org=org) + + view = mock_slack_api_call.call_args.kwargs["view"] + metadata = json.loads(view["private_metadata"]) + # Test that organization is injected to private metadata if it is defined by chatops-proxy. + assert metadata["organization_id"] == org.pk + # Test that organization select is not present if org defined by chatops-proxy. + for block in view["blocks"]: + if block.get("block_id") == DIRECT_PAGING_ORG_SELECT_ID: + raise AssertionError("Organization select block should not be present in the view") + + +@pytest.mark.django_db +def test_page_team_with_predefined_org(make_organization_and_user_with_slack_identities, make_team): + organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() + team = make_team(organization) + payload = make_paging_view_slack_payload(predefined_org=organization, team=team) + + step = FinishDirectPaging(slack_team_identity) + with patch("apps.slack.scenarios.paging.direct_paging") as mock_direct_paging: + with patch.object(step._slack_client, "api_call"): + step.process_scenario(slack_user_identity, slack_team_identity, payload) + + mock_direct_paging.assert_called_once_with( + organization=organization, + from_user=user, + message="The Message", + team=team, + users=[], + ) + + @pytest.mark.parametrize("role", (LegacyAccessControlRole.VIEWER, LegacyAccessControlRole.NONE)) @pytest.mark.django_db def test_initial_unauthorized(make_organization_and_user_with_slack_identities, role): @@ -126,7 +187,7 @@ def test_add_user_no_warning(make_organization_and_user_with_slack_identities, m 
on_call_shift.add_rolling_users([[user]]) schedule.refresh_ical_file() - payload = make_slack_payload(organization=organization, user=user) + payload = make_paging_view_slack_payload(selected_org=organization, user=user) step = OnPagingUserChange(slack_team_identity) with patch.object(step._slack_client, "views_update") as mock_slack_api_call: @@ -161,7 +222,7 @@ def test_add_user_maximum_exceeded(make_organization_and_user_with_slack_identit on_call_shift.add_rolling_users([[user]]) schedule.refresh_ical_file() - payload = make_slack_payload(organization=organization, user=user) + payload = make_paging_view_slack_payload(selected_org=organization, user=user) step = OnPagingUserChange(slack_team_identity) with patch("apps.slack.scenarios.paging.PRIVATE_METADATA_MAX_LENGTH", 100): @@ -188,7 +249,7 @@ def test_add_user_maximum_exceeded(make_organization_and_user_with_slack_identit def test_add_user_raise_warning(make_organization_and_user_with_slack_identities): organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() # user is not on call - payload = make_slack_payload(organization=organization, user=user) + payload = make_paging_view_slack_payload(selected_org=organization, user=user) step = OnPagingUserChange(slack_team_identity) with patch.object(step._slack_client, "views_push") as mock_slack_api_call: @@ -209,8 +270,8 @@ def test_add_user_raise_warning(make_organization_and_user_with_slack_identities @pytest.mark.django_db def test_change_user_policy(make_organization_and_user_with_slack_identities): organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() - payload = make_slack_payload( - organization=organization, + payload = make_paging_view_slack_payload( + selected_org=organization, actions=[ { "selected_option": { @@ -231,8 +292,8 @@ def test_change_user_policy(make_organization_and_user_with_slack_identities): @pytest.mark.django_db def 
test_remove_user(make_organization_and_user_with_slack_identities): organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() - payload = make_slack_payload( - organization=organization, + payload = make_paging_view_slack_payload( + selected_org=organization, actions=[ { "selected_option": { @@ -255,7 +316,7 @@ def test_remove_user(make_organization_and_user_with_slack_identities): @pytest.mark.django_db def test_trigger_paging_no_team_or_user_selected(make_organization_and_user_with_slack_identities): organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() - payload = make_slack_payload(organization=organization) + payload = make_paging_view_slack_payload(selected_org=organization) step = FinishDirectPaging(slack_team_identity, user=user) @@ -277,7 +338,7 @@ def test_trigger_paging_unauthorized(make_organization_and_user_with_slack_ident organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities( role=role ) - payload = make_slack_payload(organization=organization) + payload = make_paging_view_slack_payload(selected_org=organization) step = FinishDirectPaging(slack_team_identity) with patch.object(step._slack_client, "api_call"): @@ -294,7 +355,9 @@ def test_trigger_paging_unauthorized(make_organization_and_user_with_slack_ident def test_trigger_paging_additional_responders(make_organization_and_user_with_slack_identities, make_team): organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() team = make_team(organization) - payload = make_slack_payload(organization=organization, team=team, current_users={str(user.pk): Policy.IMPORTANT}) + payload = make_paging_view_slack_payload( + selected_org=organization, team=team, current_users={str(user.pk): Policy.IMPORTANT} + ) step = FinishDirectPaging(slack_team_identity) with 
patch("apps.slack.scenarios.paging.direct_paging") as mock_direct_paging: @@ -314,7 +377,7 @@ def test_trigger_paging_additional_responders(make_organization_and_user_with_sl def test_page_team(make_organization_and_user_with_slack_identities, make_team): organization, user, slack_team_identity, slack_user_identity = make_organization_and_user_with_slack_identities() team = make_team(organization) - payload = make_slack_payload(organization=organization, team=team) + payload = make_paging_view_slack_payload(selected_org=organization, team=team) step = FinishDirectPaging(slack_team_identity) with patch("apps.slack.scenarios.paging.direct_paging") as mock_direct_paging: diff --git a/engine/apps/slack/views.py b/engine/apps/slack/views.py index 7f23b70c4b..37eb70dacf 100644 --- a/engine/apps/slack/views.py +++ b/engine/apps/slack/views.py @@ -365,7 +365,8 @@ def post(self, request): Step = route["step"] logger.info("Routing to {}".format(Step)) step = Step(slack_team_identity, organization, user) - step.process_scenario(slack_user_identity, slack_team_identity, payload) + org = get_org_from_chatops_proxy_header(request, slack_team_identity) + step.process_scenario(slack_user_identity, slack_team_identity, payload, predefined_org=org) step_was_found = True if payload_type == route_payload_type: @@ -592,3 +593,19 @@ def post(self, request): return Response({"error": e.error_message}, status=400) return Response(status=200) + + +def get_org_from_chatops_proxy_header(request, slack_team_identity) -> Organization | None: + """ + get_org_from_chatops_proxy_header extracts organization from the X-Chatops-Stack-ID header injected by chatops-proxy + """ + stack_id = request.META.get("HTTP_X_CHATOPS_STACK_ID") + if not stack_id: + return None + + try: + # get only orgs linked to the slack workspace to avoid tampering + return slack_team_identity.organizations.get(stack_id=stack_id) + except Organization.DoesNotExist: + logger.info(f"SlackEventApiEndpointView: 
get_org_from_header: organization with stack_id {stack_id} not found") + return None From 87d79822502b5da8ad982da92050a38c74bbc077 Mon Sep 17 00:00:00 2001 From: Vadim Stepanov Date: Fri, 19 Jul 2024 12:53:06 +0100 Subject: [PATCH 10/24] Unified Slack app reinstall (#4682) # What this PR does Adds a button to reinstall the Slack app to migrate to Unified Slack App. Also adds backend support for this. Screenshot 2024-07-18 at 18 33 08 ## Which issue(s) this PR closes Related to https://github.com/grafana/oncall-gateway/issues/252 ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] Added the relevant release notes label (see labels prefixed w/ `release:`). These labels dictate how your PR will show up in the autogenerated release notes. --- engine/apps/api/serializers/organization.py | 2 +- engine/apps/slack/installation.py | 6 ++---- ...amidentity__unified_slack_app_installed.py | 18 +++++++++++++++++ .../apps/slack/models/slack_team_identity.py | 12 +++++++++++ engine/apps/slack/tests/test_installation.py | 20 +++++++++++++++++++ .../models/organization/organization.types.ts | 3 +-- .../tabs/SlackSettings/SlackSettings.tsx | 14 +++++++++++++ 7 files changed, 68 insertions(+), 7 deletions(-) create mode 100644 engine/apps/slack/migrations/0005_slackteamidentity__unified_slack_app_installed.py diff --git a/engine/apps/api/serializers/organization.py b/engine/apps/api/serializers/organization.py index 6279c0a19d..cac0edd12a 100644 --- a/engine/apps/api/serializers/organization.py +++ b/engine/apps/api/serializers/organization.py @@ -13,7 +13,7 @@ class FastSlackTeamIdentitySerializer(serializers.ModelSerializer): class Meta: model = SlackTeamIdentity - fields = ["cached_name"] + fields = ["cached_name", "needs_reinstall"] class OrganizationSerializer(EagerLoadingMixin, serializers.ModelSerializer): diff --git a/engine/apps/slack/installation.py 
b/engine/apps/slack/installation.py index a48784bc2f..393a2a9a70 100644 --- a/engine/apps/slack/installation.py +++ b/engine/apps/slack/installation.py @@ -32,13 +32,11 @@ def install_slack_integration(organization, user, oauth_response): """ from apps.slack.models import SlackTeamIdentity - if organization.slack_team_identity is not None: + if organization.slack_team_identity and not organization.slack_team_identity.needs_reinstall: raise SlackInstallationExc("Organization already has Slack integration") slack_team_id = oauth_response["team"]["id"] - slack_team_identity, is_slack_team_identity_created = SlackTeamIdentity.objects.get_or_create( - slack_id=slack_team_id, - ) + slack_team_identity, _ = SlackTeamIdentity.objects.get_or_create(slack_id=slack_team_id) # update slack oauth fields by data from response slack_team_identity.update_oauth_fields(user, organization, oauth_response) write_chatops_insight_log( diff --git a/engine/apps/slack/migrations/0005_slackteamidentity__unified_slack_app_installed.py b/engine/apps/slack/migrations/0005_slackteamidentity__unified_slack_app_installed.py new file mode 100644 index 0000000000..f4798876b7 --- /dev/null +++ b/engine/apps/slack/migrations/0005_slackteamidentity__unified_slack_app_installed.py @@ -0,0 +1,18 @@ +# Generated by Django 4.2.11 on 2024-07-16 16:11 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('slack', '0004_auto_20230913_1020'), + ] + + operations = [ + migrations.AddField( + model_name='slackteamidentity', + name='_unified_slack_app_installed', + field=models.BooleanField(default=False, null=True), + ), + ] diff --git a/engine/apps/slack/models/slack_team_identity.py b/engine/apps/slack/models/slack_team_identity.py index a5e24c12f9..79e603a742 100644 --- a/engine/apps/slack/models/slack_team_identity.py +++ b/engine/apps/slack/models/slack_team_identity.py @@ -1,6 +1,7 @@ import logging import typing +from django.conf import settings 
from django.db import models from django.db.models import JSONField @@ -48,6 +49,9 @@ class SlackTeamIdentity(models.Model): # response after oauth.access. This field is used to reinstall app to another OnCall workspace cached_reinstall_data = JSONField(null=True, default=None) + # Do not use directly, use the "needs_reinstall" property instead + _unified_slack_app_installed = models.BooleanField(null=True, default=False) + class Meta: ordering = ("datetime",) @@ -74,6 +78,10 @@ def update_oauth_fields(self, user, organization, oauth_response): self.installed_by = slack_user_identity self.cached_reinstall_data = None self.installed_via_granular_permissions = True + + if settings.UNIFIED_SLACK_APP_ENABLED: + self._unified_slack_app_installed = True + self.save() def get_cached_channels(self, search_term=None, slack_id=None): @@ -129,6 +137,10 @@ def app_id(self): self.save(update_fields=["cached_app_id"]) return self.cached_app_id + @property + def needs_reinstall(self): + return settings.UNIFIED_SLACK_APP_ENABLED and not self._unified_slack_app_installed + def get_users_from_slack_conversation_for_organization(self, channel_id, organization): sc = SlackClient(self) members = self.get_conversation_members(sc, channel_id) diff --git a/engine/apps/slack/tests/test_installation.py b/engine/apps/slack/tests/test_installation.py index 7075c276e8..23f313e1f1 100644 --- a/engine/apps/slack/tests/test_installation.py +++ b/engine/apps/slack/tests/test_installation.py @@ -95,6 +95,26 @@ def test_install_slack_integration_raises_exception_for_existing_integration( install_slack_integration(organization, user, SLACK_OAUTH_ACCESS_RESPONSE) +@pytest.mark.django_db +def test_install_slack_integration_legacy(settings, make_organization_and_user, make_slack_team_identity): + settings.UNIFIED_SLACK_APP_ENABLED = True + + slack_team_identity = make_slack_team_identity( + slack_id=SLACK_OAUTH_ACCESS_RESPONSE["team"]["id"], _unified_slack_app_installed=False + ) + organization, user = 
make_organization_and_user() + organization.slack_team_identity = slack_team_identity + organization.save() + + install_slack_integration(organization, user, SLACK_OAUTH_ACCESS_RESPONSE) + slack_team_identity.refresh_from_db() + assert slack_team_identity.needs_reinstall is False + + # raises exception if organization already re-installed the app + with pytest.raises(SlackInstallationExc): + install_slack_integration(organization, user, SLACK_OAUTH_ACCESS_RESPONSE) + + @patch("apps.slack.tasks.clean_slack_integration_leftovers.apply_async", return_value=None) @pytest.mark.django_db def test_uninstall_slack_integration( diff --git a/grafana-plugin/src/models/organization/organization.types.ts b/grafana-plugin/src/models/organization/organization.types.ts index 889c461f13..2c01dc4b34 100644 --- a/grafana-plugin/src/models/organization/organization.types.ts +++ b/grafana-plugin/src/models/organization/organization.types.ts @@ -15,9 +15,8 @@ export interface Organization { name: string; stack_slug: string; slack_team_identity: { - general_log_channel_id: string; - general_log_channel_pk: string; cached_name: string; + needs_reinstall: boolean; }; slack_channel: SlackChannel | null; is_resolution_note_required: boolean; diff --git a/grafana-plugin/src/pages/settings/tabs/ChatOps/tabs/SlackSettings/SlackSettings.tsx b/grafana-plugin/src/pages/settings/tabs/ChatOps/tabs/SlackSettings/SlackSettings.tsx index 1933359ff4..278a1eabce 100644 --- a/grafana-plugin/src/pages/settings/tabs/ChatOps/tabs/SlackSettings/SlackSettings.tsx +++ b/grafana-plugin/src/pages/settings/tabs/ChatOps/tabs/SlackSettings/SlackSettings.tsx @@ -201,6 +201,20 @@ class _SlackSettings extends Component { + {currentOrganization.slack_team_identity.needs_reinstall && ( + <> + Unified Slack App + + + + + + + )} ); }; From 1491e28226036108ec8c99561fb1b96c57aef8d8 Mon Sep 17 00:00:00 2001 From: Matias Bordese Date: Fri, 19 Jul 2024 08:56:22 -0300 Subject: [PATCH 11/24] Update regex to jinja route conversion 
to correctly escape double quotes (#4705) Related to this [thread](https://raintank-corp.slack.com/archives/C0229FD3CE9/p1721309907422709). --- engine/apps/api/serializers/channel_filter.py | 3 ++- engine/apps/api/tests/test_channel_filter.py | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/engine/apps/api/serializers/channel_filter.py b/engine/apps/api/serializers/channel_filter.py index 7ff0f0f12e..408759fb91 100644 --- a/engine/apps/api/serializers/channel_filter.py +++ b/engine/apps/api/serializers/channel_filter.py @@ -147,7 +147,8 @@ def get_filtering_term_as_jinja2(self, obj): elif obj.filtering_term_type == ChannelFilter.FILTERING_TERM_TYPE_REGEX: # Four curly braces will result in two curly braces in the final string # rf"..." is a raw f string, to keep original filtering_term - return rf'{{{{ payload | json_dumps | regex_search("{obj.filtering_term}") }}}}' + escaped_quotes = obj.filtering_term.replace('"', '\\"') if obj.filtering_term else "" + return rf'{{{{ payload | json_dumps | regex_search("{escaped_quotes}") }}}}' elif obj.filtering_labels and obj.filtering_term_type == ChannelFilter.FILTERING_TERM_TYPE_LABELS: # required labels labels = [ diff --git a/engine/apps/api/tests/test_channel_filter.py b/engine/apps/api/tests/test_channel_filter.py index 9cd4e08806..f36fec634d 100644 --- a/engine/apps/api/tests/test_channel_filter.py +++ b/engine/apps/api/tests/test_channel_filter.py @@ -509,8 +509,10 @@ def test_channel_filter_convert_from_regex_to_jinja2( make_channel_filter(alert_receive_channel, is_default=True) - # r"..." used to keep this string as raw string - regex_filtering_term = r"\".*\": \"This alert was sent by user for demonstration purposes\"" + # regex as set by Terraform/API (not a raw string, but a string with escaped characters) + # see ChannelFilterSerializer in apps.public_api.serializers.routes.py + regex_filtering_term = '".*": "This alert was sent by user for demonstration purposes"' + # r"..." 
to define the expected jinja2 template translation final_filtering_term = r'{{ payload | json_dumps | regex_search("\".*\": \"This alert was sent by user for demonstration purposes\"") }}' payload = {"description": "This alert was sent by user for demonstration purposes"} From ab700fd4a18e2acc5157fc2fb889c12909aed6c6 Mon Sep 17 00:00:00 2001 From: Matias Bordese Date: Fri, 19 Jul 2024 08:56:26 -0300 Subject: [PATCH 12/24] Update OrderedModel serializer not to save previously updated order (#4706) We are noticing some [issues](https://ops.grafana-ops.net/goto/sQFLv4XSg?orgId=1) when updating routes via Terraform because when receiving multiple concurrent requests updating position, the order is updated in a transaction but it is then tried to save again in the `.save` call, which could lead to `IntegrityError`s, because the order may have been updated in a concurrent request meanwhile. Test run with the reproduced error (before the serializer update): ``` _____________________________ test_ordered_model_swap_all_to_zero_via_serializer _____________________________ @pytest.mark.django_db(transaction=True) def test_ordered_model_swap_all_to_zero_via_serializer(): THREADS = 300 exceptions = [] TestOrderedModel.objects.all().delete() # clear table instances = [TestOrderedModel.objects.create(test_field="test") for _ in range(THREADS)] # generate random non-unique orders random.seed(42) positions = [random.randint(0, THREADS - 1) for _ in range(THREADS)] def update_order_to_zero(idx): try: instance = instances[idx] serializer = TestOrderedModelSerializer( instance, data={"order": 0, "extra_field": idx}, partial=True ) serializer.is_valid(raise_exception=True) serializer.save() instance.swap(positions[idx]) except Exception as e: exceptions.append(e) threads = [threading.Thread(target=update_order_to_zero, args=(0,)) for _ in range(THREADS)] for thread in threads: thread.start() for thread in threads: thread.join() # can only check that orders are still sequential and 
that there are no exceptions # can't check the exact order because it changes depending on the order of execution > assert not exceptions E assert not [IntegrityError(1062, "Duplicate entry 'test-0' for key 'base_testorderedmodel.unique_test_field_order'"), IntegrityEr...rder'"), IntegrityError(1062, "Duplicate entry 'test-0' for key 'base_testorderedmodel.unique_test_field_order'"), ...] THREADS = 300 exceptions = [IntegrityError(1062, "Duplicate entry 'test-0' for key 'base_testorderedmodel.unique_test_field_order'"), IntegrityEr...rder'"), IntegrityError(1062, "Duplicate entry 'test-0' for key 'base_testorderedmodel.unique_test_field_order'"), ...] instances = [, , , , , ...] positions = [57, 12, 140, 125, 114, 71, ...] thread = threads = [, , , ...] update_order_to_zero = .update_order_to_zero at 0x7f02086274c0> common/tests/test_ordered_model.py:481: AssertionError ``` --- engine/common/ordered_model/serializer.py | 35 ++++++++++++++++++- engine/common/tests/test_ordered_model.py | 42 +++++++++++++++++++++++ 2 files changed, 76 insertions(+), 1 deletion(-) diff --git a/engine/common/ordered_model/serializer.py b/engine/common/ordered_model/serializer.py index d5c36ed162..b8aeaa3872 100644 --- a/engine/common/ordered_model/serializer.py +++ b/engine/common/ordered_model/serializer.py @@ -1,4 +1,5 @@ from rest_framework import serializers +from rest_framework.utils import model_meta from common.api_helpers.exceptions import BadRequest @@ -28,6 +29,38 @@ def create(self, validated_data): return instance + def _update(self, instance, validated_data): + # customize the update method to make sure order field is not saved again + # (which could trigger an integrity error if there was another concurrent update changing order) + serializers.raise_errors_on_nested_writes("update", self, validated_data) + info = model_meta.get_field_info(instance) + + # Simply set each attribute on the instance, and then save it. 
+ # Note that unlike `.create()` we don't need to treat many-to-many + # relationships as being a special case. During updates we already + # have an instance pk for the relationships to be associated with. + m2m_fields = [] + update_fields = [] + for attr, value in validated_data.items(): + if attr in info.relations and info.relations[attr].to_many: + m2m_fields.append((attr, value)) + else: + setattr(instance, attr, value) + update_fields.append(attr) + + # NOTE: this is the only difference, update changed fields to avoid saving order field again + if update_fields: + instance.save(update_fields=update_fields) + + # Note that many-to-many fields are set after updating instance. + # Setting m2m fields triggers signals which could potentially change + # updated instance and we do not want it to collide with .update() + for attr, value in m2m_fields: + field = getattr(instance, attr) + field.set(value) + + return instance + def update(self, instance, validated_data): # Remove "manual_order" and "order" fields from validated_data, so they are not passed to update method. manual_order = validated_data.pop("manual_order", False) @@ -38,7 +71,7 @@ def update(self, instance, validated_data): self._adjust_order(instance, manual_order, order, created=False) # Proceed with the update. 
- return super().update(instance, validated_data) + return self._update(instance, validated_data) @staticmethod def _adjust_order(instance, manual_order, order, created): diff --git a/engine/common/tests/test_ordered_model.py b/engine/common/tests/test_ordered_model.py index b9efd98fea..77a58051b1 100644 --- a/engine/common/tests/test_ordered_model.py +++ b/engine/common/tests/test_ordered_model.py @@ -5,6 +5,7 @@ from django.db import models from common.ordered_model.ordered_model import OrderedModel +from common.ordered_model.serializer import OrderedModelSerializer class TestOrderedModel(OrderedModel): @@ -436,3 +437,44 @@ def delete(idx): assert not exceptions assert _orders_are_sequential() assert list(TestOrderedModel.objects.values_list("extra_field", flat=True)) == expected_extra_field_values + + +class TestOrderedModelSerializer(OrderedModelSerializer): + class Meta: + model = TestOrderedModel + fields = OrderedModelSerializer.Meta.fields + ["test_field", "extra_field"] + + +@pytest.mark.skipif(SKIP_CONCURRENT, reason="OrderedModel concurrent tests are skipped to speed up tests") +@pytest.mark.django_db(transaction=True) +def test_ordered_model_swap_all_to_zero_via_serializer(): + THREADS = 300 + exceptions = [] + + TestOrderedModel.objects.all().delete() # clear table + instances = [TestOrderedModel.objects.create(test_field="test") for _ in range(THREADS)] + + # generate random non-unique orders + random.seed(42) + positions = [random.randint(0, THREADS - 1) for _ in range(THREADS)] + + def update_order_to_zero(idx): + try: + instance = instances[idx] + serializer = TestOrderedModelSerializer(instance, data={"order": 0, "extra_field": idx}, partial=True) + serializer.is_valid(raise_exception=True) + serializer.save() + instance.swap(positions[idx]) + except Exception as e: + exceptions.append(e) + + threads = [threading.Thread(target=update_order_to_zero, args=(0,)) for _ in range(THREADS)] + for thread in threads: + thread.start() + for thread in 
threads: + thread.join() + + # can only check that orders are still sequential and that there are no exceptions + # can't check the exact order because it changes depending on the order of execution + assert not exceptions + assert _orders_are_sequential() From ec360f08ddf260a80eb00dc0505bc45ff2b6aaa1 Mon Sep 17 00:00:00 2001 From: Michael Derynck Date: Fri, 19 Jul 2024 08:32:22 -0600 Subject: [PATCH 13/24] Fix bug for incorrect arguments used to invoke cleanup_empty_deleted_integrations (#4707) # What this PR does - Fixes a bug where cleanup_empty_deleted_integrations was not being passed the correct arguments - Removes start_cleanup_organizations now that it is no longer in use ## Which issue(s) this PR closes Related to [issue link here] ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] Added the relevant release notes label (see labels prefixed w/ `release:`). These labels dictate how your PR will show up in the autogenerated release notes. 
--- engine/apps/grafana_plugin/tasks/sync.py | 15 +-------------- engine/settings/celery_task_routes.py | 1 - 2 files changed, 1 insertion(+), 15 deletions(-) diff --git a/engine/apps/grafana_plugin/tasks/sync.py b/engine/apps/grafana_plugin/tasks/sync.py index c33c85414c..829fb923e5 100644 --- a/engine/apps/grafana_plugin/tasks/sync.py +++ b/engine/apps/grafana_plugin/tasks/sync.py @@ -166,26 +166,13 @@ def cleanup_empty_deleted_integrations(organization_pk, dry_run=True): integration.hard_delete() -@shared_dedicated_queue_retry_task(autoretry_for=(Exception,), max_retries=0) -def start_cleanup_organizations(): - # TODO: Remove next release after tasks revoked - cleanup_threshold = timezone.now() - INACTIVE_PERIOD - organization_qs = Organization.objects.filter(last_time_synced__lte=cleanup_threshold) - organization_pks = organization_qs.values_list("pk", flat=True) - logger.debug(f"Found {len(organization_pks)} organizations") - max_countdown = CLEANUP_PERIOD.seconds - for idx, organization_pk in enumerate(organization_pks): - countdown = idx % max_countdown # Spread orgs evenly - cleanup_organization_async.apply_async((organization_pk,), countdown=countdown) - - @shared_dedicated_queue_retry_task(autoretry_for=(Exception,), max_retries=0) def start_cleanup_deleted_integrations(): cleanup_threshold = timezone.now() - CLEANUP_PERIOD channels_qs = AlertReceiveChannel.objects_with_deleted.filter(deleted_at__gte=cleanup_threshold) organization_pks = set(channels_qs.values_list("organization_id", flat=True)) logger.debug(f"Found {len(organization_pks)} organizations") - for organization_pk in enumerate(organization_pks): + for _, organization_pk in enumerate(organization_pks): cleanup_empty_deleted_integrations.apply_async( (organization_pk, False), ) diff --git a/engine/settings/celery_task_routes.py b/engine/settings/celery_task_routes.py index ea866c1a7a..cce98b6fe4 100644 --- a/engine/settings/celery_task_routes.py +++ b/engine/settings/celery_task_routes.py @@ 
-145,7 +145,6 @@ "apps.chatops_proxy.tasks.sync_org_with_chatops_proxy": {"queue": "long"}, "apps.grafana_plugin.tasks.sync.cleanup_organization_async": {"queue": "long"}, "apps.grafana_plugin.tasks.sync.cleanup_empty_deleted_integrations": {"queue": "long"}, - "apps.grafana_plugin.tasks.sync.start_cleanup_organizations": {"queue": "long"}, "apps.grafana_plugin.tasks.sync.start_cleanup_deleted_integrations": {"queue": "long"}, "apps.grafana_plugin.tasks.sync.start_cleanup_deleted_organizations": {"queue": "long"}, "apps.grafana_plugin.tasks.sync.start_sync_organizations": {"queue": "long"}, From 696aca2d803cef38b7fffa03825c195af9677b65 Mon Sep 17 00:00:00 2001 From: Vadim Stepanov Date: Mon, 22 Jul 2024 11:30:28 +0100 Subject: [PATCH 14/24] Make it clear alert groups can't be searched (#4713) # What this PR does * Make the filter input say `Filter results...` instead of `Search or filter results...` on the alert group page; disallow custom input there so it's only possible to choose among existing filters * Remove outdated `/filters?search=` functionality from internal API ## Which issue(s) this PR closes Related to https://github.com/grafana/oncall-private/issues/2679 ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] Added the relevant release notes label (see labels prefixed w/ `release:`). These labels dictate how your PR will show up in the autogenerated release notes. 
--- engine/apps/api/views/alert_group.py | 4 +++- engine/apps/api/views/alert_receive_channel.py | 1 + engine/apps/api/views/escalation_chain.py | 5 +---- engine/apps/api/views/schedule.py | 5 +---- engine/apps/api/views/user.py | 5 +---- engine/apps/api/views/webhooks.py | 5 +---- grafana-plugin/e2e-tests/utils/alertGroup.ts | 2 +- .../src/containers/RemoteFilters/RemoteFilters.tsx | 2 +- grafana-plugin/src/models/filters/filters.ts | 5 ----- 9 files changed, 10 insertions(+), 24 deletions(-) diff --git a/engine/apps/api/views/alert_group.py b/engine/apps/api/views/alert_group.py index 96d49aded3..95afdba17e 100644 --- a/engine/apps/api/views/alert_group.py +++ b/engine/apps/api/views/alert_group.py @@ -747,7 +747,6 @@ def filters(self, request): "href": api_root + "teams/", "global": True, }, - {"name": "search", "type": "search"}, {"name": "integration", "type": "options", "href": api_root + "alert_receive_channels/?filters=true"}, {"name": "escalation_chain", "type": "options", "href": api_root + "escalation_chains/?filters=true"}, { @@ -811,6 +810,9 @@ def filters(self, request): }, ] + if settings.FEATURE_ALERT_GROUP_SEARCH_ENABLED: + filter_options = [{"name": "search", "type": "search"}] + filter_options + if is_labels_feature_enabled(self.request.auth.organization): filter_options.append( { diff --git a/engine/apps/api/views/alert_receive_channel.py b/engine/apps/api/views/alert_receive_channel.py index 21ea9209f5..c4103e9d2d 100644 --- a/engine/apps/api/views/alert_receive_channel.py +++ b/engine/apps/api/views/alert_receive_channel.py @@ -474,6 +474,7 @@ def filters(self, request): api_root = "/api/internal/v1/" filter_options = [ + {"name": "search", "type": "search"}, { "name": "team", "type": "team_select", diff --git a/engine/apps/api/views/escalation_chain.py b/engine/apps/api/views/escalation_chain.py index af670c2d88..952da9223b 100644 --- a/engine/apps/api/views/escalation_chain.py +++ b/engine/apps/api/views/escalation_chain.py @@ -183,10 
+183,10 @@ def details(self, request, pk): @action(methods=["get"], detail=False) def filters(self, request): - filter_name = request.query_params.get("search", None) api_root = "/api/internal/v1/" filter_options = [ + {"name": "search", "type": "search"}, { "name": "team", "type": "team_select", @@ -195,7 +195,4 @@ def filters(self, request): }, ] - if filter_name is not None: - filter_options = list(filter(lambda f: filter_name in f["name"], filter_options)) - return Response(filter_options) diff --git a/engine/apps/api/views/schedule.py b/engine/apps/api/views/schedule.py index a14fc427a5..261851d6e6 100644 --- a/engine/apps/api/views/schedule.py +++ b/engine/apps/api/views/schedule.py @@ -577,10 +577,10 @@ def mention_options(self, request): @action(methods=["get"], detail=False) def filters(self, request): - filter_name = request.query_params.get("search", None) api_root = "/api/internal/v1/" filter_options = [ + {"name": "search", "type": "search"}, { "name": "team", "type": "team_select", @@ -610,7 +610,4 @@ def filters(self, request): }, ] - if filter_name is not None: - filter_options = list(filter(lambda f: filter_name in f["name"], filter_options)) - return Response(filter_options) diff --git a/engine/apps/api/views/user.py b/engine/apps/api/views/user.py index 85b8182578..3bbc50ac07 100644 --- a/engine/apps/api/views/user.py +++ b/engine/apps/api/views/user.py @@ -870,10 +870,10 @@ def export_token(self, request, pk) -> Response: ) @action(methods=["get"], detail=False) def filters(self, request): - filter_name = request.query_params.get("search", None) api_root = "/api/internal/v1/" filter_options = [ + {"name": "search", "type": "search"}, { "name": "team", "type": "team_select", @@ -882,9 +882,6 @@ def filters(self, request): }, ] - if filter_name is not None: - filter_options = list(filter(lambda f: filter_name in f["name"], filter_options)) - return Response(filter_options) diff --git a/engine/apps/api/views/webhooks.py 
b/engine/apps/api/views/webhooks.py index 0deb30dd34..f32b31ff55 100644 --- a/engine/apps/api/views/webhooks.py +++ b/engine/apps/api/views/webhooks.py @@ -140,10 +140,10 @@ def get_object_from_organization(self): @action(methods=["get"], detail=False) def filters(self, request): - filter_name = request.query_params.get("search", None) api_root = "/api/internal/v1/" filter_options = [ + {"name": "search", "type": "search"}, { "name": "team", "type": "team_select", @@ -161,9 +161,6 @@ def filters(self, request): } ) - if filter_name is not None: - filter_options = list(filter(lambda f: filter_name in f["name"], filter_options)) - return Response(filter_options) @action(methods=["get"], detail=True) diff --git a/grafana-plugin/e2e-tests/utils/alertGroup.ts b/grafana-plugin/e2e-tests/utils/alertGroup.ts index 77696e5507..b346da71e8 100644 --- a/grafana-plugin/e2e-tests/utils/alertGroup.ts +++ b/grafana-plugin/e2e-tests/utils/alertGroup.ts @@ -49,7 +49,7 @@ export const filterAlertGroupsTableByIntegrationAndGoToDetailPage = async ( const selectElement = await selectDropdownValue({ page, selectType: 'grafanaSelect', - placeholderText: 'Search or filter results...', + placeholderText: 'Filter results...', value: 'Integration', }); await selectElement.type(integrationName); diff --git a/grafana-plugin/src/containers/RemoteFilters/RemoteFilters.tsx b/grafana-plugin/src/containers/RemoteFilters/RemoteFilters.tsx index 416bbcfd58..61a83f9f56 100644 --- a/grafana-plugin/src/containers/RemoteFilters/RemoteFilters.tsx +++ b/grafana-plugin/src/containers/RemoteFilters/RemoteFilters.tsx @@ -173,7 +173,7 @@ class _RemoteFilters extends Component { { ); }; + renderFilterBlock = (filterOption: FilterOption) => { + const { theme, extraInformation } = this.props; + const showInputLabel = this.getExtraInformationField(filterOption, 'showInputLabel'); + const isInputClearable = this.getExtraInformationField(filterOption, 'isClearable'); + + const styles = getStyles(theme); + + const 
filterElement = ( +
+ + + {filterOption.display_name || capitalCase(filterOption.name)} + {filterOption.description && ( + + + + + + )} + + + + {this.renderFilterOption(filterOption)} + + +
+ ); + + if (extraInformation?.[filterOption.name]?.portal?.current) { + return ReactDOM.createPortal(filterElement, extraInformation[filterOption.name].portal.current); + } + + return filterElement; + }; + + getExtraInformationField = (filterOption: FilterOption, key: keyof FilterExtraInformationValues) => { + return filterExtraInformation(this.props.extraInformation?.[filterOption.name])?.[key]; + }; + + extractDefaultValuesFromExtraInformation = (): { [key: string]: FilterExtraInformationValues } => { + const { extraInformation } = this.props; + + return extraInformation + ? Object.keys(extraInformation).reduce((acc, key) => { + if (extraInformation[key].value) { + acc[key] = extraInformation[key].value; + } + + return acc; + }, {}) + : {}; + }; + handleSearch = (query: string) => { const { filters } = this.state; @@ -324,12 +384,10 @@ class _RemoteFilters extends Component { const value = getValueForDateRangeFilterType(values[filter.name]); return ( - ); @@ -465,7 +523,6 @@ const getStyles = (theme: GrafanaTheme2) => { filters: css` display: flex; gap: 10px; - padding: 10px; border: 1px solid ${theme.colors.border.weak} border-radius: 2px; flex-wrap: wrap; diff --git a/grafana-plugin/src/containers/RemoteFilters/TimeRangePickerWrapper.tsx b/grafana-plugin/src/containers/RemoteFilters/TimeRangePickerWrapper.tsx new file mode 100644 index 0000000000..974b916f19 --- /dev/null +++ b/grafana-plugin/src/containers/RemoteFilters/TimeRangePickerWrapper.tsx @@ -0,0 +1,122 @@ +import React, { useState } from 'react'; + +import { dateMath, DateTime, TimeRange, toUtc } from '@grafana/data'; +import { TimeZone } from '@grafana/schema'; +import { TimeRangePicker } from '@grafana/ui'; +import { noop } from 'lodash'; + +interface TimeRangePickerProps { + value: TimeRange; + timeZone: string; + + onChange: (timeRange: TimeRange) => void; +} + +export function TimeRangePickerWrapper(props: TimeRangePickerProps) { + const { onChange } = props; + + const [value, setValue] = 
useState(props.value); + const [timezone, setTimezone] = useState(props.timeZone); + return ( + ); + function onPickerChange(timeRange: TimeRange) { + setValue(timeRange); + onChange(timeRange); + } + function onZoom() { + const zoomedTimeRange = getZoomedTimeRange(value, 2); + onPickerChange(zoomedTimeRange); + } + function onMoveBackward() { + onPickerChange(getShiftedTimeRange(TimeRangeDirection.Backward, value, Date.now())); + } + function onMoveForward() { + onPickerChange(getShiftedTimeRange(TimeRangeDirection.Forward, value, Date.now())); + } + function onTimezoneChange(timeZone: TimeZone) { + setTimezone(timeZone); + onPickerChange(evaluateTimeRange(value.from, value.to, timeZone)); + } +} + +function evaluateTimeRange( + from: string | DateTime, + to: string | DateTime, + timeZone: TimeZone, + fiscalYearStartMonth?: number, + delay?: string +): TimeRange { + const hasDelay = delay && to === 'now'; + + return { + from: dateMath.parse(from, false, timeZone, fiscalYearStartMonth)!, + to: dateMath.parse(hasDelay ? 'now-' + delay : to, true, timeZone, fiscalYearStartMonth)!, + raw: { + from: from, + to: to, + }, + }; +} + +function getZoomedTimeRange(timeRange: TimeRange, factor: number): TimeRange { + const timespan = timeRange.to.valueOf() - timeRange.from.valueOf(); + const center = timeRange.to.valueOf() - timespan / 2; + // If the timespan is 0, zooming out would do nothing, so we force a zoom out to 30s + const newTimespan = timespan === 0 ?
30000 : timespan * factor; + + const to = center + newTimespan / 2; + const from = center - newTimespan / 2; + + return { from: toUtc(from), to: toUtc(to), raw: { from: toUtc(from), to: toUtc(to) } }; +} + +enum TimeRangeDirection { + Backward, + Forward, +} + +function getShiftedTimeRange(dir: TimeRangeDirection, timeRange: TimeRange, upperLimit: number): TimeRange { + const oldTo = timeRange.to.valueOf(); + const oldFrom = timeRange.from.valueOf(); + const halfSpan = (oldTo - oldFrom) / 2; + + let fromRaw: number; + let toRaw: number; + if (dir === TimeRangeDirection.Backward) { + fromRaw = oldFrom - halfSpan; + toRaw = oldTo - halfSpan; + } else { + fromRaw = oldFrom + halfSpan; + toRaw = oldTo + halfSpan; + + if (toRaw > upperLimit && oldTo < upperLimit) { + toRaw = upperLimit; + fromRaw = oldFrom; + } + } + + const from = toUtc(fromRaw); + const to = toUtc(toRaw); + return { + from, + to, + raw: { from, to }, + }; +} diff --git a/grafana-plugin/src/models/alertgroup/alertgroup.ts b/grafana-plugin/src/models/alertgroup/alertgroup.ts index ca4c11c52c..bc22ed4bd7 100644 --- a/grafana-plugin/src/models/alertgroup/alertgroup.ts +++ b/grafana-plugin/src/models/alertgroup/alertgroup.ts @@ -172,6 +172,7 @@ export class AlertGroupStore { this.liveUpdatesPaused = value; } + @WithGlobalNotification({ failure: PROCESSING_REQUEST_ERROR }) @AutoLoadingState(ActionKey.UPDATE_FILTERS_AND_FETCH_INCIDENTS) async updateIncidentFiltersAndRefetchIncidentsAndStats(params: any, keepCursor = false) { if (!keepCursor) { diff --git a/grafana-plugin/src/models/filters/filters.types.ts b/grafana-plugin/src/models/filters/filters.types.ts index ae7cd85218..fcd2edf035 100644 --- a/grafana-plugin/src/models/filters/filters.types.ts +++ b/grafana-plugin/src/models/filters/filters.types.ts @@ -1,9 +1,22 @@ +import React from 'react'; + import { SelectOption } from 'state/types'; export interface FiltersValues { [key: string]: any; } +export interface FilterExtraInformationValues { + 
isClearable?: boolean; + value?: any; + portal?: React.RefObject; + showInputLabel?: boolean; +} + +export interface FilterExtraInformation { + [key: string]: FilterExtraInformationValues; +} + export interface FilterOption { name: string; type: 'search' | 'options' | 'boolean' | 'daterange' | 'team_select'; diff --git a/grafana-plugin/src/pages/incidents/Incidents.tsx b/grafana-plugin/src/pages/incidents/Incidents.tsx index bcefaf3ffa..4b873b3c65 100644 --- a/grafana-plugin/src/pages/incidents/Incidents.tsx +++ b/grafana-plugin/src/pages/incidents/Incidents.tsx @@ -1,14 +1,14 @@ import React, { SyntheticEvent } from 'react'; import { css, cx } from '@emotion/css'; -import { GrafanaTheme2, SelectableValue } from '@grafana/data'; +import { GrafanaTheme2, durationToMilliseconds, parseDuration, SelectableValue } from '@grafana/data'; import { LabelTag } from '@grafana/labels'; import { Button, HorizontalGroup, Icon, - LoadingPlaceholder, RadioButtonGroup, + RefreshPicker, Tooltip, VerticalGroup, withTheme2, @@ -79,16 +79,18 @@ interface IncidentsPageState { isSelectorColumnMenuOpen: boolean; isHorizontalScrolling: boolean; isFirstIncidentsFetchDone: boolean; + refreshInterval: string; } -const POLLING_NUM_SECONDS = 10; - const PAGINATION_OPTIONS = [ { label: '25', value: 25 }, { label: '50', value: 50 }, { label: '100', value: 100 }, ]; +const REFRESH_OPTIONS = ['5s', '10s', '15s', '30s', '1m', '5m']; +const REFRESH_DEFAULT_VALUE = '10s'; + const TABLE_SCROLL_OPTIONS: SelectableValue[] = [ { value: false, @@ -124,10 +126,12 @@ class _IncidentsPage extends React.Component(); + this.filtersPortalRef = React.createRef(); this.state = { selectedIncidentIds: [], showAddAlertGroupForm: false, + refreshInterval: REFRESH_DEFAULT_VALUE, pagination: { start, end: start + pageSize, @@ -138,6 +142,7 @@ class _IncidentsPage extends React.Component; private rootElRef: React.RefObject; private pollingIntervalId: ReturnType = undefined; @@ -160,25 +165,46 @@ class _IncidentsPage 
extends React.Component
Alert Groups - - - + +
+
+ + + + +
{this.renderIncidentFilters()} @@ -322,32 +348,54 @@ class _IncidentsPage extends React.Component { return this.renderCards(...args, store, theme); }} grafanaTeamStore={store.grafanaTeamStore} - defaultFilters={{ - team: [], - status: [IncidentStatus.Firing, IncidentStatus.Acknowledged], - mine: false, - started_at: 'now-30d_now', - }} />
); } + onRefresh = async () => { + this.clearPollingInterval(); + await this.props.store.alertGroupStore.fetchIncidentsAndStats(true); + this.setPollingInterval(); + }; + + onIntervalRefreshChange = (value: string) => { + this.clearPollingInterval(); + this.setState({ refreshInterval: value }, () => value && this.setPollingInterval()); + }; + handleOnClickEscalateTo = () => { this.setState({ showAddAlertGroupForm: true }); }; handleFiltersChange = async (filters: IncidentsFiltersType, isOnMount: boolean) => { - const { - store: { alertGroupStore }, - } = this.props; - + const { alertGroupStore } = this.props.store; const { start } = this.state.pagination; + // Clear polling whenever filters change + this.clearPollingInterval(); + this.setState({ filters, selectedIncidentIds: [], @@ -357,7 +405,12 @@ class _IncidentsPage extends React.Component 0; - const isLoading = LoaderHelper.isLoading(store.loaderStore, [ - ActionKey.FETCH_INCIDENTS, - ActionKey.FETCH_INCIDENTS_POLLING, - ActionKey.FETCH_INCIDENTS_AND_STATS, - ActionKey.INCIDENTS_BULK_UPDATE, - ]); - const styles = getStyles(theme); + const hasSelected = selectedIncidentIds.length > 0; const isBulkUpdate = LoaderHelper.isLoading(store.loaderStore, ActionKey.INCIDENTS_BULK_UPDATE); return ( @@ -493,7 +539,7 @@ class _IncidentsPage extends React.Component )} - + {hasSelected ? `${selectedIncidentIds.length} Alert Group${selectedIncidentIds.length > 1 ? 's' : ''} selected` : 'No Alert Groups selected'} @@ -501,10 +547,6 @@ class _IncidentsPage extends React.Component
- - - - { + if (!this.state.refreshInterval) { + return; + } + + const pollingNum = durationToMilliseconds(parseDuration(this.state.refreshInterval)); + this.pollingIntervalId = setTimeout(async () => { const isBrowserWindowInactive = document.hidden; const { liveUpdatesPaused } = this.props.store.alertGroupStore; @@ -984,7 +1032,7 @@ class _IncidentsPage extends React.Component { width: 400px; `, + alertsSelected: css` + white-space: nowrap; + `, + + rightSideFilters: css` + display: flex; + gap: 8px; + `, + bau: css` ${[1, 2, 3].map( (num) => ` diff --git a/grafana-plugin/src/types.ts b/grafana-plugin/src/types.ts index 7bf6c041c0..576c1cac84 100644 --- a/grafana-plugin/src/types.ts +++ b/grafana-plugin/src/types.ts @@ -1,4 +1,4 @@ -import { AppRootProps as BaseAppRootProps, AppPluginMeta, CurrentUserDTO, PluginConfigPageProps } from '@grafana/data'; +import { AppRootProps as BaseAppRootProps, AppPluginMeta, PluginConfigPageProps, CurrentUserDTO } from '@grafana/data'; export type OnCallPluginMetaJSONData = { stackId: number; From a53f20735522d5457121660c1ea7cf613d62f55c Mon Sep 17 00:00:00 2001 From: Vadim Stepanov Date: Tue, 23 Jul 2024 17:12:00 +0100 Subject: [PATCH 18/24] Alert group search feature flags (#4579) # What this PR does Adds two feature flags to experiment with the alert group search feature (on top of the existing `FEATURE_ALERT_GROUP_SEARCH_ENABLED` flag): * `FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS` - search window size in days * `ALERT_GROUPS_DISABLE_PREFER_ORDERING_INDEX` - enable a workaround that effectively forces the `alert_group_list_index` index to be used for alert group list requests ## Which issue(s) this PR closes Related to https://github.com/grafana/oncall-private/issues/2679 ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] Added the relevant release notes label (see labels prefixed w/ `release:`). 
These labels dictate how your PR will show up in the autogenerated release notes. --- engine/apps/api/tests/test_alert_group.py | 57 +++++++++++++++++++ engine/apps/api/views/alert_group.py | 49 +++++++++++++--- engine/common/api_helpers/paginators.py | 3 +- engine/common/utils.py | 2 +- engine/settings/base.py | 5 ++ .../RemoteFilters/RemoteFilters.tsx | 4 +- 6 files changed, 109 insertions(+), 11 deletions(-) diff --git a/engine/apps/api/tests/test_alert_group.py b/engine/apps/api/tests/test_alert_group.py index fa4417ee99..85a9af0988 100644 --- a/engine/apps/api/tests/test_alert_group.py +++ b/engine/apps/api/tests/test_alert_group.py @@ -24,6 +24,7 @@ from apps.api.serializers.alert import AlertFieldsCacheSerializerMixin from apps.api.serializers.alert_group import AlertGroupFieldsCacheSerializerMixin from apps.base.models import UserNotificationPolicyLogRecord +from common.api_helpers.filters import DateRangeFilterMixin alert_raw_request_data = { "evalMatches": [ @@ -975,6 +976,62 @@ def test_get_filter_labels( assert response.json()["results"][0]["pk"] == alert_groups[0].public_primary_key +@pytest.mark.django_db +def test_get_title_search( + settings, + make_organization_and_user_with_plugin_token, + make_alert_receive_channel, + make_channel_filter, + make_alert_group, + make_alert, + make_user_auth_headers, +): + settings.FEATURE_ALERT_GROUP_SEARCH_ENABLED = True + settings.FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS = 30 + organization, user, token = make_organization_and_user_with_plugin_token() + + alert_receive_channel = make_alert_receive_channel(organization) + channel_filter = make_channel_filter(alert_receive_channel, is_default=True) + + alert_groups = [] + for i in range(3): + alert_group = make_alert_group( + alert_receive_channel, channel_filter=channel_filter, web_title_cache=f"testing {i+1}" + ) + # alert groups starting every months going back + alert_group.started_at = timezone.now() - datetime.timedelta(days=10 + 30 * i) + 
alert_group.save(update_fields=["started_at"]) + make_alert(alert_group=alert_group, raw_request_data=alert_raw_request_data) + alert_groups.append(alert_group) + + client = APIClient() + url = reverse("api-internal:alertgroup-list") + + # check that the search returns alert groups started in the last 30 days + response = client.get( + url + "?search=testing", + format="json", + **make_user_auth_headers(user, token), + ) + assert response.status_code == status.HTTP_200_OK + assert len(response.json()["results"]) == 1 + assert response.json()["results"][0]["pk"] == alert_groups[0].public_primary_key + + # check that the search returns alert groups started in the last 30 days of specified date range + response = client.get( + url + + "?search=testing&started_at={}_{}".format( + (timezone.now() - datetime.timedelta(days=500)).strftime(DateRangeFilterMixin.DATE_FORMAT), + (timezone.now() - datetime.timedelta(days=30)).strftime(DateRangeFilterMixin.DATE_FORMAT), + ), + format="json", + **make_user_auth_headers(user, token), + ) + assert response.status_code == status.HTTP_200_OK + assert len(response.json()["results"]) == 1 + assert response.json()["results"][0]["pk"] == alert_groups[1].public_primary_key + + @pytest.mark.django_db @pytest.mark.parametrize( "role,expected_status", diff --git a/engine/apps/api/views/alert_group.py b/engine/apps/api/views/alert_group.py index 95afdba17e..5fd6f511fd 100644 --- a/engine/apps/api/views/alert_group.py +++ b/engine/apps/api/views/alert_group.py @@ -1,8 +1,10 @@ import typing +from datetime import timedelta from django.conf import settings from django.core.exceptions import ObjectDoesNotExist from django.db.models import Count, Max, Q +from django.utils import timezone from django_filters import rest_framework as filters from drf_spectacular.utils import extend_schema, inline_serializer from rest_framework import mixins, serializers, status, viewsets @@ -228,6 +230,30 @@ def retrieve(self, request, *args, **kwargs): return 
Response(data={"error_code": "wrong_team"}, status=status.HTTP_403_FORBIDDEN) +class AlertGroupSearchFilter(SearchFilter): + def filter_queryset(self, request, queryset, view): + search_fields = self.get_search_fields(view, request) + search_terms = self.get_search_terms(request) + if not search_fields or not search_terms: + return queryset + + if settings.FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS: + started_at = request.query_params.get("started_at") + end = DateRangeFilterMixin.parse_custom_datetime_range(started_at)[1] if started_at else timezone.now() + queryset = queryset.filter( + started_at__gte=end - timedelta(days=settings.FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS) + ) + + return super().filter_queryset(request, queryset, view) + + def get_search_fields(self, view, request): + return ( + ["=public_primary_key", "=inside_organization_number", "web_title_cache"] + if settings.FEATURE_ALERT_GROUP_SEARCH_ENABLED + else [] + ) + + class AlertGroupView( PreviewTemplateMixin, AlertGroupTeamFilteringMixin, @@ -275,12 +301,7 @@ class AlertGroupView( pagination_class = AlertGroupCursorPaginator - filter_backends = [SearchFilter, filters.DjangoFilterBackend] - search_fields = ( - ["=public_primary_key", "=inside_organization_number", "web_title_cache"] - if settings.FEATURE_ALERT_GROUP_SEARCH_ENABLED - else [] - ) + filter_backends = [AlertGroupSearchFilter, filters.DjangoFilterBackend] filterset_class = AlertGroupFilter def get_serializer_class(self): @@ -311,6 +332,13 @@ def get_queryset(self, ignore_filtering_by_available_teams=False): alert_receive_channels_ids = list(alert_receive_channels_qs.values_list("id", flat=True)) queryset = AlertGroup.objects.filter(channel__in=alert_receive_channels_ids) + if settings.ALERT_GROUPS_DISABLE_PREFER_ORDERING_INDEX: + # workaround related to MySQL "ORDER BY LIMIT Query Optimizer Bug" + # read more: https://hackmysql.com/infamous-order-by-limit-query-optimizer-bug/ + # this achieves the same effect as "FORCE INDEX 
(alert_group_list_index)" when + # paired with "ORDER BY started_at_optimized DESC" (ordering is performed in AlertGroupCursorPaginator). + queryset = queryset.extra({"started_at_optimized": "alerts_alertgroup.started_at + 0"}) + # Filter by labels. Since alert group labels are "static" filter by names, not IDs. label_query = self.request.query_params.getlist("label", []) kv_pairs = parse_label_query(label_query) @@ -811,7 +839,14 @@ def filters(self, request): ] if settings.FEATURE_ALERT_GROUP_SEARCH_ENABLED: - filter_options = [{"name": "search", "type": "search"}] + filter_options + description = "Search by alert group ID, number or title." + if settings.FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS: + description += ( + f" The search is limited to alert groups started in the last " + f"{settings.FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS} days of the specified date range." + ) + + filter_options = [{"name": "search", "type": "search", "description": description}] + filter_options if is_labels_feature_enabled(self.request.auth.organization): filter_options.append( diff --git a/engine/common/api_helpers/paginators.py b/engine/common/api_helpers/paginators.py index 3020b9d057..3692fde7aa 100644 --- a/engine/common/api_helpers/paginators.py +++ b/engine/common/api_helpers/paginators.py @@ -1,5 +1,6 @@ import typing +from django.conf import settings from rest_framework.pagination import BasePagination, CursorPagination, PageNumberPagination from rest_framework.response import Response @@ -85,4 +86,4 @@ class FifteenPageSizePaginator(PathPrefixedPagePagination): class AlertGroupCursorPaginator(PathPrefixedCursorPagination): page_size = 25 - ordering = "-started_at" # ordering by "-started_at", so it uses the right index (see AlertGroup.Meta.indexes) + ordering = "-started_at_optimized" if settings.ALERT_GROUPS_DISABLE_PREFER_ORDERING_INDEX else "-started_at" diff --git a/engine/common/utils.py b/engine/common/utils.py index b902fe219e..395a1306bd 100644 --- 
a/engine/common/utils.py +++ b/engine/common/utils.py @@ -129,7 +129,7 @@ def getenv_boolean(variable_name: str, default: bool) -> bool: return value.lower() in ("true", "1") -def getenv_integer(variable_name: str, default: int) -> int: +def getenv_integer(variable_name: str, default: int | None) -> int | None: value = os.environ.get(variable_name) if value is None: return default diff --git a/engine/settings/base.py b/engine/settings/base.py index fea5c687a4..f5b793dd75 100644 --- a/engine/settings/base.py +++ b/engine/settings/base.py @@ -72,6 +72,7 @@ # Enable labels feature for organizations from the list. Use OnCall organization ID, for this flag FEATURE_LABELS_ENABLED_PER_ORG = getenv_list("FEATURE_LABELS_ENABLED_PER_ORG", default=list()) FEATURE_ALERT_GROUP_SEARCH_ENABLED = getenv_boolean("FEATURE_ALERT_GROUP_SEARCH_ENABLED", default=False) +FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS = getenv_integer("FEATURE_ALERT_GROUP_SEARCH_CUTOFF_DAYS", default=None) FEATURE_NOTIFICATION_BUNDLE_ENABLED = getenv_boolean("FEATURE_NOTIFICATION_BUNDLE_ENABLED", default=False) TWILIO_API_KEY_SID = os.environ.get("TWILIO_API_KEY_SID") @@ -188,6 +189,10 @@ class DatabaseTypes: pymysql.install_as_MySQLdb() +ALERT_GROUPS_DISABLE_PREFER_ORDERING_INDEX = DATABASE_TYPE == DatabaseTypes.MYSQL and getenv_boolean( + "ALERT_GROUPS_DISABLE_PREFER_ORDERING_INDEX", default=False +) + # Redis REDIS_USERNAME = os.getenv("REDIS_USERNAME", "") REDIS_PASSWORD = os.getenv("REDIS_PASSWORD") diff --git a/grafana-plugin/src/containers/RemoteFilters/RemoteFilters.tsx b/grafana-plugin/src/containers/RemoteFilters/RemoteFilters.tsx index 309f2a79b3..51d5c11198 100644 --- a/grafana-plugin/src/containers/RemoteFilters/RemoteFilters.tsx +++ b/grafana-plugin/src/containers/RemoteFilters/RemoteFilters.tsx @@ -248,13 +248,13 @@ class _RemoteFilters extends Component { }; handleSearch = (query: string) => { - const { filters } = this.state; + const { filters, filterOptions } = this.state; const searchFilter = 
filters.find((filter: FilterOption) => filter.name === 'search'); const newFilters = filters; if (!searchFilter) { - newFilters.push({ name: 'search', type: 'search' }); + newFilters.push(filterOptions.find((filter: FilterOption) => filter.name === 'search')); } else { this.searchRef.current.focus(); } From ed57c30c24304d12ab3b6283f33c377ba8d01af1 Mon Sep 17 00:00:00 2001 From: Michael Derynck Date: Tue, 23 Jul 2024 14:43:28 -0600 Subject: [PATCH 19/24] Prepare for changes to OnCall plugin initialization (#4715) # What this PR does Upcoming changes to OnCall plugin initialization will be expecting `grafana_token` from all requests coming from the OnCall plugin. It is not currently used it is being added so the plugin can tolerate the update without errors. ## Checklist - [ ] Unit, integration, and e2e (if applicable) tests updated - [ ] Documentation added (or `pr:no public docs` PR label added if not required) - [ ] Added the relevant release notes label (see labels prefixed w/ `release:`). These labels dictate how your PR will show up in the autogenerated release notes. 
--- grafana-plugin/src/plugin.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grafana-plugin/src/plugin.json b/grafana-plugin/src/plugin.json index eafdaf1ba0..58824a43dc 100644 --- a/grafana-plugin/src/plugin.json +++ b/grafana-plugin/src/plugin.json @@ -143,7 +143,7 @@ "headers": [ { "name": "X-Instance-Context", - "content": "{ \"stack_id\": \"{{ printf \"%.0f\" .JsonData.stackId }}\", \"org_id\": \"{{ printf \"%.0f\" .JsonData.orgId }}\" }" + "content": "{ \"stack_id\": \"{{ printf \"%.0f\" .JsonData.stackId }}\", \"org_id\": \"{{ printf \"%.0f\" .JsonData.orgId }}\", \"grafana_token\": \"{{ .SecureJsonData.grafanaToken }}\" }" }, { "name": "Authorization", From 53556bd98b332e7096af9b2b3e3c32900c4e014a Mon Sep 17 00:00:00 2001 From: Dominik Broj Date: Wed, 24 Jul 2024 07:14:02 +0200 Subject: [PATCH 20/24] show chatops proxy error (#4719) # What this PR does show chatops proxy error if present in query params ## Which issue(s) this PR closes Related to https://github.com/grafana/oncall-gateway/issues/255 ## Checklist - [ ] Unit, integration, and e2e (if applicable) tests updated - [x] Documentation added (or `pr:no public docs` PR label added if not required) - [x] Added the relevant release notes label (see labels prefixed w/ `release:`). These labels dictate how your PR will show up in the autogenerated release notes. 
--- .../pages/settings/tabs/ChatOps/ChatOps.helpers.ts | 11 +++++++++++ .../src/pages/settings/tabs/ChatOps/ChatOps.tsx | 3 +++ 2 files changed, 14 insertions(+) create mode 100644 grafana-plugin/src/pages/settings/tabs/ChatOps/ChatOps.helpers.ts diff --git a/grafana-plugin/src/pages/settings/tabs/ChatOps/ChatOps.helpers.ts b/grafana-plugin/src/pages/settings/tabs/ChatOps/ChatOps.helpers.ts new file mode 100644 index 0000000000..4313ec5f7a --- /dev/null +++ b/grafana-plugin/src/pages/settings/tabs/ChatOps/ChatOps.helpers.ts @@ -0,0 +1,11 @@ +import { LocationHelper } from 'utils/LocationHelper'; +import { openErrorNotification } from 'utils/utils'; + +export const handleChatOpsQueryParamError = () => { + const error = LocationHelper.getQueryParam('error'); + + if (error) { + openErrorNotification(error); + LocationHelper.update({ error: undefined }, 'partial'); + } +}; diff --git a/grafana-plugin/src/pages/settings/tabs/ChatOps/ChatOps.tsx b/grafana-plugin/src/pages/settings/tabs/ChatOps/ChatOps.tsx index b43e751031..1441017b65 100644 --- a/grafana-plugin/src/pages/settings/tabs/ChatOps/ChatOps.tsx +++ b/grafana-plugin/src/pages/settings/tabs/ChatOps/ChatOps.tsx @@ -15,6 +15,8 @@ import { useStore } from 'state/useStore'; import { withMobXProviderContext } from 'state/withStore'; import { LocationHelper } from 'utils/LocationHelper'; +import { handleChatOpsQueryParamError } from './ChatOps.helpers'; + import styles from './ChatOps.module.css'; const cx = cn.bind(styles); @@ -38,6 +40,7 @@ export class _ChatOpsPage extends React.Component { componentDidMount() { const tab = LocationHelper.getQueryParam('chatOpsTab'); + handleChatOpsQueryParamError(); this.handleChatopsTabChange(tab || ChatOpsTab.Slack); } From 4877b9d9273b8bc3ca2ffa24f1228e75460ba0d4 Mon Sep 17 00:00:00 2001 From: Rares Mardare Date: Wed, 24 Jul 2024 10:48:39 +0300 Subject: [PATCH 21/24] Set unique key on each rendered route (#4721) # What this PR does - Set the unique key on `Expanded` and 
`Collapsed` containers, otherwise React has problems figuring out state for each of them when you append a new route - Fixed NPE found by Faro - Tweaked warnings display of `Routing template not set` and `Routing labels not set` ## Which issue(s) this PR closes Closes https://github.com/grafana/oncall/issues/4720 --- .../IntegrationContainers/RouteHeading.tsx | 34 ++++++++++++------- .../RouteLabelsDisplay/RouteLabelsDisplay.tsx | 2 +- .../src/pages/integration/Integration.tsx | 2 ++ 3 files changed, 25 insertions(+), 13 deletions(-) diff --git a/grafana-plugin/src/containers/IntegrationContainers/RouteHeading.tsx b/grafana-plugin/src/containers/IntegrationContainers/RouteHeading.tsx index f55317cee5..38fd9d06e1 100644 --- a/grafana-plugin/src/containers/IntegrationContainers/RouteHeading.tsx +++ b/grafana-plugin/src/containers/IntegrationContainers/RouteHeading.tsx @@ -55,32 +55,42 @@ const RouteHeadingDisplay: React.FC<{ channelFilter: ChannelFilter }> = ({ chann const styles = useStyles2(getStyles); const hasLabels = store.hasFeature(AppFeature.Labels); + const renderWarning = (text: string) => ( + <> +
+ +
+ {text} + + ); + if (channelFilter?.filtering_term || channelFilter?.filtering_labels) { return ( <> - - {channelFilter.filtering_term} - + {channelFilter.filtering_term && ( + + {channelFilter.filtering_term} + + )} + + {/* Show missing template warning */} + {!channelFilter.filtering_term && renderWarning('Routing template not set')} - + {channelFilter.filtering_labels?.length > 0 && } + + {/* Show missing labels warning */} + {!channelFilter.filtering_labels?.length && renderWarning('Routing labels not set')} ); } - return ( - <> -
- -
- Routing template not set - - ); + return renderWarning('Routing template not set'); }; const getStyles = (theme: GrafanaTheme2) => { diff --git a/grafana-plugin/src/containers/RouteLabelsDisplay/RouteLabelsDisplay.tsx b/grafana-plugin/src/containers/RouteLabelsDisplay/RouteLabelsDisplay.tsx index cd3c64fcd5..18692e91c7 100644 --- a/grafana-plugin/src/containers/RouteLabelsDisplay/RouteLabelsDisplay.tsx +++ b/grafana-plugin/src/containers/RouteLabelsDisplay/RouteLabelsDisplay.tsx @@ -82,6 +82,6 @@ export const RouteLabelsDisplay: React.FC = ({ labels, }; const getIsAddBtnDisabled = (labels: Array = []) => { - const lastItem = labels.at(-1); + const lastItem = labels?.at(-1); return lastItem && (lastItem.key?.id === undefined || lastItem.value?.id === undefined); }; diff --git a/grafana-plugin/src/pages/integration/Integration.tsx b/grafana-plugin/src/pages/integration/Integration.tsx index 6f2ec9b696..8d78a98bc0 100644 --- a/grafana-plugin/src/pages/integration/Integration.tsx +++ b/grafana-plugin/src/pages/integration/Integration.tsx @@ -654,6 +654,7 @@ class _IntegrationPage extends React.Component ( ( Date: Wed, 24 Jul 2024 17:53:06 +0800 Subject: [PATCH 22/24] Introduce slash command matcher (#4717) With the Unified Slack app we now have two ways of calling commands. 1. Legacy one when command invoked directly: /escalate 2. Unified one: /grafana escalate On top of that we have different slach commands for each environment: /escalate-local, /escalate-dev, etc. It was leading to a weird command to escalate via Unified App in dev u need to type: /grafana-dev escalate-develop. To support both, I introduced a matcher function for SlashCommandRoutes. It allows to simplify handling of such cases without complex workarounds in an EventAPIEndpoint. 
# What this PR does ## Which issue(s) this PR closes Related to [issue link here] ## Checklist - [x] Unit, integration, and e2e (if applicable) tests updated - [ ] Documentation added (or `pr:no public docs` PR label added if not required) - [ ] Added the relevant release notes label (see labels prefixed w/ `release:`). These labels dictate how your PR will show up in the autogenerated release notes. --- engine/apps/slack/scenarios/paging.py | 11 +++++++++-- engine/apps/slack/slash_command.py | 7 ++++++- engine/apps/slack/tests/test_slash_command.py | 1 + engine/apps/slack/types/scenario_routes.py | 6 +++++- engine/apps/slack/views.py | 2 +- 5 files changed, 22 insertions(+), 5 deletions(-) diff --git a/engine/apps/slack/scenarios/paging.py b/engine/apps/slack/scenarios/paging.py index ee38dc3664..72219b23ce 100644 --- a/engine/apps/slack/scenarios/paging.py +++ b/engine/apps/slack/scenarios/paging.py @@ -15,6 +15,7 @@ from apps.slack.constants import DIVIDER, PRIVATE_METADATA_MAX_LENGTH from apps.slack.errors import SlackAPIChannelNotFoundError from apps.slack.scenarios import scenario_step +from apps.slack.slash_command import SlashCommand from apps.slack.types import ( Block, BlockActionType, @@ -115,7 +116,13 @@ def get_current_items( class StartDirectPaging(scenario_step.ScenarioStep): """Handle slash command invocation and show initial dialog.""" - command_name = [settings.SLACK_DIRECT_PAGING_SLASH_COMMAND] + @staticmethod + def matcher(slash_command: SlashCommand) -> bool: + # Check if command is /escalate. It's a legacy command we keep for smooth transition. + is_legacy_command = slash_command.command == settings.SLACK_DIRECT_PAGING_SLASH_COMMAND + # Check if command is /grafana escalate. It's a new command from unified app. 
+ is_unified_app_command = slash_command.is_grafana_command and slash_command.subcommand == "escalate" + return is_legacy_command or is_unified_app_command def process_scenario( self, @@ -995,8 +1002,8 @@ def _generate_input_id_prefix() -> str: }, { "payload_type": PayloadType.SLASH_COMMAND, - "command_name": StartDirectPaging.command_name, "step": StartDirectPaging, + "matcher": StartDirectPaging.matcher, }, { "payload_type": PayloadType.VIEW_SUBMISSION, diff --git a/engine/apps/slack/slash_command.py b/engine/apps/slack/slash_command.py index 89131d5bae..1bb349da25 100644 --- a/engine/apps/slack/slash_command.py +++ b/engine/apps/slack/slash_command.py @@ -22,7 +22,8 @@ def __init__(self, command, args): @property def subcommand(self): """ - Return first arg as subcommand + Return first arg as action subcommand: part of command which defines action + Example: /grafana escalate -> escalate """ return self.args[0] if len(self.args) > 0 else None @@ -34,3 +35,7 @@ def parse(payload: SlashCommandPayload): command = payload["command"].lstrip("/") args = payload["text"].split() return SlashCommand(command, args) + + @property + def is_grafana_command(self): + return self.command in ["grafana", "grafana-dev", "grafana-ops", "grafana-prod"] diff --git a/engine/apps/slack/tests/test_slash_command.py b/engine/apps/slack/tests/test_slash_command.py index a821a8a44d..ffe1bb6902 100644 --- a/engine/apps/slack/tests/test_slash_command.py +++ b/engine/apps/slack/tests/test_slash_command.py @@ -14,6 +14,7 @@ def test_parse(): assert slash_command.command == "grafana" assert slash_command.args == ["escalate"] assert slash_command.subcommand == "escalate" + assert slash_command.is_grafana_command def test_parse_command_without_subcommand(): diff --git a/engine/apps/slack/types/scenario_routes.py b/engine/apps/slack/types/scenario_routes.py index 503a62efd5..6458ac5105 100644 --- a/engine/apps/slack/types/scenario_routes.py +++ b/engine/apps/slack/types/scenario_routes.py @@ -1,11 
+1,15 @@ import typing +from apps.slack.slash_command import SlashCommand + from .common import EventType, PayloadType if typing.TYPE_CHECKING: from apps.slack.scenarios.scenario_step import ScenarioStep from apps.slack.types import BlockActionType, InteractiveMessageActionType +MatcherType = typing.Callable[[SlashCommand], bool] + class ScenarioRoute: class _Base(typing.TypedDict): @@ -32,7 +36,7 @@ class MessageActionScenarioRoute(_Base): class SlashCommandScenarioRoute(_Base): payload_type: typing.Literal[PayloadType.SLASH_COMMAND] - command_name: typing.List[str] + matcher: MatcherType class ViewSubmissionScenarioRoute(_Base): payload_type: typing.Literal[PayloadType.VIEW_SUBMISSION] diff --git a/engine/apps/slack/views.py b/engine/apps/slack/views.py index 37eb70dacf..1812927a48 100644 --- a/engine/apps/slack/views.py +++ b/engine/apps/slack/views.py @@ -361,7 +361,7 @@ def post(self, request): cmd = SlashCommand.parse(payload) # Check both command and subcommand for backward compatibility # So both /grafana escalate and /escalate will work. 
- if cmd.command in route["command_name"] or cmd.subcommand in route["command_name"]: + if route["matcher"](cmd): Step = route["step"] logger.info("Routing to {}".format(Step)) step = Step(slack_team_identity, organization, user) From 9da5b94455be75ff2a114d246adfecdd96f582f8 Mon Sep 17 00:00:00 2001 From: Innokentii Konstantinov Date: Wed, 24 Jul 2024 17:59:56 +0800 Subject: [PATCH 23/24] Fix app redirect (#4725) --- engine/apps/chatops_proxy/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/apps/chatops_proxy/utils.py b/engine/apps/chatops_proxy/utils.py index bf3bc7cdaf..1c242bf608 100644 --- a/engine/apps/chatops_proxy/utils.py +++ b/engine/apps/chatops_proxy/utils.py @@ -30,7 +30,7 @@ def get_installation_link_from_chatops_proxy(user) -> typing.Optional[str]: link, _ = client.get_slack_oauth_link( org.stack_id, user.user_id, - urljoin(org.web_link, "settings?tab=TeamsSettings&chatOpsTab=Slack"), + urljoin(org.web_link, "settings?tab=ChatOps&chatOpsTab=Slack"), APP_TYPE_ONCALL, ) return link From b27a158eb0b25881aa9a5cf985c93fcb2c4c4238 Mon Sep 17 00:00:00 2001 From: Vadim Stepanov Date: Wed, 24 Jul 2024 11:44:47 +0100 Subject: [PATCH 24/24] Fix `/escalate` predefined org (#4723) Related to https://github.com/grafana/oncall-gateway/issues/276 Co-authored-by: Innokentii Konstantinov --- engine/apps/slack/scenarios/paging.py | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/engine/apps/slack/scenarios/paging.py b/engine/apps/slack/scenarios/paging.py index 72219b23ce..7a2b2a3251 100644 --- a/engine/apps/slack/scenarios/paging.py +++ b/engine/apps/slack/scenarios/paging.py @@ -312,19 +312,20 @@ def process_scenario( if not user_is_oncall(selected_user): # display additional confirmation modal metadata = json.loads(payload["view"]["private_metadata"]) - private_metadata = make_private_metadata( - { - "state": payload["view"]["state"], - "input_id_prefix": metadata["input_id_prefix"], - 
"channel_id": metadata["channel_id"], - "submit_routing_uid": metadata["submit_routing_uid"], - DataKey.USERS: metadata[DataKey.USERS], - }, - selected_user.organization, - ) + private_metadata = { + "state": payload["view"]["state"], + "input_id_prefix": metadata["input_id_prefix"], + "channel_id": metadata["channel_id"], + "submit_routing_uid": metadata["submit_routing_uid"], + DataKey.USERS: metadata[DataKey.USERS], + } + # keep predefined organization in private metadata + if "organization_id" in metadata: + private_metadata["organization_id"] = metadata["organization_id"] view = _display_confirm_participant_invitation_view( - OnPagingConfirmUserChange.routing_uid(), private_metadata + OnPagingConfirmUserChange.routing_uid(), + make_private_metadata(private_metadata, selected_user.organization), ) self._slack_client.views_push(trigger_id=payload["trigger_id"], view=view) else: @@ -392,6 +393,10 @@ def process_scenario( "submit_routing_uid": metadata["submit_routing_uid"], DataKey.USERS: metadata[DataKey.USERS], } + # keep predefined organization in private metadata + if "organization_id" in metadata: + private_metadata["organization_id"] = metadata["organization_id"] + previous_view_payload = { "view": { "state": metadata["state"],