From d2ace795cb2eb46eeae4ee946fcbf204fb9caa72 Mon Sep 17 00:00:00 2001
From: Tony Xiao
Date: Tue, 31 Dec 2024 10:31:41 -0500
Subject: [PATCH] chore(typing): Add typing to organization-events endpoint
 (#82656)

This adds typing to the `organization-events` endpoint, and specifically
ensures that the referrers set is typed.
---
 pyproject.toml                                |   1 -
 .../api/endpoints/organization_events.py      | 112 +++++++++++-------
 src/sentry/search/events/types.py             |   3 +
 src/sentry/snuba/errors.py                    |  47 ++++----
 .../snuba/metrics_enhanced_performance.py     |  46 +++----
 src/sentry/snuba/metrics_performance.py       |  39 +++---
 src/sentry/snuba/referrer.py                  |  22 ++--
 src/sentry/snuba/spans_eap.py                 |   3 +-
 src/sentry/snuba/types.py                     |  40 +++++++
 9 files changed, 196 insertions(+), 117 deletions(-)
 create mode 100644 src/sentry/snuba/types.py

diff --git a/pyproject.toml b/pyproject.toml
index 545bf6864d16f9..ff6fcc6a0c2c9f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -131,7 +131,6 @@ module = [
     "sentry.api.endpoints.index",
     "sentry.api.endpoints.internal.mail",
     "sentry.api.endpoints.organization_details",
-    "sentry.api.endpoints.organization_events",
     "sentry.api.endpoints.organization_events_facets_performance",
     "sentry.api.endpoints.organization_events_meta",
     "sentry.api.endpoints.organization_events_spans_performance",
diff --git a/src/sentry/api/endpoints/organization_events.py b/src/sentry/api/endpoints/organization_events.py
index 0e7952cc8318de..80577c317c5ed3 100644
--- a/src/sentry/api/endpoints/organization_events.py
+++ b/src/sentry/api/endpoints/organization_events.py
@@ -35,6 +35,7 @@
 )
 from sentry.snuba.metrics.extraction import MetricSpecType
 from sentry.snuba.referrer import Referrer
+from sentry.snuba.types import DatasetQuery
 from sentry.snuba.utils import dataset_split_decision_inferred_from_query, get_dataset
 from sentry.types.ratelimit import RateLimit, RateLimitCategory
 from sentry.utils.snuba import SnubaError
@@ -55,7 +56,7 @@ class DiscoverDatasetSplitException(Exception):
     pass
 
 
-ALLOWED_EVENTS_REFERRERS = {
+ALLOWED_EVENTS_REFERRERS: set[str] = {
     Referrer.API_ORGANIZATION_EVENTS.value,
     Referrer.API_ORGANIZATION_EVENTS_V2.value,
     Referrer.API_DASHBOARDS_TABLEWIDGET.value,
@@ -173,7 +174,6 @@ class DiscoverDatasetSplitException(Exception):
     Referrer.ISSUE_DETAILS_STREAMLINE_LIST.value,
 }
 
-API_TOKEN_REFERRER = Referrer.API_AUTH_TOKEN_EVENTS.value
 
 LEGACY_RATE_LIMIT = dict(limit=30, window=1, concurrent_limit=15)
 # reduced limit will be the future default for all organizations not explicitly on increased limit
@@ -276,8 +276,7 @@ class OrganizationEventsEndpoint(OrganizationEventsV2EndpointBase):
 
     enforce_rate_limit = True
 
-    def rate_limits(*args, **kwargs) -> dict[str, dict[RateLimitCategory, RateLimit]]:
-        return rate_limit_events(*args, **kwargs)
+    rate_limits = rate_limit_events
 
     def get_features(self, organization: Organization, request: Request) -> Mapping[str, bool]:
         feature_names = [
@@ -298,11 +297,13 @@ def get_features(self, organization: Organization, request: Request) -> Mapping[
             actor=request.user,
         )
 
-        all_features = (
-            batch_features.get(f"organization:{organization.id}", {})
-            if batch_features is not None
-            else {}
-        )
+        all_features: dict[str, bool] = {}
+
+        if batch_features is not None:
+            for feature_name, result in batch_features.get(
+                f"organization:{organization.id}", {}
+            ).items():
+                all_features[feature_name] = bool(result)
 
         for feature_name in feature_names:
             if feature_name not in all_features:
@@ -382,7 +383,7 @@ def get(self, request: Request, organization) -> Response:
                 }
             )
         except InvalidParams as err:
-            raise ParseError(err)
+            raise ParseError(detail=str(err))
 
         batch_features = self.get_features(organization, request)
 
@@ -421,7 +422,9 @@ def get(self, request: Request, organization) -> Response:
 
         # Force the referrer to "api.auth-token.events" for events requests authorized through a bearer token
         if request.auth:
-            referrer = API_TOKEN_REFERRER
+            referrer = Referrer.API_AUTH_TOKEN_EVENTS.value
+        elif referrer is None:
+            referrer = Referrer.API_ORGANIZATION_EVENTS.value
         elif referrer not in ALLOWED_EVENTS_REFERRERS:
             if referrer:
                 with sentry_sdk.isolation_scope() as scope:
@@ -436,11 +439,16 @@ def get(self, request: Request, organization) -> Response:
         use_rpc = request.GET.get("useRpc", "0") == "1"
         sentry_sdk.set_tag("performance.use_rpc", use_rpc)
 
-        def _data_fn(scoped_dataset, offset, limit, query) -> dict[str, Any]:
+        def _data_fn(
+            dataset_query: DatasetQuery,
+            offset: int,
+            limit: int,
+            query: str | None,
+        ):
             if use_rpc and dataset == spans_eap:
                 return spans_rpc.run_table_query(
                     params=snuba_params,
-                    query_string=query,
+                    query_string=query or "",
                     selected_columns=self.get_field_list(organization, request),
                     orderby=self.get_orderby(request),
                     offset=offset,
@@ -452,9 +460,9 @@ def _data_fn(scoped_dataset, offset, limit, query) -> dict[str, Any]:
                     ),
                 )
             query_source = self.get_request_source(request)
-            return scoped_dataset.query(
+            return dataset_query(
                 selected_columns=self.get_field_list(organization, request),
-                query=query,
+                query=query or "",
                 snuba_params=snuba_params,
                 equations=self.get_equation_list(organization, request),
                 orderby=self.get_orderby(request),
@@ -463,24 +471,30 @@ def _data_fn(scoped_dataset, offset, limit, query) -> dict[str, Any]:
                 referrer=referrer,
                 auto_fields=True,
                 auto_aggregations=True,
-                use_aggregate_conditions=use_aggregate_conditions,
                 allow_metric_aggregates=allow_metric_aggregates,
+                use_aggregate_conditions=use_aggregate_conditions,
                 transform_alias_to_input_format=True,
                 # Whether the flag is enabled or not, regardless of the referrer
                 has_metrics=use_metrics,
                 use_metrics_layer=batch_features.get("organizations:use-metrics-layer", False),
                 on_demand_metrics_enabled=on_demand_metrics_enabled,
                 on_demand_metrics_type=on_demand_metrics_type,
-                query_source=query_source,
                 fallback_to_transactions=features.has(
                     "organizations:performance-discover-dataset-selector",
                     organization,
                     actor=request.user,
                 ),
+                query_source=query_source,
             )
 
         @sentry_sdk.tracing.trace
-        def _dashboards_data_fn(scoped_dataset, offset, limit, scoped_query, dashboard_widget_id):
+        def _dashboards_data_fn(
+            scoped_dataset_query: DatasetQuery,
+            offset: int,
+            limit: int,
+            scoped_query: str | None,
+            dashboard_widget_id: str,
+        ):
             try:
                 widget = DashboardWidget.objects.get(id=dashboard_widget_id)
                 does_widget_have_split = widget.discover_widget_split is not None
@@ -491,27 +505,29 @@ def _dashboards_data_fn(scoped_dataset, offset, limit, scoped_query, dashboard_w
                 )
 
                 if does_widget_have_split and not has_override_feature:
+                    dataset_query: DatasetQuery
+
                     # This is essentially cached behaviour and we skip the check
                     if widget.discover_widget_split == DashboardWidgetTypes.ERROR_EVENTS:
-                        split_dataset = errors
+                        dataset_query = errors.query
                     elif widget.discover_widget_split == DashboardWidgetTypes.TRANSACTION_LIKE:
                         # We can't add event.type:transaction for now because of on-demand.
-                        split_dataset = scoped_dataset
+                        dataset_query = scoped_dataset_query
                     else:
-                        split_dataset = discover
+                        dataset_query = discover.query
 
-                    return _data_fn(split_dataset, offset, limit, scoped_query)
+                    return _data_fn(dataset_query, offset, limit, scoped_query)
 
                 with handle_query_errors():
                     try:
-                        error_results = _data_fn(errors, offset, limit, scoped_query)
+                        error_results = _data_fn(errors.query, offset, limit, scoped_query)
                         # Widget has not split the discover dataset yet, so we need to check if there are errors etc.
                         has_errors = len(error_results["data"]) > 0
                     except SnubaError:
                         has_errors = False
                         error_results = None
 
-                    original_results = _data_fn(scoped_dataset, offset, limit, scoped_query)
+                    original_results = _data_fn(scoped_dataset_query, offset, limit, scoped_query)
                     if original_results.get("data") is not None:
                         dataset_meta = original_results.get("meta", {})
                     else:
@@ -528,7 +544,9 @@ def _dashboards_data_fn(scoped_dataset, offset, limit, scoped_query, dashboard_w
                     if has_errors and has_other_data and not using_metrics:
                         # In the case that the original request was not using the metrics dataset, we cannot be certain that other data is solely transactions.
                         sentry_sdk.set_tag("third_split_query", True)
-                        transaction_results = _data_fn(transactions, offset, limit, scoped_query)
+                        transaction_results = _data_fn(
+                            transactions.query, offset, limit, scoped_query
+                        )
                         has_transactions = len(transaction_results["data"]) > 0
 
                     decision = self.save_split_decision(
@@ -536,7 +554,7 @@ def _dashboards_data_fn(scoped_dataset, offset, limit, scoped_query, dashboard_w
                     )
 
                     if decision == DashboardWidgetTypes.DISCOVER:
-                        return _data_fn(discover, offset, limit, scoped_query)
+                        return _data_fn(discover.query, offset, limit, scoped_query)
                     elif decision == DashboardWidgetTypes.TRANSACTION_LIKE:
                         original_results["meta"]["discoverSplitDecision"] = (
                             DashboardWidgetTypes.get_type_name(
@@ -554,13 +572,19 @@ def _dashboards_data_fn(scoped_dataset, offset, limit, scoped_query, dashboard_w
             except Exception as e:
                 # Swallow the exception if it was due to the discover split, and try again one more time.
                 if isinstance(e, ParseError):
-                    return _data_fn(scoped_dataset, offset, limit, scoped_query)
+                    return _data_fn(scoped_dataset_query, offset, limit, scoped_query)
 
                 sentry_sdk.capture_exception(e)
-                return _data_fn(scoped_dataset, offset, limit, scoped_query)
+                return _data_fn(scoped_dataset_query, offset, limit, scoped_query)
 
         @sentry_sdk.tracing.trace
-        def _discover_data_fn(scoped_dataset, offset, limit, scoped_query, discover_saved_query_id):
+        def _discover_data_fn(
+            scoped_dataset_query: DatasetQuery,
+            offset: int,
+            limit: int,
+            scoped_query: str | None,
+            discover_saved_query_id: str,
+        ):
             try:
                 discover_query = DiscoverSavedQuery.objects.get(
                     id=discover_saved_query_id, organization=organization
@@ -569,7 +593,7 @@ def _discover_data_fn(scoped_dataset, offset, limit, scoped_query, discover_save
                     discover_query.dataset is not DiscoverSavedQueryTypes.DISCOVER
                 )
                 if does_widget_have_split:
-                    return _data_fn(scoped_dataset, offset, limit, scoped_query)
+                    return _data_fn(scoped_dataset_query, offset, limit, scoped_query)
 
                 dataset_inferred_from_query = dataset_split_decision_inferred_from_query(
                     self.get_field_list(organization, request),
@@ -580,9 +604,11 @@ def _discover_data_fn(scoped_dataset, offset, limit, scoped_query, discover_save
 
                 # See if we can infer which dataset based on selected columns and query string.
                 with handle_query_errors():
-                    if dataset_inferred_from_query is not None:
+                    if (
+                        dataset := SAVED_QUERY_DATASET_MAP.get(dataset_inferred_from_query)
+                    ) is not None:
                         result = _data_fn(
-                            SAVED_QUERY_DATASET_MAP[dataset_inferred_from_query],
+                            dataset.query,
                             offset,
                             limit,
                             scoped_query,
@@ -606,11 +632,11 @@ def _discover_data_fn(scoped_dataset, offset, limit, scoped_query, discover_save
                 with ThreadPoolExecutor(max_workers=3) as exe:
                     futures = {
                         exe.submit(
-                            _data_fn, get_dataset(dataset_), offset, limit, scoped_query
-                        ): dataset_
-                        for dataset_ in [
-                            "errors",
-                            "transactions",
+                            _data_fn, dataset_query, offset, limit, scoped_query
+                        ): dataset_name
+                        for dataset_name, dataset_query in [
+                            ("errors", errors.query),
+                            ("transactions", transactions.query),
                         ]
                     }
 
@@ -664,10 +690,10 @@ def _discover_data_fn(scoped_dataset, offset, limit, scoped_query, discover_save
             except Exception as e:
                 # Swallow the exception if it was due to the discover split, and try again one more time.
                 if isinstance(e, ParseError):
-                    return _data_fn(scoped_dataset, offset, limit, scoped_query)
+                    return _data_fn(scoped_dataset_query, offset, limit, scoped_query)
 
                 sentry_sdk.capture_exception(e)
-                return _data_fn(scoped_dataset, offset, limit, scoped_query)
+                return _data_fn(scoped_dataset_query, offset, limit, scoped_query)
 
         def data_fn_factory(scoped_dataset):
             """
@@ -681,17 +707,17 @@ def data_fn_factory(scoped_dataset):
             dashboard_widget_id = request.GET.get("dashboardWidgetId", None)
             discover_saved_query_id = request.GET.get("discoverSavedQueryId", None)
 
-            def fn(offset, limit) -> dict[str, Any]:
+            def fn(offset, limit):
                 if save_discover_dataset_decision and discover_saved_query_id:
                     return _discover_data_fn(
-                        scoped_dataset, offset, limit, scoped_query, discover_saved_query_id
+                        scoped_dataset.query, offset, limit, scoped_query, discover_saved_query_id
                     )
 
                 if not (metrics_enhanced and dashboard_widget_id):
-                    return _data_fn(scoped_dataset, offset, limit, scoped_query)
+                    return _data_fn(scoped_dataset.query, offset, limit, scoped_query)
 
                 return _dashboards_data_fn(
-                    scoped_dataset, offset, limit, scoped_query, dashboard_widget_id
+                    scoped_dataset.query, offset, limit, scoped_query, dashboard_widget_id
                 )
 
             return fn
diff --git a/src/sentry/search/events/types.py b/src/sentry/search/events/types.py
index 8d11eacdeee8e4..abba86f4a621a5 100644
--- a/src/sentry/search/events/types.py
+++ b/src/sentry/search/events/types.py
@@ -65,9 +65,12 @@ class QueryFramework:
 
 
 class EventsMeta(TypedDict):
+    datasetReason: NotRequired[str]
     fields: dict[str, str]
     tips: NotRequired[dict[str, str | None]]
     isMetricsData: NotRequired[bool]
+    isMetricsExtractedData: NotRequired[bool]
+    discoverSplitDecision: NotRequired[str]
 
 
 class EventsResponse(TypedDict):
diff --git a/src/sentry/snuba/errors.py b/src/sentry/snuba/errors.py
index 8466a1f40c2c78..96563bd52e6883 100644
--- a/src/sentry/snuba/errors.py
+++ b/src/sentry/snuba/errors.py
@@ -4,6 +4,7 @@
 from typing import cast
 
 import sentry_sdk
+from snuba_sdk import Column, Condition
 
 from sentry.discover.arithmetic import categorize_columns
 from sentry.exceptions import InvalidSearchQuery
@@ -30,29 +31,31 @@
 
 
 def query(
-    selected_columns,
-    query,
-    snuba_params,
-    equations=None,
-    orderby=None,
-    offset=None,
-    limit=50,
-    referrer=None,
-    auto_fields=False,
-    auto_aggregations=False,
-    include_equation_fields=False,
-    allow_metric_aggregates=False,
-    use_aggregate_conditions=False,
-    conditions=None,
-    functions_acl=None,
-    transform_alias_to_input_format=False,
-    sample=None,
-    has_metrics=False,
-    use_metrics_layer=False,
-    skip_tag_resolution=False,
-    on_demand_metrics_enabled=False,
+    selected_columns: list[str],
+    query: str,
+    snuba_params: SnubaParams,
+    equations: list[str] | None = None,
+    orderby: list[str] | None = None,
+    offset: int | None = None,
+    limit: int = 50,
+    referrer: str | None = None,
+    auto_fields: bool = False,
+    auto_aggregations: bool = False,
+    include_equation_fields: bool = False,
+    allow_metric_aggregates: bool = False,
+    use_aggregate_conditions: bool = False,
+    conditions: list[Condition] | None = None,
+    functions_acl: list[str] | None = None,
+    transform_alias_to_input_format: bool = False,
+    sample: float | None = None,
+    has_metrics: bool = False,
+    use_metrics_layer: bool = False,
+    skip_tag_resolution: bool = False,
+    extra_columns: list[Column] | None = None,
+    on_demand_metrics_enabled: bool = False,
     on_demand_metrics_type: MetricSpecType | None = None,
-    fallback_to_transactions=False,
+    dataset: Dataset = Dataset.Events,
+    fallback_to_transactions: bool = False,
     query_source: QuerySource | None = None,
 ) -> EventsResponse:
     if not selected_columns:
diff --git a/src/sentry/snuba/metrics_enhanced_performance.py b/src/sentry/snuba/metrics_enhanced_performance.py
index 2273781467e894..0fcdd654a2f19c 100644
--- a/src/sentry/snuba/metrics_enhanced_performance.py
+++ b/src/sentry/snuba/metrics_enhanced_performance.py
@@ -19,6 +19,7 @@
 from sentry.snuba.metrics_performance import timeseries_query as metrics_timeseries_query
 from sentry.snuba.metrics_performance import top_events_timeseries as metrics_top_events_timeseries
 from sentry.snuba.query_sources import QuerySource
+from sentry.snuba.types import DatasetQuery
 from sentry.utils.snuba import SnubaTSResult
 
 
@@ -48,7 +49,7 @@ def query(
     on_demand_metrics_type: MetricSpecType | None = None,
     fallback_to_transactions: bool = False,
     query_source: QuerySource | None = None,
-):
+) -> EventsResponse:
     metrics_compatible = not equations
     dataset_reason = discover.DEFAULT_DATASET_REASON
 
@@ -57,22 +58,22 @@ def query(
             result = metrics_query(
                 selected_columns,
                 query,
-                snuba_params,
-                equations,
-                orderby,
-                offset,
-                limit,
-                referrer,
-                auto_fields,
-                auto_aggregations,
-                use_aggregate_conditions,
-                allow_metric_aggregates,
-                conditions,
-                functions_acl,
-                transform_alias_to_input_format,
-                has_metrics,
-                use_metrics_layer,
-                on_demand_metrics_enabled,
+                snuba_params=snuba_params,
+                equations=equations,
+                orderby=orderby,
+                offset=offset,
+                limit=limit,
+                referrer=referrer,
+                auto_fields=auto_fields,
+                auto_aggregations=auto_aggregations,
+                use_aggregate_conditions=use_aggregate_conditions,
+                allow_metric_aggregates=allow_metric_aggregates,
+                conditions=conditions,
+                functions_acl=functions_acl,
+                transform_alias_to_input_format=transform_alias_to_input_format,
+                has_metrics=has_metrics,
+                use_metrics_layer=use_metrics_layer,
+                on_demand_metrics_enabled=on_demand_metrics_enabled,
                 on_demand_metrics_type=on_demand_metrics_type,
                 query_source=query_source,
             )
@@ -90,13 +91,13 @@ def query(
 
     # Either metrics failed, or this isn't a query we can enhance with metrics
     if not metrics_compatible:
-        dataset: types.ModuleType = discover
+        dataset_query: DatasetQuery = discover.query
         if fallback_to_transactions:
-            dataset = transactions
+            dataset_query = transactions.query
             sentry_sdk.set_tag("performance.dataset", "transactions")
         else:
             sentry_sdk.set_tag("performance.dataset", "discover")
-        results = dataset.query(
+        results = dataset_query(
             selected_columns,
             query,
             snuba_params=snuba_params,
@@ -120,7 +121,10 @@ def query(
 
             return results
 
-    return {}
+    return {
+        "data": [],
+        "meta": {"fields": {}},
+    }
 
 
 def timeseries_query(
diff --git a/src/sentry/snuba/metrics_performance.py b/src/sentry/snuba/metrics_performance.py
index 68e00c52bfccf8..2acd67232bc7bd 100644
--- a/src/sentry/snuba/metrics_performance.py
+++ b/src/sentry/snuba/metrics_performance.py
@@ -6,7 +6,7 @@
 from typing import Any, Literal, overload
 
 import sentry_sdk
-from snuba_sdk import Column
+from snuba_sdk import Column, Condition
 
 from sentry.discover.arithmetic import categorize_columns
 from sentry.exceptions import IncompatibleMetricsQuery
@@ -30,29 +30,32 @@
 
 
 def query(
-    selected_columns,
-    query,
-    snuba_params=None,
-    equations=None,
-    orderby=None,
-    offset=None,
-    limit=50,
-    referrer=None,
-    auto_fields=False,
-    auto_aggregations=False,
-    use_aggregate_conditions=False,
-    allow_metric_aggregates=True,
-    conditions=None,
-    functions_acl=None,
-    transform_alias_to_input_format=False,
+    selected_columns: list[str],
+    query: str,
+    snuba_params: SnubaParams,
+    equations: list[str] | None = None,
+    orderby: list[str] | None = None,
+    offset: int | None = None,
+    limit: int = 50,
+    referrer: str | None = None,
+    auto_fields: bool = False,
+    auto_aggregations: bool = False,
+    include_equation_fields: bool = False,
+    allow_metric_aggregates: bool = True,
+    use_aggregate_conditions: bool = False,
+    conditions: list[Condition] | None = None,
+    functions_acl: list[str] | None = None,
+    transform_alias_to_input_format: bool = False,
+    sample: float | None = None,
     has_metrics: bool = True,
     use_metrics_layer: bool = False,
+    skip_tag_resolution: bool = False,
     on_demand_metrics_enabled: bool = False,
     on_demand_metrics_type: MetricSpecType | None = None,
     granularity: int | None = None,
     fallback_to_transactions=False,
     query_source: QuerySource | None = None,
-):
+) -> EventsResponse:
     with sentry_sdk.start_span(op="mep", name="MetricQueryBuilder"):
         metrics_query = MetricsQueryBuilder(
             dataset=Dataset.PerformanceMetrics,
@@ -78,6 +81,8 @@ def query(
                 on_demand_metrics_type=on_demand_metrics_type,
             ),
         )
+        if referrer is None:
+            referrer = ""
         metrics_referrer = referrer + ".metrics-enhanced"
         results = metrics_query.run_query(referrer=metrics_referrer, query_source=query_source)
     with sentry_sdk.start_span(op="mep", name="query.transform_results"):
diff --git a/src/sentry/snuba/referrer.py b/src/sentry/snuba/referrer.py
index a4f1cf20319c04..3a740223d27aae 100644
--- a/src/sentry/snuba/referrer.py
+++ b/src/sentry/snuba/referrer.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import logging
-from enum import Enum, unique
+from enum import StrEnum, unique
 
 from sentry.utils import metrics
 
@@ -9,7 +9,7 @@
 
 
 @unique
-class Referrer(Enum):
+class Referrer(StrEnum):
     ALERTRULESERIALIZER_TEST_QUERY_PRIMARY = "alertruleserializer.test_query.primary"
     ALERTRULESERIALIZER_TEST_QUERY = "alertruleserializer.test_query"
     ANOMALY_DETECTION_HISTORICAL_DATA_QUERY = "anomaly_detection_historical_data_query"
@@ -530,32 +530,32 @@ class Referrer(Enum):
 
 
     # Performance Cache Module
     API_PERFORMANCE_CACHE_LANDING_CACHE_THROUGHPUT_CHART = (
-        "api.performance.cache.landing-cache-throughput-chart",
+        "api.performance.cache.landing-cache-throughput-chart"
     )
     API_PERFORMANCE_CACHE_LANDING_CACHE_TRANSACTION_LIST = (
-        "api.performance.cache.landing-cache-transaction-list",
+        "api.performance.cache.landing-cache-transaction-list"
     )
     API_PERFORMANCE_CACHE_LANDING_CACHE_TRANSACTION_DURATION = (
-        "api.performance.cache.landing-cache-transaction-duration",
+        "api.performance.cache.landing-cache-transaction-duration"
     )
     API_PERFORMANCE_CACHE_SAMPLES_CACHE_METRICS_RIBBON = (
-        "api.performance.cache.samples-cache-metrics-ribbon",
+        "api.performance.cache.samples-cache-metrics-ribbon"
    )
     API_PERFORMANCE_CACHE_SAMPLES_CACHE_TRANSACTION_DURATION_CHART = (
-        "api.performance.cache.samples-cache-transaction-duration-chart",
+        "api.performance.cache.samples-cache-transaction-duration-chart"
     )
     API_PERFORMANCE_CACHE_SAMPLES_CACHE_TRANSACTION_DURATION = (
-        "api.performance.cache.samples-cache-transaction-duration",
+        "api.performance.cache.samples-cache-transaction-duration"
     )
     API_PERFORMANCE_CACHE_SAMPLES_CACHE_SPAN_SAMPLES = (
-        "api.performance.cache.samples-cache-span-samples",
+        "api.performance.cache.samples-cache-span-samples"
     )
     API_PERFORMANCE_CACHE_SAMPLES_CACHE_SPAN_SAMPLES_TRANSACTION_DURATION = (
-        "api.performance.cache.samples-cache-span-samples-transaction-duration",
+        "api.performance.cache.samples-cache-span-samples-transaction-duration"
     )
     API_PERFORMANCE_CACHE_SAMPLES_CACHE_HIT_MISS_CHART = (
-        "api.performance.cache.samples-cache-hit-miss-chart",
+        "api.performance.cache.samples-cache-hit-miss-chart"
     )
 
     # Performance Queues Module
diff --git a/src/sentry/snuba/spans_eap.py b/src/sentry/snuba/spans_eap.py
index e28cd30e3edd94..35ebede9dc6d7e 100644
--- a/src/sentry/snuba/spans_eap.py
+++ b/src/sentry/snuba/spans_eap.py
@@ -50,8 +50,7 @@ def query(
     dataset: Dataset = Dataset.Discover,
     fallback_to_transactions: bool = False,
     query_source: QuerySource | None = None,
-    enable_rpc: bool | None = False,
-):
+) -> EventsResponse:
     builder = SpansEAPQueryBuilder(
         Dataset.EventsAnalyticsPlatform,
         {},
diff --git a/src/sentry/snuba/types.py b/src/sentry/snuba/types.py
new file mode 100644
index 00000000000000..ae6c599819ad0c
--- /dev/null
+++ b/src/sentry/snuba/types.py
@@ -0,0 +1,40 @@
+from typing import Protocol
+
+from snuba_sdk import Column, Condition
+
+from sentry.search.events.types import EventsResponse, SnubaParams
+from sentry.snuba.dataset import Dataset
+from sentry.snuba.metrics.extraction import MetricSpecType
+from sentry.snuba.query_sources import QuerySource
+
+
+class DatasetQuery(Protocol):
+    def __call__(
+        self,
+        selected_columns: list[str],
+        query: str,
+        snuba_params: SnubaParams,
+        equations: list[str] | None = None,
+        orderby: list[str] | None = None,
+        offset: int | None = None,
+        limit: int = 50,
+        referrer: str | None = None,
+        auto_fields: bool = False,
+        auto_aggregations: bool = False,
+        include_equation_fields: bool = False,
+        allow_metric_aggregates: bool = False,
+        use_aggregate_conditions: bool = False,
+        conditions: list[Condition] | None = None,
+        functions_acl: list[str] | None = None,
+        transform_alias_to_input_format: bool = False,
+        sample: float | None = None,
+        has_metrics: bool = False,
+        use_metrics_layer: bool = False,
+        skip_tag_resolution: bool = False,
+        extra_columns: list[Column] | None = None,
+        on_demand_metrics_enabled: bool = False,
+        on_demand_metrics_type: MetricSpecType | None = None,
+        dataset: Dataset = Dataset.Discover,
+        fallback_to_transactions: bool = False,
+        query_source: QuerySource | None = None,
+    ) -> EventsResponse: ...