diff --git a/quesma/queryparser/pancake_sql_query_generation_test.go b/quesma/queryparser/pancake_sql_query_generation_test.go
index b1ec0cbd3..bc3b06352 100644
--- a/quesma/queryparser/pancake_sql_query_generation_test.go
+++ b/quesma/queryparser/pancake_sql_query_generation_test.go
@@ -53,8 +53,9 @@ func TestPancakeQueryGeneration(t *testing.T) {
 			if filters(test.TestName) {
 				t.Skip("Fix filters")
 			}
-			if test.TestName == "complex sum_bucket. Reproduce: Visualize -> Vertical Bar: Metrics: Sum Bucket (Bucket: Date Histogram, Metric: Average), Buckets: X-Asis: Histogram(file:opensearch-visualize/pipeline_agg_req,nr:22)" {
-				t.Skip("error: filter(s)/range/dataRange aggregation must be the last bucket aggregation")
+
+			if test.TestName == "Line, Y-axis: Min, Buckets: Date Range, X-Axis: Terms, Split Chart: Date Histogram(file:kibana-visualize/agg_req,nr:9)" {
+				t.Skip("Date range is broken, fix in progress (PR #971)")
 			}
 
 			if test.TestName == "Terms with order by top metrics(file:kibana-visualize/agg_req,nr:8)" {
@@ -66,10 +67,6 @@ func TestPancakeQueryGeneration(t *testing.T) {
 				t.Skip("Was skipped before. Wrong key in max_bucket, should be an easy fix")
 			}
 
-			if test.TestName == "complex sum_bucket. Reproduce: Visualize -> Vertical Bar: Metrics: Sum Bucket (Bucket: Date Histogram, Metric: Average), Buckets: X-Asis: Histogram(file:opensearch-visualize/pipeline_agg_req,nr:24)" {
-				t.Skip("Was skipped before, no expected results")
-			}
-
 			// TODO: add test for filter(s) both at the beginning and end of aggregation tree
 
 			fmt.Println("i:", i, "test:", test.TestName)
diff --git a/quesma/queryparser/pancake_transformer.go b/quesma/queryparser/pancake_transformer.go
index c8064df5a..17be0789a 100644
--- a/quesma/queryparser/pancake_transformer.go
+++ b/quesma/queryparser/pancake_transformer.go
@@ -408,14 +408,14 @@ func (a *pancakeTransformer) aggregationTreeToPancakes(topLevel pancakeAggregati
 		// TODO: if both top_hits/top_metrics, and filters, it probably won't work...
 		// Care: order of these two functions is unfortunately important.
 		// Should be fixed after this TODO
-		newFiltersPancakes := a.createFiltersPancakes(&newPancake)
+		newCombinatorPancakes := a.createCombinatorPancakes(&newPancake)
 		additionalTopHitPancakes, err := a.createTopHitAndTopMetricsPancakes(&newPancake)
 		if err != nil {
 			return nil, err
 		}
 
 		pancakeResults = append(pancakeResults, additionalTopHitPancakes...)
-		pancakeResults = append(pancakeResults, newFiltersPancakes...)
+		pancakeResults = append(pancakeResults, newCombinatorPancakes...)
 	}
 
 	return
@@ -423,33 +423,46 @@ func (a *pancakeTransformer) aggregationTreeToPancakes(topLevel pancakeAggregati
 
-// createFiltersPancakes only does something, if first layer aggregation is Filters.
-// It creates new pancakes for each filter in that aggregation, and updates `pancake` to have only first filter.
-func (a *pancakeTransformer) createFiltersPancakes(pancake *pancakeModel) (newPancakes []*pancakeModel) {
+// createCombinatorPancakes only does something if the first layer aggregation is a combinator (filters/range/date_range).
+// It creates a new pancake for each group of that combinator, and updates `pancake` to keep only the first group.
+func (a *pancakeTransformer) createCombinatorPancakes(pancake *pancakeModel) (newPancakes []*pancakeModel) {
 	if len(pancake.layers) == 0 || pancake.layers[0].nextBucketAggregation == nil {
 		return
 	}
 
 	firstLayer := pancake.layers[0]
-	filters, isFilters := firstLayer.nextBucketAggregation.queryType.(bucket_aggregations.Filters)
-	canSimplyAddFilterToWhereClause := len(firstLayer.currentMetricAggregations) == 0 && len(firstLayer.currentPipelineAggregations) == 0
-	areNewPancakesReallyNeeded := len(pancake.layers) > 1 // if there is only one layer, it's better to get it done with combinators.
+	combinator, isCombinator := firstLayer.nextBucketAggregation.queryType.(bucket_aggregations.CombinatorAggregationInterface)
+	if !isCombinator {
+		return
+	}
+
+	noMoreBucket := len(pancake.layers) <= 1 || (len(pancake.layers) == 2 && pancake.layers[1].nextBucketAggregation == nil)
+	noMetricOnFirstLayer := len(firstLayer.currentMetricAggregations) == 0 && len(firstLayer.currentPipelineAggregations) == 0
+	canSimplyAddCombinatorToWhereClause := noMoreBucket && noMetricOnFirstLayer
+	if canSimplyAddCombinatorToWhereClause {
+		return
+	}
-	if !isFilters || !canSimplyAddFilterToWhereClause || !areNewPancakesReallyNeeded || len(filters.Filters) == 0 {
+	areNewPancakesReallyNeeded := len(pancake.layers) > 1 // if there is only one layer above the combinator, it can easily be done with a single pancake; no need for more
+	groups := combinator.CombinatorGroups()
+	if !areNewPancakesReallyNeeded || len(groups) == 0 {
 		return
 	}
-	// First create N-1 new pancakes, each with different filter
-	for i := 1; i < len(filters.Filters); i++ {
+	combinatorSplit := combinator.CombinatorSplit()
+	combinatorGroups := combinator.CombinatorGroups()
+	// First create N-1 new pancakes [1...N), each with a different combinator group
+	// (it's important to update the first (0th) pancake at the end)
+	for i := 1; i < len(groups); i++ {
 		newPancake := pancake.Clone()
 		bucketAggr := newPancake.layers[0].nextBucketAggregation.ShallowClone()
-		bucketAggr.queryType = filters.NewFiltersSingleFilter(i)
+		bucketAggr.queryType = combinatorSplit[i]
 		newPancake.layers[0] = newPancakeModelLayer(&bucketAggr)
-		newPancake.whereClause = model.And([]model.Expr{newPancake.whereClause, filters.Filters[i].Sql.WhereClause})
+		newPancake.whereClause = model.And([]model.Expr{newPancake.whereClause, combinatorGroups[i].WhereClause})
 		newPancakes = append(newPancakes, newPancake)
 	}
-	// Then update original to have 1 filter as well
-	pancake.layers[0].nextBucketAggregation.queryType = filters.NewFiltersSingleFilter(0)
-	pancake.whereClause = model.And([]model.Expr{pancake.whereClause, filters.Filters[0].Sql.WhereClause})
+	// Then update the original pancake to hold group 0
+	pancake.layers[0].nextBucketAggregation.queryType = combinatorSplit[0]
+	pancake.whereClause = model.And([]model.Expr{pancake.whereClause, combinatorGroups[0].WhereClause})
 
 	return
 }
diff --git a/quesma/testdata/kibana-visualize/aggregation_requests.go b/quesma/testdata/kibana-visualize/aggregation_requests.go
index 715e74bfb..faceabb0b 100644
--- a/quesma/testdata/kibana-visualize/aggregation_requests.go
+++ b/quesma/testdata/kibana-visualize/aggregation_requests.go
@@ -1784,4 +1784,367 @@ var AggregationTests = []testdata.AggregationTestCase{
 		WHERE "aggr__0__order_1_rank"<=13
 		ORDER BY "aggr__0__order_1_rank" ASC, "aggr__0__1__order_1_rank" ASC`,
 	},
+	{ // [9]
+		TestName: "Line, Y-axis: Min, Buckets: Date Range, X-Axis: Terms, Split Chart: Date Histogram",
+		QueryRequestJson: `
+		{
+			"_source": {
+				"excludes": []
+			},
+			"aggs": {
+				"2": {
+					"aggs": {
+						"3": {
+							"aggs": {
+								"1": {
+									"min": {
+										"field": "FlightDelayMin"
+									}
+								},
+								"4": {
+									"aggs": {
+										"1": {
+											"min": {
+												"field": "FlightDelayMin"
+											}
+										}
+									},
+									"date_histogram": {
+										"field": "timestamp",
+										"fixed_interval": "30d",
+										"min_doc_count": 1,
+										"time_zone": "Europe/Warsaw"
+									}
+								}
+							},
+							"terms": {
+								"field": "DistanceKilometers",
+								"order": {
+									"1": "desc"
+								},
+								"shard_size": 25,
+								"size": 5
+							}
+						}
+					},
+					"date_range": {
+						"field": "timestamp",
+						"ranges": [
+							{
+								"from": "now-1w/w",
+								"to": "now"
+							},
+							{
+								"from": "now-1d"
+							}
+						],
+						"time_zone": "Europe/Warsaw"
+					}
} + }, + "fields": [ + { + "field": "@timestamp", + "format": "date_time" + }, + { + "field": "timestamp", + "format": "date_time" + } + ], + "query": { + "bool": { + "filter": [ + { + "range": { + "timestamp": { + "format": "strict_date_optional_time", + "gte": "2009-11-12T08:31:26.584Z", + "lte": "2024-11-12T08:31:26.584Z" + } + } + } + ], + "must": [], + "must_not": [], + "should": [] + } + }, + "runtime_mappings": { + "hour_of_day": { + "script": { + "source": "emit(doc['timestamp'].value.getHour());" + }, + "type": "long" + } + }, + "script_fields": {}, + "size": 0, + "stored_fields": [ + "*" + ], + "track_total_hits": true + }`, + ExpectedResponse: ` + { + "_shards": { + "failed": 0, + "skipped": 0, + "successful": 1, + "total": 1 + }, + "aggregations": { + "2": { + "buckets": [ + { + "3": { + "buckets": [ + { + "1": { + "value": 360.0 + }, + "4": { + "buckets": [ + { + "1": { + "value": 360.0 + }, + "doc_count": 1, + "key": 1728856800000, + "key_as_string": "2024-10-13T22:00:00.000" + } + ] + }, + "doc_count": 1, + "key": 1502.8392333984375 + }, + { + "1": { + "value": 360.0 + }, + "4": { + "buckets": [ + { + "1": { + "value": 360.0 + }, + "doc_count": 1, + "key": 1728856800000, + "key_as_string": "2024-10-13T22:00:00.000" + } + ] + }, + "doc_count": 1, + "key": 2649.456787109375 + }, + { + "1": { + "value": 360.0 + }, + "4": { + "buckets": [ + { + "1": { + "value": 360.0 + }, + "doc_count": 1, + "key": 1728856800000, + "key_as_string": "2024-10-13T22:00:00.000" + } + ] + }, + "doc_count": 1, + "key": 6280.2021484375 + } + ], + "doc_count_error_upper_bound": -1, + "sum_other_doc_count": 2666 + }, + "doc_count": 2671, + "from": 1730674800000.0, + "from_as_string": "2024-11-04T00:00:00.000+01:00", + "key": "2024-11-04T00:00:00.000+01:00-2024-11-12T10:15:15.067+01:00", + "to": 1731402915067.0, + "to_as_string": "2024-11-12T10:15:15.067+01:00" + }, + { + "3": { + "buckets": [ + { + "1": { + "value": 360.0 + }, + "4": { + "buckets": [ + { + "1": { + "value": 360.0 + }, + "doc_count": 1, + "key": 1728856800000, + "key_as_string": "2024-10-13T22:00:00.000" + } + ] + }, + "doc_count": 1, + "key": 6287.01806640625 + } + ], + "doc_count_error_upper_bound": -1, + "sum_other_doc_count": 333 + }, + "doc_count": 338, + "from": 1731316515067.0, + "from_as_string": "2024-11-11T10:15:15.067+01:00", + "key": "2024-11-11T10:15:15.067+01:00-*" + } + ] + } + }, + "hits": { + "hits": [], + "max_score": null, + "total": { + "relation": "eq", + "value": 2671 + } + }, + "timed_out": false, + "took": 129 + }`, + ExpectedPancakeResults: []model.QueryResultRow{ + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", int64(2671)), + model.NewQueryResultCol("aggr__2__3__parent_count", int64(2671)), + model.NewQueryResultCol("aggr__2__3__key_0", 1502.8392333984375), + model.NewQueryResultCol("aggr__2__3__count", int64(1)), + model.NewQueryResultCol("metric__2__3__1_col_0", 360.0), + model.NewQueryResultCol("aggr__2__3__4__key_0", int64(1728864000000/2592000000)), + model.NewQueryResultCol("aggr__2__3__4__count", int64(1)), + model.NewQueryResultCol("metric__2__3__4__1_col_0", 360.0), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", int64(2671)), + model.NewQueryResultCol("aggr__2__3__parent_count", int64(2671)), + model.NewQueryResultCol("aggr__2__3__key_0", 2649.456787109375), + model.NewQueryResultCol("aggr__2__3__count", int64(1)), + model.NewQueryResultCol("metric__2__3__1_col_0", 360.0), + model.NewQueryResultCol("aggr__2__3__4__key_0", 
int64(1728864000000/2592000000)),
+				model.NewQueryResultCol("aggr__2__3__4__count", int64(1)),
+				model.NewQueryResultCol("metric__2__3__4__1_col_0", 360.0),
+			}},
+			{Cols: []model.QueryResultCol{
+				model.NewQueryResultCol("aggr__2__count", int64(2671)),
+				model.NewQueryResultCol("aggr__2__3__parent_count", int64(2671)),
+				model.NewQueryResultCol("aggr__2__3__key_0", 6280.2021484375),
+				model.NewQueryResultCol("aggr__2__3__count", int64(1)),
+				model.NewQueryResultCol("metric__2__3__1_col_0", 360.0),
+				model.NewQueryResultCol("aggr__2__3__4__key_0", int64(1728864000000/2592000000)),
+				model.NewQueryResultCol("aggr__2__3__4__count", int64(1)),
+				model.NewQueryResultCol("metric__2__3__4__1_col_0", 360.0),
+			}},
+		},
+		ExpectedPancakeSQL: `
+		SELECT "aggr__2__count", "aggr__2__3__parent_count", "aggr__2__3__key_0",
+		  "aggr__2__3__count", "metric__2__3__1_col_0", "aggr__2__3__4__key_0",
+		  "aggr__2__3__4__count", "metric__2__3__4__1_col_0"
+		FROM (
+		  SELECT "aggr__2__count", "aggr__2__3__parent_count", "aggr__2__3__key_0",
+		    "aggr__2__3__count", "metric__2__3__1_col_0", "aggr__2__3__4__key_0",
+		    "aggr__2__3__4__count", "metric__2__3__4__1_col_0",
+		    dense_rank() OVER (ORDER BY "metric__2__3__1_col_0" DESC,
+		    "aggr__2__3__key_0" ASC) AS "aggr__2__3__order_1_rank",
+		    dense_rank() OVER (PARTITION BY "aggr__2__3__key_0" ORDER BY
+		    "aggr__2__3__4__key_0" ASC) AS "aggr__2__3__4__order_1_rank"
+		  FROM (
+		    SELECT sum(countIf(("timestamp">=toInt64(toUnixTimestamp(toStartOfWeek(
+		      subDate(now(), INTERVAL 1 week)))) AND "timestamp"<toInt64(
+		      toUnixTimestamp(now()))))) OVER () AS "aggr__2__count",
+		    sum(countIf(("timestamp">=toInt64(toUnixTimestamp(toStartOfWeek(subDate(
+		      now(), INTERVAL 1 week)))) AND "timestamp"<toInt64(toUnixTimestamp(
+		      now()))))) OVER () AS "aggr__2__3__parent_count",
+		    "DistanceKilometers" AS "aggr__2__3__key_0",
+		    sum(countIf(("timestamp">=toInt64(toUnixTimestamp(toStartOfWeek(subDate(
+		      now(), INTERVAL 1 week)))) AND "timestamp"<toInt64(toUnixTimestamp(
+		      now()))))) OVER (PARTITION BY "aggr__2__3__key_0") AS
+		      "aggr__2__3__count",
+		    minOrNull(minOrNullIf("FlightDelayMin", ("timestamp">=toInt64(
+		      toUnixTimestamp(toStartOfWeek(subDate(now(), INTERVAL 1 week)))) AND
+		      "timestamp"<toInt64(toUnixTimestamp(now()))))) OVER (PARTITION BY
+		      "aggr__2__3__key_0") AS "metric__2__3__1_col_0",
+		    toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone(
+		      "timestamp", 'Europe/Warsaw'))*1000) / 2592000000) AS
+		      "aggr__2__3__4__key_0",
+		    countIf(("timestamp">=toInt64(toUnixTimestamp(toStartOfWeek(subDate(
+		      now(), INTERVAL 1 week)))) AND "timestamp"<toInt64(toUnixTimestamp(
+		      now())))) AS "aggr__2__3__4__count",
+		    minOrNullIf("FlightDelayMin", ("timestamp">=toInt64(toUnixTimestamp(
+		      toStartOfWeek(subDate(now(), INTERVAL 1 week)))) AND "timestamp"<
+		      toInt64(toUnixTimestamp(now())))) AS "metric__2__3__4__1_col_0"
+		    FROM __quesma_table_name
+		    WHERE (("timestamp">=fromUnixTimestamp64Milli(1258014686584) AND "timestamp"
+		      <=fromUnixTimestamp64Milli(1731400286584)) AND ("timestamp">=toInt64(
+		      toUnixTimestamp(toStartOfWeek(subDate(now(), INTERVAL 1 week)))) AND
+		      "timestamp"<toInt64(toUnixTimestamp(now()))))
+		    GROUP BY "DistanceKilometers" AS "aggr__2__3__key_0",
+		      toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone(
+		      "timestamp", 'Europe/Warsaw'))*1000) / 2592000000) AS
+		      "aggr__2__3__4__key_0"))
+		WHERE "aggr__2__3__order_1_rank"<=6
+		ORDER BY "aggr__2__3__order_1_rank" ASC, "aggr__2__3__4__order_1_rank" ASC`,
+		ExpectedAdditionalPancakeSQLs: []string{`
+		SELECT "aggr__2__count", "aggr__2__3__parent_count", "aggr__2__3__key_0",
+		  "aggr__2__3__count", "metric__2__3__1_col_0", "aggr__2__3__4__key_0",
+		  "aggr__2__3__4__count", "metric__2__3__4__1_col_0"
+		FROM (
+		  SELECT "aggr__2__count", "aggr__2__3__parent_count", "aggr__2__3__key_0",
+		    "aggr__2__3__count", "metric__2__3__1_col_0", "aggr__2__3__4__key_0",
+		    "aggr__2__3__4__count", "metric__2__3__4__1_col_0",
+		    dense_rank() OVER (ORDER BY "metric__2__3__1_col_0" DESC,
+		    "aggr__2__3__key_0" ASC) AS "aggr__2__3__order_1_rank",
+		    dense_rank() OVER (PARTITION BY "aggr__2__3__key_0" ORDER BY
+		    "aggr__2__3__4__key_0" ASC) AS "aggr__2__3__4__order_1_rank"
+		  FROM (
+		    SELECT sum(countIf("timestamp">=toInt64(toUnixTimestamp(subDate(now(),
+		      INTERVAL 1 day))))) OVER () AS "aggr__2__count",
+		    sum(countIf("timestamp">=toInt64(toUnixTimestamp(subDate(now(), INTERVAL 1
+		      day))))) OVER () AS "aggr__2__3__parent_count",
+		    "DistanceKilometers" AS "aggr__2__3__key_0",
+		    sum(countIf("timestamp">=toInt64(toUnixTimestamp(subDate(now(), INTERVAL 1
+		      day))))) OVER (PARTITION BY "aggr__2__3__key_0") AS "aggr__2__3__count",
+		    minOrNull(minOrNullIf("FlightDelayMin", "timestamp">=toInt64(
+		      toUnixTimestamp(subDate(now(), INTERVAL 1 day))))) OVER (PARTITION BY
+		      "aggr__2__3__key_0") AS "metric__2__3__1_col_0",
+		    toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone(
+		      "timestamp", 'Europe/Warsaw'))*1000) / 2592000000) AS
+		      "aggr__2__3__4__key_0",
+		    countIf("timestamp">=toInt64(toUnixTimestamp(subDate(now(), INTERVAL 1 day
+		      )))) AS "aggr__2__3__4__count",
+		    minOrNullIf("FlightDelayMin", "timestamp">=toInt64(toUnixTimestamp(subDate
+		      (now(), INTERVAL 1 day)))) AS "metric__2__3__4__1_col_0"
+		    FROM __quesma_table_name
+		    WHERE (("timestamp">=fromUnixTimestamp64Milli(1258014686584) AND "timestamp"
+		      <=fromUnixTimestamp64Milli(1731400286584)) AND "timestamp">=toInt64(
+		      toUnixTimestamp(subDate(now(), INTERVAL 1 day))))
+		    GROUP BY "DistanceKilometers" AS "aggr__2__3__key_0",
toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 2592000000) AS + "aggr__2__3__4__key_0")) + WHERE "aggr__2__3__order_1_rank"<=6 + ORDER BY "aggr__2__3__order_1_rank" ASC, "aggr__2__3__4__order_1_rank" ASC`, + }, + ExpectedAdditionalPancakeResults: [][]model.QueryResultRow{ + { + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", int64(2671)), + model.NewQueryResultCol("aggr__2__3__parent_count", int64(338)), + model.NewQueryResultCol("aggr__2__3__key_0", 6287.01806640625), + model.NewQueryResultCol("aggr__2__3__count", int64(1)), + model.NewQueryResultCol("metric__2__3__1_col_0", 360.0), + model.NewQueryResultCol("aggr__2__3__4__key_0", int64(1728864000000/2592000000)), + model.NewQueryResultCol("aggr__2__3__4__count", int64(1)), + model.NewQueryResultCol("metric__2__3__4__1_col_0", 360.0), + }}, + }, + }, + }, } diff --git a/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go b/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go index bc3bff162..bb683c855 100644 --- a/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go +++ b/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go @@ -4511,7 +4511,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "date_histogram": { "field": "timestamp", "fixed_interval": "12h", - "min_doc_count": 1 + "min_doc_count": 1, + "time_zone": "Europe/Warsaw" } } }, @@ -4602,7 +4603,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 6, "key": 1714860000000, - "key_as_string": "2024-05-05T00:00:00.000+02:00" + "key_as_string": "2024-05-04T22:00:00.000" }, { "1-metric": { @@ -4610,7 +4611,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 9, "key": 1714903200000, - "key_as_string": "2024-05-05T12:00:00.000+02:00" + "key_as_string": "2024-05-05T10:00:00.000" } ] }, @@ -4619,7 +4620,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, { "1": { - "value": 22680.0 + "value": null }, "1-bucket": { "buckets": [ @@ -4629,7 +4630,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 1, "key": 1714860000000, - "key_as_string": "2024-05-05T00:00:00.000+02:00" + "key_as_string": "2024-05-04T22:00:00.000" }, { "1-metric": { @@ -4637,7 +4638,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 2, "key": 1714989600000, - "key_as_string": "2024-05-06T12:00:00.000+02:00" + "key_as_string": "2024-05-06T10:00:00.000" }, { "1-metric": { @@ -4645,7 +4646,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 3, "key": 1715076000000, - "key_as_string": "2024-05-07T12:00:00.000+02:00" + "key_as_string": "2024-05-07T10:00:00.000" } ] }, @@ -4654,7 +4655,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, { "1": { - "value": 82940.0 + "value": 27400.0 }, "1-bucket": { "buckets": [ @@ -4664,7 +4665,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 1, "key": 1714860000000, - "key_as_string": "2024-05-05T00:00:00.000+02:00" + "key_as_string": "2024-05-04T22:00:00.000" } ] }, @@ -4692,7 +4693,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 1, "key": 1715076000000, - "key_as_string": "2024-05-07T12:00:00.000+02:00" + "key_as_string": "2024-05-07T10:00:00.000" }, { "1-metric": { @@ -4700,7 +4701,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, 
"doc_count": 1, "key": 1715205600000, - "key_as_string": "2024-05-09T00:00:00.000+02:00" + "key_as_string": "2024-05-08T22:00:00.000" } ] }, @@ -4719,7 +4720,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 1, "key": 1715162400000, - "key_as_string": "2024-05-08T12:00:00.000+02:00" + "key_as_string": "2024-05-08T10:00:00.000" } ] }, @@ -4728,7 +4729,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, { "1": { - "value": 178320.0 + "value": null }, "1-bucket": { "buckets": [ @@ -4738,7 +4739,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 1, "key": 1714903200000, - "key_as_string": "2024-05-05T12:00:00.000+02:00" + "key_as_string": "2024-05-05T10:00:00.000" }, { "1-metric": { @@ -4746,7 +4747,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 2, "key": 1715076000000, - "key_as_string": "2024-05-07T12:00:00.000+02:00" + "key_as_string": "2024-05-07T10:00:00.000" } ] }, @@ -4755,7 +4756,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, { "1": { - "value": 135880.0 + "value": null }, "1-bucket": { "buckets": [ @@ -4765,7 +4766,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 3, "key": 1714860000000, - "key_as_string": "2024-05-05T00:00:00.000+02:00" + "key_as_string": "2024-05-04T22:00:00.000" }, { "1-metric": { @@ -4773,7 +4774,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 1, "key": 1715248800000, - "key_as_string": "2024-05-09T12:00:00.000+02:00" + "key_as_string": "2024-05-09T10:00:00.000" } ] }, @@ -4792,7 +4793,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 2, "key": 1714860000000, - "key_as_string": "2024-05-05T00:00:00.000+02:00" + "key_as_string": "2024-05-04T22:00:00.000" }, { "1-metric": { @@ -4800,7 +4801,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 6, "key": 1714903200000, - "key_as_string": "2024-05-05T12:00:00.000+02:00" + "key_as_string": "2024-05-05T10:00:00.000" }, { "1-metric": { @@ -4808,7 +4809,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 8, "key": 1714989600000, - "key_as_string": "2024-05-06T12:00:00.000+02:00" + "key_as_string": "2024-05-06T10:00:00.000" }, { "1-metric": { @@ -4816,7 +4817,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 7, "key": 1715076000000, - "key_as_string": "2024-05-07T12:00:00.000+02:00" + "key_as_string": "2024-05-07T10:00:00.000" } ] }, @@ -4843,234 +4844,210 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "timed_out": false, "took": 40 }`, - /* - ExpectedResults: [][]model.QueryResultRow{ - {{Cols: []model.QueryResultCol{model.NewQueryResultCol("hits", uint64(1865))}}}, - {}, // NoDBQuery - { - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, 6920.0), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), - 
model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, 1000.0), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714989600000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 600.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, 27400.0), - }}, - }, - { - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`count()`, 6), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), - model.NewQueryResultCol(`count()`, 9), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`count()`, 1), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714989600000/43200000)), - model.NewQueryResultCol(`count()`, 2), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`count()`, 3), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 600.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`count()`, 1), - }}, - }, - { - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), - model.NewQueryResultCol(`count()`, 15), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), - model.NewQueryResultCol(`count()`, 6), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 600.0), - model.NewQueryResultCol(`count()`, 1), - }}, - }, - {}, // NoDBQuery - { - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, 43320.0), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715205600000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, 44080.0), - }}, - {Cols: 
[]model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715162400000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, 50040.0), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1400.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1400.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715248800000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, 72640.0), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714989600000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - }, - { - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`count()`, 1), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715205600000/43200000)), - model.NewQueryResultCol(`count()`, 1), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715162400000/43200000)), - model.NewQueryResultCol(`count()`, 1), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1400.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), - model.NewQueryResultCol(`count()`, 1), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 
1400.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`count()`, 2), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`count()`, 3), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715248800000/43200000)), - model.NewQueryResultCol(`count()`, 1), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`count()`, 2), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), - model.NewQueryResultCol(`count()`, 6), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714989600000/43200000)), - model.NewQueryResultCol(`count()`, 8), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`count()`, 7), - }}, - }, - { - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), - model.NewQueryResultCol(`count()`, 2), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1200.0), - model.NewQueryResultCol(`count()`, 1), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1400.0), - model.NewQueryResultCol(`count()`, 3), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), - model.NewQueryResultCol(`count()`, 4), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol(`count()`, 23), - }}, - }, - { - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`count(if("bytes">=0 AND "bytes"<1000,1,NULL))`, 168), - model.NewQueryResultCol(`count(if("bytes">=1000 AND "bytes"<2000,1,NULL))`, 94), - model.NewQueryResultCol(`count()`, 1865), - }}, - }, - },*/ - ExpectedPancakeResults: make([]model.QueryResultRow, 0), - ExpectedPancakeSQL: "TODO", + ExpectedPancakeSQL: ` + SELECT "aggr__2__count", "aggr__2__3__key_0", "aggr__2__3__count", + "aggr__2__3__1-bucket__key_0", "aggr__2__3__1-bucket__count", + "metric__2__3__1-bucket__1-metric_col_0" + FROM ( + SELECT "aggr__2__count", "aggr__2__3__key_0", "aggr__2__3__count", + "aggr__2__3__1-bucket__key_0", "aggr__2__3__1-bucket__count", + "metric__2__3__1-bucket__1-metric_col_0", + dense_rank() OVER (ORDER BY "aggr__2__3__key_0" ASC) AS + "aggr__2__3__order_1_rank", + dense_rank() OVER (PARTITION BY "aggr__2__3__key_0" ORDER BY + "aggr__2__3__1-bucket__key_0" ASC) AS "aggr__2__3__1-bucket__order_1_rank" + FROM ( + SELECT sum(countIf(("bytes">=0 AND "bytes"<1000))) OVER () AS + "aggr__2__count", floor("bytes"/200)*200 AS "aggr__2__3__key_0", + 
sum(countIf(("bytes">=0 AND "bytes"<1000))) OVER (PARTITION BY + "aggr__2__3__key_0") AS "aggr__2__3__count", + toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS + "aggr__2__3__1-bucket__key_0", + countIf(("bytes">=0 AND "bytes"<1000)) AS "aggr__2__3__1-bucket__count", + avgOrNullIf("memory", ("bytes">=0 AND "bytes"<1000)) AS + "metric__2__3__1-bucket__1-metric_col_0" + FROM __quesma_table_name + WHERE ("bytes">=0 AND "bytes"<1000) + GROUP BY floor("bytes"/200)*200 AS "aggr__2__3__key_0", + toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS + "aggr__2__3__1-bucket__key_0")) + ORDER BY "aggr__2__3__order_1_rank" ASC, + "aggr__2__3__1-bucket__order_1_rank" ASC`, + ExpectedPancakeResults: []model.QueryResultRow{ + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(168)), + model.NewQueryResultCol("aggr__2__3__key_0", 0), + model.NewQueryResultCol("aggr__2__3__count", uint64(15)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714867200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(6)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(168)), + model.NewQueryResultCol("aggr__2__3__key_0", 0), + model.NewQueryResultCol("aggr__2__3__count", uint64(15)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714910400000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(9)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", 6920.0), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(168)), + model.NewQueryResultCol("aggr__2__3__key_0", 200), + model.NewQueryResultCol("aggr__2__3__count", uint64(6)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714867200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(1)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(168)), + model.NewQueryResultCol("aggr__2__3__key_0", 200), + model.NewQueryResultCol("aggr__2__3__count", uint64(6)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714996800000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(2)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(168)), + model.NewQueryResultCol("aggr__2__3__key_0", 200), + model.NewQueryResultCol("aggr__2__3__count", uint64(6)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1715083200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(3)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(168)), + model.NewQueryResultCol("aggr__2__3__key_0", 600), + model.NewQueryResultCol("aggr__2__3__count", uint64(1)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714867200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(1)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", 27400), + }}, + }, + 
ExpectedAdditionalPancakeSQLs: []string{` + SELECT "aggr__2__count", "aggr__2__3__key_0", "aggr__2__3__count", + "aggr__2__3__1-bucket__key_0", "aggr__2__3__1-bucket__count", + "metric__2__3__1-bucket__1-metric_col_0" + FROM ( + SELECT "aggr__2__count", "aggr__2__3__key_0", "aggr__2__3__count", + "aggr__2__3__1-bucket__key_0", "aggr__2__3__1-bucket__count", + "metric__2__3__1-bucket__1-metric_col_0", + dense_rank() OVER (ORDER BY "aggr__2__3__key_0" ASC) AS + "aggr__2__3__order_1_rank", + dense_rank() OVER (PARTITION BY "aggr__2__3__key_0" ORDER BY + "aggr__2__3__1-bucket__key_0" ASC) AS "aggr__2__3__1-bucket__order_1_rank" + FROM ( + SELECT sum(countIf(("bytes">=1000 AND "bytes"<2000))) OVER () AS + "aggr__2__count", floor("bytes"/200)*200 AS "aggr__2__3__key_0", + sum(countIf(("bytes">=1000 AND "bytes"<2000))) OVER (PARTITION BY + "aggr__2__3__key_0") AS "aggr__2__3__count", + toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS + "aggr__2__3__1-bucket__key_0", + countIf(("bytes">=1000 AND "bytes"<2000)) AS "aggr__2__3__1-bucket__count", + avgOrNullIf("memory", ("bytes">=1000 AND "bytes"<2000)) AS + "metric__2__3__1-bucket__1-metric_col_0" + FROM __quesma_table_name + WHERE ("bytes">=1000 AND "bytes"<2000) + GROUP BY floor("bytes"/200)*200 AS "aggr__2__3__key_0", + toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS + "aggr__2__3__1-bucket__key_0")) + ORDER BY "aggr__2__3__order_1_rank" ASC, + "aggr__2__3__1-bucket__order_1_rank" ASC`, + }, + ExpectedAdditionalPancakeResults: [][]model.QueryResultRow{ + { + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1000), + model.NewQueryResultCol("aggr__2__3__count", uint64(2)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1715083200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(1)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", 43320.), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1000), + model.NewQueryResultCol("aggr__2__3__count", uint64(2)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1715212800000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(1)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", 44080.), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1200), + model.NewQueryResultCol("aggr__2__3__count", uint64(1)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1715169600000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(1)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", 50040.), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1400), + model.NewQueryResultCol("aggr__2__3__count", uint64(3)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714910400000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(1)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + 
model.NewQueryResultCol("aggr__2__3__key_0", 1400), + model.NewQueryResultCol("aggr__2__3__count", uint64(3)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1715083200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(2)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1600), + model.NewQueryResultCol("aggr__2__3__count", uint64(4)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714867200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(3)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1600), + model.NewQueryResultCol("aggr__2__3__count", uint64(4)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1715256000000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(1)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1800), + model.NewQueryResultCol("aggr__2__3__count", uint64(23)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714867200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(2)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1800), + model.NewQueryResultCol("aggr__2__3__count", uint64(23)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714910400000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(6)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", 72640.0), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1800), + model.NewQueryResultCol("aggr__2__3__count", uint64(23)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714996800000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(8)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1800), + model.NewQueryResultCol("aggr__2__3__count", uint64(23)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1715083200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(7)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + }, + }, }, }