diff --git a/quesma/queryparser/pancake_sql_query_generation_test.go b/quesma/queryparser/pancake_sql_query_generation_test.go
index 9e9c1076d..c2aba2a13 100644
--- a/quesma/queryparser/pancake_sql_query_generation_test.go
+++ b/quesma/queryparser/pancake_sql_query_generation_test.go
@@ -52,9 +52,6 @@ func TestPancakeQueryGeneration(t *testing.T) {
 		if filters(test.TestName) {
 			t.Skip("Fix filters")
 		}
-		if test.TestName == "complex sum_bucket. Reproduce: Visualize -> Vertical Bar: Metrics: Sum Bucket (Bucket: Date Histogram, Metric: Average), Buckets: X-Asis: Histogram(file:opensearch-visualize/pipeline_agg_req,nr:22)" {
-			t.Skip("error: filter(s)/range/dataRange aggregation must be the last bucket aggregation")
-		}
 		if test.TestName == "Terms with order by top metrics(file:kibana-visualize/agg_req,nr:8)" {
 			t.Skip("Need to implement order by top metrics (talk with Jacek, he has an idea)")
@@ -69,10 +66,6 @@ func TestPancakeQueryGeneration(t *testing.T) {
 			t.Skip("Was skipped before. Wrong key in max_bucket, should be an easy fix")
 		}
-		if test.TestName == "complex sum_bucket. Reproduce: Visualize -> Vertical Bar: Metrics: Sum Bucket (Bucket: Date Histogram, Metric: Average), Buckets: X-Asis: Histogram(file:opensearch-visualize/pipeline_agg_req,nr:24)" {
-			t.Skip("Was skipped before, no expected results")
-		}
-
 		// TODO: add test for filter(s) both at the beginning and end of aggregation tree
 		fmt.Println("i:", i, "test:", test.TestName)
diff --git a/quesma/queryparser/pancake_transformer.go b/quesma/queryparser/pancake_transformer.go
index c8064df5a..bc8a413b9 100644
--- a/quesma/queryparser/pancake_transformer.go
+++ b/quesma/queryparser/pancake_transformer.go
@@ -408,14 +408,14 @@ func (a *pancakeTransformer) aggregationTreeToPancakes(topLevel pancakeAggregati
 		// TODO: if both top_hits/top_metrics, and filters, it probably won't work...
 		// Care: order of these two functions is unfortunately important.
 		// Should be fixed after this TODO
-		newFiltersPancakes := a.createFiltersPancakes(&newPancake)
+		newCombinatorPancakes := a.createCombinatorPancakes(&newPancake)
 		additionalTopHitPancakes, err := a.createTopHitAndTopMetricsPancakes(&newPancake)
 		if err != nil {
 			return nil, err
 		}
 		pancakeResults = append(pancakeResults, additionalTopHitPancakes...)
-		pancakeResults = append(pancakeResults, newFiltersPancakes...)
+		pancakeResults = append(pancakeResults, newCombinatorPancakes...)
 	}
 	return
@@ -423,33 +423,42 @@ func (a *pancakeTransformer) aggregationTreeToPancakes(topLevel pancakeAggregati
-// createFiltersPancakes only does something, if first layer aggregation is Filters.
-// It creates new pancakes for each filter in that aggregation, and updates `pancake` to have only first filter.
-func (a *pancakeTransformer) createFiltersPancakes(pancake *pancakeModel) (newPancakes []*pancakeModel) {
+// createCombinatorPancakes only does something if the first layer aggregation is a combinator aggregation (filters/range/dateRange).
+// It creates a new pancake for each of the combinator's groups and updates `pancake` to keep only the first group.
+func (a *pancakeTransformer) createCombinatorPancakes(pancake *pancakeModel) (newPancakes []*pancakeModel) {
 	if len(pancake.layers) == 0 || pancake.layers[0].nextBucketAggregation == nil {
 		return
 	}
 	firstLayer := pancake.layers[0]
-	filters, isFilters := firstLayer.nextBucketAggregation.queryType.(bucket_aggregations.Filters)
-	canSimplyAddFilterToWhereClause := len(firstLayer.currentMetricAggregations) == 0 && len(firstLayer.currentPipelineAggregations) == 0
-	areNewPancakesReallyNeeded := len(pancake.layers) > 1 // if there is only one layer, it's better to get it done with combinators.
+	combinator, isCombinator := firstLayer.nextBucketAggregation.queryType.(bucket_aggregations.CombinatorAggregationInterface)
+	if !isCombinator {
+		return
+	}
+
+	noMoreBucket := len(pancake.layers) == 1 || (len(pancake.layers) == 2 && pancake.layers[1].nextBucketAggregation == nil)
+	noMetricOnFirstLayer := len(firstLayer.currentMetricAggregations) == 0 && len(firstLayer.currentPipelineAggregations) == 0
+	canSimplyAddCombinatorToWhereClause := noMoreBucket && noMetricOnFirstLayer
+	areNewPancakesReallyNeeded := len(pancake.layers) > 1 // if there is only one layer above the combinator, it can easily be done with a single pancake, no need for more
-	if !isFilters || !canSimplyAddFilterToWhereClause || !areNewPancakesReallyNeeded || len(filters.Filters) == 0 {
+	groups := combinator.CombinatorGroups()
+	if canSimplyAddCombinatorToWhereClause || !areNewPancakesReallyNeeded || len(groups) == 0 {
 		return
 	}
-	// First create N-1 new pancakes, each with different filter
-	for i := 1; i < len(filters.Filters); i++ {
+	combinatorSplit := combinator.CombinatorSplit()
+	combinatorGroups := combinator.CombinatorGroups()
+	// First create N-1 new pancakes, each with a different group's filter (group 0 stays in the original pancake, which we update at the end)
+	for i := 1; i < len(groups); i++ {
 		newPancake := pancake.Clone()
 		bucketAggr := newPancake.layers[0].nextBucketAggregation.ShallowClone()
-		bucketAggr.queryType = filters.NewFiltersSingleFilter(i)
+		bucketAggr.queryType = combinatorSplit[i]
 		newPancake.layers[0] = newPancakeModelLayer(&bucketAggr)
-		newPancake.whereClause = model.And([]model.Expr{newPancake.whereClause, filters.Filters[i].Sql.WhereClause})
+		newPancake.whereClause = model.And([]model.Expr{newPancake.whereClause, combinatorGroups[i].WhereClause})
 		newPancakes = append(newPancakes, newPancake)
 	}
 	// Then update original to have 1 filter as well
-	pancake.layers[0].nextBucketAggregation.queryType = filters.NewFiltersSingleFilter(0)
-	pancake.whereClause = model.And([]model.Expr{pancake.whereClause, filters.Filters[0].Sql.WhereClause})
+	pancake.layers[0].nextBucketAggregation.queryType = combinatorSplit[0]
+	pancake.whereClause = model.And([]model.Expr{pancake.whereClause, combinatorGroups[0].WhereClause})
 	return
 }
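
Note (illustration, not part of the patch): createCombinatorPancakes relies on bucket_aggregations.CombinatorAggregationInterface, whose CombinatorGroups()/CombinatorSplit() results are used above; those types live elsewhere in the repository and are not shown in this diff. The self-contained sketch below only illustrates the splitting idea with hypothetical stand-in types: group 0 stays in the original pancake, and every other group becomes a cloned pancake whose WHERE clause gets that group's filter ANDed in.

package main

import "fmt"

// pancake is a hypothetical stand-in for quesma's pancakeModel; whereClause and
// bucket are plain strings here instead of model.Expr and a bucket aggregation type.
type pancake struct {
	whereClause string
	bucket      string
}

// splitByGroups mirrors the loop in createCombinatorPancakes: clone the original
// pancake for groups 1..N-1, then narrow the original down to group 0.
func splitByGroups(p *pancake, split []string, groupWhere []string) (extra []*pancake) {
	for i := 1; i < len(groupWhere); i++ {
		clone := *p
		clone.bucket = split[i]
		clone.whereClause = "(" + clone.whereClause + ") AND (" + groupWhere[i] + ")"
		extra = append(extra, &clone)
	}
	p.bucket = split[0]
	p.whereClause = "(" + p.whereClause + ") AND (" + groupWhere[0] + ")"
	return extra
}

func main() {
	p := &pancake{whereClause: `"timestamp" >= '2024-05-04'`, bucket: "range over both groups"}
	extra := splitByGroups(p,
		[]string{"range group 0", "range group 1"},
		[]string{`"bytes">=0 AND "bytes"<1000`, `"bytes">=1000 AND "bytes"<2000`})
	fmt.Println("original pancake WHERE:", p.whereClause)
	for _, e := range extra {
		fmt.Println("additional pancake WHERE:", e.whereClause)
	}
}
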
diff --git a/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go b/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go
index bc3bff162..bb683c855 100644
--- a/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go
+++ b/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go
@@ -4511,7 +4511,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 				"date_histogram": {
 					"field": "timestamp",
 					"fixed_interval": "12h",
-					"min_doc_count": 1
+					"min_doc_count": 1,
+					"time_zone": "Europe/Warsaw"
 				}
 			}
 		},
@@ -4602,7 +4603,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 6,
 						"key": 1714860000000,
-						"key_as_string": "2024-05-05T00:00:00.000+02:00"
+						"key_as_string": "2024-05-04T22:00:00.000"
 					},
 					{
 						"1-metric": {
@@ -4610,7 +4611,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 9,
 						"key": 1714903200000,
-						"key_as_string": "2024-05-05T12:00:00.000+02:00"
+						"key_as_string": "2024-05-05T10:00:00.000"
 					}
 				]
 			},
@@ -4619,7 +4620,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 			},
 			{
 				"1": {
-					"value": 22680.0
+					"value": null
 				},
 				"1-bucket": {
 					"buckets": [
@@ -4629,7 +4630,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 1,
 						"key": 1714860000000,
-						"key_as_string": "2024-05-05T00:00:00.000+02:00"
+						"key_as_string": "2024-05-04T22:00:00.000"
 					},
 					{
 						"1-metric": {
@@ -4637,7 +4638,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 2,
 						"key": 1714989600000,
-						"key_as_string": "2024-05-06T12:00:00.000+02:00"
+						"key_as_string": "2024-05-06T10:00:00.000"
 					},
 					{
 						"1-metric": {
@@ -4645,7 +4646,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 3,
 						"key": 1715076000000,
-						"key_as_string": "2024-05-07T12:00:00.000+02:00"
+						"key_as_string": "2024-05-07T10:00:00.000"
 					}
 				]
 			},
@@ -4654,7 +4655,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 			},
 			{
 				"1": {
-					"value": 82940.0
+					"value": 27400.0
 				},
 				"1-bucket": {
 					"buckets": [
@@ -4664,7 +4665,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 1,
 						"key": 1714860000000,
-						"key_as_string": "2024-05-05T00:00:00.000+02:00"
+						"key_as_string": "2024-05-04T22:00:00.000"
 					}
 				]
 			},
@@ -4692,7 +4693,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 1,
 						"key": 1715076000000,
-						"key_as_string": "2024-05-07T12:00:00.000+02:00"
+						"key_as_string": "2024-05-07T10:00:00.000"
 					},
 					{
 						"1-metric": {
@@ -4700,7 +4701,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 1,
 						"key": 1715205600000,
-						"key_as_string": "2024-05-09T00:00:00.000+02:00"
+						"key_as_string": "2024-05-08T22:00:00.000"
 					}
 				]
 			},
@@ -4719,7 +4720,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 1,
 						"key": 1715162400000,
-						"key_as_string": "2024-05-08T12:00:00.000+02:00"
+						"key_as_string": "2024-05-08T10:00:00.000"
 					}
 				]
 			},
@@ -4728,7 +4729,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 			},
 			{
 				"1": {
-					"value": 178320.0
+					"value": null
 				},
 				"1-bucket": {
 					"buckets": [
@@ -4738,7 +4739,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 1,
 						"key": 1714903200000,
-						"key_as_string": "2024-05-05T12:00:00.000+02:00"
+						"key_as_string": "2024-05-05T10:00:00.000"
 					},
 					{
 						"1-metric": {
@@ -4746,7 +4747,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 2,
 						"key": 1715076000000,
-						"key_as_string": "2024-05-07T12:00:00.000+02:00"
+						"key_as_string": "2024-05-07T10:00:00.000"
 					}
 				]
 			},
@@ -4755,7 +4756,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 			},
 			{
 				"1": {
-					"value": 135880.0
+					"value": null
 				},
 				"1-bucket": {
 					"buckets": [
@@ -4765,7 +4766,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 3,
 						"key": 1714860000000,
-						"key_as_string": "2024-05-05T00:00:00.000+02:00"
+						"key_as_string": "2024-05-04T22:00:00.000"
 					},
 					{
 						"1-metric": {
@@ -4773,7 +4774,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 1,
 						"key": 1715248800000,
-						"key_as_string": "2024-05-09T12:00:00.000+02:00"
+						"key_as_string": "2024-05-09T10:00:00.000"
 					}
 				]
 			},
@@ -4792,7 +4793,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 2,
 						"key": 1714860000000,
-						"key_as_string": "2024-05-05T00:00:00.000+02:00"
+						"key_as_string": "2024-05-04T22:00:00.000"
 					},
 					{
 						"1-metric": {
@@ -4800,7 +4801,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{
 						},
 						"doc_count": 6,
 						"key": 1714903200000,
-						"key_as_string": "2024-05-05T12:00:00.000+02:00"
+						"key_as_string": "2024-05-05T10:00:00.000"
 					},
 					{
 						"1-metric": {
@@ -4808,7 +4809,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 8, "key": 
1714989600000, - "key_as_string": "2024-05-06T12:00:00.000+02:00" + "key_as_string": "2024-05-06T10:00:00.000" }, { "1-metric": { @@ -4816,7 +4817,7 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ }, "doc_count": 7, "key": 1715076000000, - "key_as_string": "2024-05-07T12:00:00.000+02:00" + "key_as_string": "2024-05-07T10:00:00.000" } ] }, @@ -4843,234 +4844,210 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "timed_out": false, "took": 40 }`, - /* - ExpectedResults: [][]model.QueryResultRow{ - {{Cols: []model.QueryResultCol{model.NewQueryResultCol("hits", uint64(1865))}}}, - {}, // NoDBQuery - { - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, 6920.0), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, 1000.0), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714989600000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 600.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, 27400.0), - }}, - }, - { - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`count()`, 6), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), - model.NewQueryResultCol(`count()`, 9), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`count()`, 1), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714989600000/43200000)), - model.NewQueryResultCol(`count()`, 2), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`count()`, 3), - }}, - {Cols: 
[]model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 600.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`count()`, 1), - }}, - }, - { - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 0.0), - model.NewQueryResultCol(`count()`, 15), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 200.0), - model.NewQueryResultCol(`count()`, 6), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 600.0), - model.NewQueryResultCol(`count()`, 1), - }}, - }, - {}, // NoDBQuery - { - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, 43320.0), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715205600000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, 44080.0), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715162400000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, 50040.0), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1400.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1400.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715248800000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, 72640.0), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714989600000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - 
model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`avgOrNull("memory")`, nil), - }}, - }, - { - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`count()`, 1), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715205600000/43200000)), - model.NewQueryResultCol(`count()`, 1), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1200.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715162400000/43200000)), - model.NewQueryResultCol(`count()`, 1), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1400.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), - model.NewQueryResultCol(`count()`, 1), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1400.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`count()`, 2), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`count()`, 3), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715248800000/43200000)), - model.NewQueryResultCol(`count()`, 1), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714860000000/43200000)), - model.NewQueryResultCol(`count()`, 2), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714903200000/43200000)), - model.NewQueryResultCol(`count()`, 6), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1714989600000/43200000)), - model.NewQueryResultCol(`count()`, 8), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol("toInt64(toUnixTimestamp64Milli(`timestamp`)/43200000)", int64(1715076000000/43200000)), - model.NewQueryResultCol(`count()`, 7), - }}, - }, - { - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1000.0), - model.NewQueryResultCol(`count()`, 2), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1200.0), - model.NewQueryResultCol(`count()`, 1), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1400.0), - model.NewQueryResultCol(`count()`, 3), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1600.0), - 
model.NewQueryResultCol(`count()`, 4), - }}, - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`floor("bytes"/200)*200`, 1800.0), - model.NewQueryResultCol(`count()`, 23), - }}, - }, - { - {Cols: []model.QueryResultCol{ - model.NewQueryResultCol(`count(if("bytes">=0 AND "bytes"<1000,1,NULL))`, 168), - model.NewQueryResultCol(`count(if("bytes">=1000 AND "bytes"<2000,1,NULL))`, 94), - model.NewQueryResultCol(`count()`, 1865), - }}, - }, - },*/ - ExpectedPancakeResults: make([]model.QueryResultRow, 0), - ExpectedPancakeSQL: "TODO", + ExpectedPancakeSQL: ` + SELECT "aggr__2__count", "aggr__2__3__key_0", "aggr__2__3__count", + "aggr__2__3__1-bucket__key_0", "aggr__2__3__1-bucket__count", + "metric__2__3__1-bucket__1-metric_col_0" + FROM ( + SELECT "aggr__2__count", "aggr__2__3__key_0", "aggr__2__3__count", + "aggr__2__3__1-bucket__key_0", "aggr__2__3__1-bucket__count", + "metric__2__3__1-bucket__1-metric_col_0", + dense_rank() OVER (ORDER BY "aggr__2__3__key_0" ASC) AS + "aggr__2__3__order_1_rank", + dense_rank() OVER (PARTITION BY "aggr__2__3__key_0" ORDER BY + "aggr__2__3__1-bucket__key_0" ASC) AS "aggr__2__3__1-bucket__order_1_rank" + FROM ( + SELECT sum(countIf(("bytes">=0 AND "bytes"<1000))) OVER () AS + "aggr__2__count", floor("bytes"/200)*200 AS "aggr__2__3__key_0", + sum(countIf(("bytes">=0 AND "bytes"<1000))) OVER (PARTITION BY + "aggr__2__3__key_0") AS "aggr__2__3__count", + toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS + "aggr__2__3__1-bucket__key_0", + countIf(("bytes">=0 AND "bytes"<1000)) AS "aggr__2__3__1-bucket__count", + avgOrNullIf("memory", ("bytes">=0 AND "bytes"<1000)) AS + "metric__2__3__1-bucket__1-metric_col_0" + FROM __quesma_table_name + WHERE ("bytes">=0 AND "bytes"<1000) + GROUP BY floor("bytes"/200)*200 AS "aggr__2__3__key_0", + toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS + "aggr__2__3__1-bucket__key_0")) + ORDER BY "aggr__2__3__order_1_rank" ASC, + "aggr__2__3__1-bucket__order_1_rank" ASC`, + ExpectedPancakeResults: []model.QueryResultRow{ + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(168)), + model.NewQueryResultCol("aggr__2__3__key_0", 0), + model.NewQueryResultCol("aggr__2__3__count", uint64(15)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714867200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(6)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(168)), + model.NewQueryResultCol("aggr__2__3__key_0", 0), + model.NewQueryResultCol("aggr__2__3__count", uint64(15)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714910400000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(9)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", 6920.0), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(168)), + model.NewQueryResultCol("aggr__2__3__key_0", 200), + model.NewQueryResultCol("aggr__2__3__count", uint64(6)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714867200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(1)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + 
model.NewQueryResultCol("aggr__2__count", uint64(168)), + model.NewQueryResultCol("aggr__2__3__key_0", 200), + model.NewQueryResultCol("aggr__2__3__count", uint64(6)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714996800000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(2)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(168)), + model.NewQueryResultCol("aggr__2__3__key_0", 200), + model.NewQueryResultCol("aggr__2__3__count", uint64(6)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1715083200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(3)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(168)), + model.NewQueryResultCol("aggr__2__3__key_0", 600), + model.NewQueryResultCol("aggr__2__3__count", uint64(1)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714867200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(1)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", 27400), + }}, + }, + ExpectedAdditionalPancakeSQLs: []string{` + SELECT "aggr__2__count", "aggr__2__3__key_0", "aggr__2__3__count", + "aggr__2__3__1-bucket__key_0", "aggr__2__3__1-bucket__count", + "metric__2__3__1-bucket__1-metric_col_0" + FROM ( + SELECT "aggr__2__count", "aggr__2__3__key_0", "aggr__2__3__count", + "aggr__2__3__1-bucket__key_0", "aggr__2__3__1-bucket__count", + "metric__2__3__1-bucket__1-metric_col_0", + dense_rank() OVER (ORDER BY "aggr__2__3__key_0" ASC) AS + "aggr__2__3__order_1_rank", + dense_rank() OVER (PARTITION BY "aggr__2__3__key_0" ORDER BY + "aggr__2__3__1-bucket__key_0" ASC) AS "aggr__2__3__1-bucket__order_1_rank" + FROM ( + SELECT sum(countIf(("bytes">=1000 AND "bytes"<2000))) OVER () AS + "aggr__2__count", floor("bytes"/200)*200 AS "aggr__2__3__key_0", + sum(countIf(("bytes">=1000 AND "bytes"<2000))) OVER (PARTITION BY + "aggr__2__3__key_0") AS "aggr__2__3__count", + toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS + "aggr__2__3__1-bucket__key_0", + countIf(("bytes">=1000 AND "bytes"<2000)) AS "aggr__2__3__1-bucket__count", + avgOrNullIf("memory", ("bytes">=1000 AND "bytes"<2000)) AS + "metric__2__3__1-bucket__1-metric_col_0" + FROM __quesma_table_name + WHERE ("bytes">=1000 AND "bytes"<2000) + GROUP BY floor("bytes"/200)*200 AS "aggr__2__3__key_0", + toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( + "timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS + "aggr__2__3__1-bucket__key_0")) + ORDER BY "aggr__2__3__order_1_rank" ASC, + "aggr__2__3__1-bucket__order_1_rank" ASC`, + }, + ExpectedAdditionalPancakeResults: [][]model.QueryResultRow{ + { + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1000), + model.NewQueryResultCol("aggr__2__3__count", uint64(2)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1715083200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(1)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", 43320.), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + 
model.NewQueryResultCol("aggr__2__3__key_0", 1000), + model.NewQueryResultCol("aggr__2__3__count", uint64(2)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1715212800000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(1)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", 44080.), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1200), + model.NewQueryResultCol("aggr__2__3__count", uint64(1)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1715169600000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(1)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", 50040.), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1400), + model.NewQueryResultCol("aggr__2__3__count", uint64(3)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714910400000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(1)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1400), + model.NewQueryResultCol("aggr__2__3__count", uint64(3)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1715083200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(2)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1600), + model.NewQueryResultCol("aggr__2__3__count", uint64(4)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714867200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(3)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1600), + model.NewQueryResultCol("aggr__2__3__count", uint64(4)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1715256000000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(1)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1800), + model.NewQueryResultCol("aggr__2__3__count", uint64(23)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714867200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(2)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1800), + model.NewQueryResultCol("aggr__2__3__count", uint64(23)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714910400000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(6)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", 72640.0), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + 
model.NewQueryResultCol("aggr__2__3__key_0", 1800), + model.NewQueryResultCol("aggr__2__3__count", uint64(23)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1714996800000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(8)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + {Cols: []model.QueryResultCol{ + model.NewQueryResultCol("aggr__2__count", uint64(94)), + model.NewQueryResultCol("aggr__2__3__key_0", 1800), + model.NewQueryResultCol("aggr__2__3__count", uint64(23)), + model.NewQueryResultCol("aggr__2__3__1-bucket__key_0", int64(1715083200000/43200000)), + model.NewQueryResultCol("aggr__2__3__1-bucket__count", uint64(7)), + model.NewQueryResultCol("metric__2__3__1-bucket__1-metric_col_0", nil), + }}, + }, + }, }, }
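
Note (illustration, not part of the patch): the bucket keys in ExpectedPancakeResults above differ from the "key" values in the expected JSON because the generated SQL adds the Europe/Warsaw offset (UTC+2 on these dates) to the UTC timestamp before dividing by the 12h interval (43200000 ms). A standalone arithmetic check of that relationship:

package main

import "fmt"

func main() {
	const utcKeyMs = int64(1714860000000)         // "key" from the expected JSON: 2024-05-04T22:00:00 UTC (2024-05-05T00:00:00+02:00)
	const warsawOffsetMs = int64(2 * 3600 * 1000) // Europe/Warsaw is UTC+2 (CEST) on these dates
	const intervalMs = int64(43200000)            // date_histogram fixed_interval: 12h

	shifted := utcKeyMs + warsawOffsetMs
	fmt.Println(shifted)              // 1714867200000, the numerator written in ExpectedPancakeResults
	fmt.Println(shifted / intervalMs) // 39696, the resulting aggr__2__3__1-bucket__key_0 value
}
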