diff --git a/quesma/go.mod b/quesma/go.mod index d96579010..2dabc9ce2 100644 --- a/quesma/go.mod +++ b/quesma/go.mod @@ -19,7 +19,6 @@ require ( github.com/knadh/koanf/providers/file v1.1.2 github.com/knadh/koanf/v2 v2.1.1 github.com/markbates/goth v1.80.0 - github.com/relvacode/iso8601 v1.4.0 github.com/rs/zerolog v1.33.0 github.com/shirou/gopsutil/v3 v3.24.5 github.com/stretchr/testify v1.9.0 diff --git a/quesma/go.sum b/quesma/go.sum index 855d36c75..c584c6ba4 100644 --- a/quesma/go.sum +++ b/quesma/go.sum @@ -113,8 +113,6 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/relvacode/iso8601 v1.4.0 h1:GsInVSEJfkYuirYFxa80nMLbH2aydgZpIf52gYZXUJs= -github.com/relvacode/iso8601 v1.4.0/go.mod h1:FlNp+jz+TXpyRqgmM7tnzHHzBnz776kmAH2h3sZCn0I= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= diff --git a/quesma/kibana/dates.go b/quesma/kibana/dates.go index abd9facfe..787622799 100644 --- a/quesma/kibana/dates.go +++ b/quesma/kibana/dates.go @@ -3,6 +3,7 @@ package kibana import ( + "quesma/model" "quesma/util" "strconv" "time" @@ -17,19 +18,18 @@ func NewDateManager() DateManager { var acceptableDateTimeFormats = []string{"2006", "2006-01", "2006-01-02", "2006-01-02", "2006-01-02T15", "2006-01-02T15:04", "2006-01-02T15:04:05", "2006-01-02T15:04:05Z07", "2006-01-02T15:04:05Z07:00"} -// MissingInDateHistogramToUnixTimestamp parses date_histogram's missing field. 
-// If missing is present, it's in [strict_date_optional_time || epoch_millis] format
+// parseStrictDateOptionalTimeOrEpochMillis parses a date in [strict_date_optional_time || epoch_millis] format
 // (https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html)
-func (dm DateManager) MissingInDateHistogramToUnixTimestamp(missing any) (unixTimestamp int64, parsingSucceeded bool) {
-	if asInt, success := util.ExtractInt64Maybe(missing); success {
+func (dm DateManager) parseStrictDateOptionalTimeOrEpochMillis(date any) (unixTimestamp int64, parsingSucceeded bool) {
+	if asInt, success := util.ExtractInt64Maybe(date); success {
 		return asInt, true
 	}
-	if asFloat, success := util.ExtractFloat64Maybe(missing); success {
+	if asFloat, success := util.ExtractFloat64Maybe(date); success {
 		return int64(asFloat), true
 	}
-	asString, success := missing.(string)
+	asString, success := date.(string)
 	if !success {
 		return -1, false
 	}
@@ -41,9 +41,9 @@ func (dm DateManager) MissingInDateHistogramToUnixTimestamp(missing any) (unixTi
 	const yearOrTsDelimiter = 10000
 
 	if asInt, err := strconv.ParseInt(asString, 10, 64); err == nil && asInt >= yearOrTsDelimiter {
-		return dm.MissingInDateHistogramToUnixTimestamp(asInt)
+		return dm.parseStrictDateOptionalTimeOrEpochMillis(asInt)
 	} else if asFloat, err := strconv.ParseFloat(asString, 64); err == nil && asFloat >= yearOrTsDelimiter {
-		return dm.MissingInDateHistogramToUnixTimestamp(asFloat)
+		return dm.parseStrictDateOptionalTimeOrEpochMillis(asFloat)
 	}
 
 	// It could be replaced with iso8601.ParseString() after the fixes to 1.4.0:
@@ -56,3 +56,20 @@ func (dm DateManager) MissingInDateHistogramToUnixTimestamp(missing any) (unixTi
 
 	return -1, false
 }
+
+// ParseMissingInDateHistogram parses date_histogram's missing field.
+// If missing is present, it's in [strict_date_optional_time || epoch_millis] format
+// (https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html)
+func (dm DateManager) ParseMissingInDateHistogram(missing any) (unixTimestamp int64, parsingSucceeded bool) {
+	return dm.parseStrictDateOptionalTimeOrEpochMillis(missing)
+}
+
+// ParseRange parses a range filter.
+// We assume it's in [strict_date_optional_time || epoch_millis] format (TODO: other formats)
+// (https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html)
+func (dm DateManager) ParseRange(rangeValue any) (timestampExpr model.Expr, parsingSucceeded bool) {
+	if timestamp, success := dm.parseStrictDateOptionalTimeOrEpochMillis(rangeValue); success {
+		return model.NewFunction("fromUnixTimestamp64Milli", model.NewLiteral(timestamp)), true
+	}
+	return nil, false
+}
diff --git a/quesma/kibana/dates_test.go b/quesma/kibana/dates_test.go
index 6164c62b9..1d48e3443 100644
--- a/quesma/kibana/dates_test.go
+++ b/quesma/kibana/dates_test.go
@@ -8,7 +8,7 @@ import (
 	"testing"
 )
 
-func TestDateManager_MissingInDateHistogramToUnixTimestamp(t *testing.T) {
+func TestDateManager_parseStrictDateOptionalTimeOrEpochMillis(t *testing.T) {
 	tests := []struct {
 		missing              any
 		wantUnixTimestamp    int64
@@ -38,7 +38,7 @@ func TestDateManager_MissingInDateHistogramToUnixTimestamp(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(fmt.Sprintf("%v", tt.missing), func(t *testing.T) {
 			dm := NewDateManager()
-			gotUnixTs, gotParsingSucceeded := dm.MissingInDateHistogramToUnixTimestamp(tt.missing)
-			assert.Equalf(t, tt.wantUnixTimestamp, gotUnixTs, "MissingInDateHistogramToUnixTimestamp(%v)", tt.missing)
-			assert.Equalf(t, tt.wantParsingSucceeded, gotParsingSucceeded, "MissingInDateHistogramToUnixTimestamp(%v)", tt.missing)
+			gotUnixTs, gotParsingSucceeded := dm.parseStrictDateOptionalTimeOrEpochMillis(tt.missing)
+			assert.Equalf(t, tt.wantUnixTimestamp, gotUnixTs, "parseStrictDateOptionalTimeOrEpochMillis(%v)", tt.missing)
+			assert.Equalf(t, tt.wantParsingSucceeded, gotParsingSucceeded, "parseStrictDateOptionalTimeOrEpochMillis(%v)", tt.missing)
 		})
 	}
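The new DateManager API has a simple contract: ParseMissingInDateHistogram reduces its input to epoch milliseconds, and ParseRange wraps the same result in ClickHouse's fromUnixTimestamp64Milli(...). Below is a minimal, self-contained sketch of that parsing ladder (int64 → float64 → numeric string → layout list); `toEpochMillis` and the trimmed layout list are illustrative stand-ins, not code from this PR:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// A trimmed version of the acceptableDateTimeFormats list from dates.go.
var layouts = []string{"2006", "2006-01", "2006-01-02", "2006-01-02T15:04:05",
	"2006-01-02T15:04:05Z07:00"}

// toEpochMillis is an illustrative stand-in for
// parseStrictDateOptionalTimeOrEpochMillis: int64 and float64 pass through,
// numeric strings >= 10000 are already epoch millis, anything else is
// tried against the date layouts.
func toEpochMillis(date any) (unixTimestamp int64, ok bool) {
	const yearOrTsDelimiter = 10000 // below this, a number is a year, not a timestamp
	switch d := date.(type) {
	case int64:
		return d, true
	case float64:
		return int64(d), true
	case string:
		if asInt, err := strconv.ParseInt(d, 10, 64); err == nil && asInt >= yearOrTsDelimiter {
			return asInt, true
		}
		for _, layout := range layouts {
			if t, err := time.Parse(layout, d); err == nil {
				return t.UnixMilli(), true
			}
		}
	}
	return -1, false
}

func main() {
	for _, missing := range []any{int64(1706881636029), "2024-02-02T13:47:16", "2024", "not-a-date"} {
		ms, ok := toEpochMillis(missing)
		fmt.Printf("%v -> %d (ok=%v)\n", missing, ms, ok)
	}
}
```

Note the yearOrTsDelimiter trick carried over from dates.go: any number below 10000 is treated as a year rather than an epoch timestamp.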
Skipping it.", missing) } @@ -95,7 +94,8 @@ func (cw *ClickhouseQueryTranslator) pancakeTryBucketAggregation(aggregation *pa } } - if !didWeAddMissing { + if !weAddedMissing { + // if we don't add missing, we need to filter out nulls later aggregation.filterOutEmptyKeyBucket = true } diff --git a/quesma/queryparser/query_parser.go b/quesma/queryparser/query_parser.go index 9f637d636..bcd2f01c3 100644 --- a/quesma/queryparser/query_parser.go +++ b/quesma/queryparser/query_parser.go @@ -7,7 +7,9 @@ import ( "encoding/hex" "encoding/json" "fmt" + "github.com/k0kubun/pp" "quesma/clickhouse" + "quesma/kibana" "quesma/logger" "quesma/model" "quesma/model/bucket_aggregations" @@ -19,9 +21,6 @@ import ( "strconv" "strings" "unicode" - - "github.com/k0kubun/pp" - "github.com/relvacode/iso8601" ) type QueryMap = map[string]interface{} @@ -764,7 +763,6 @@ func (cw *ClickhouseQueryTranslator) parseDateMathExpression(expr string) (strin exp, err := ParseDateMathExpression(expr) if err != nil { - logger.Warn().Msgf("error parsing date math expression: %s", expr) return "", err } @@ -775,7 +773,6 @@ func (cw *ClickhouseQueryTranslator) parseDateMathExpression(expr string) (strin sql, err := builder.RenderSQL(exp) if err != nil { - logger.Warn().Msgf("error rendering date math expression: %s", expr) return "", err } @@ -792,84 +789,81 @@ func (cw *ClickhouseQueryTranslator) parseRange(queryMap QueryMap) model.SimpleQ return model.NewSimpleQuery(nil, false) } - for field, v := range queryMap { - field = cw.ResolveField(cw.Ctx, field) + for fieldName, v := range queryMap { + fieldName = cw.ResolveField(cw.Ctx, fieldName) + fieldType := cw.Table.GetDateTimeType(cw.Ctx, cw.ResolveField(cw.Ctx, fieldName)) stmts := make([]model.Expr, 0) if _, ok := v.(QueryMap); !ok { logger.WarnWithCtx(cw.Ctx).Msgf("invalid range type: %T, value: %v", v, v) continue } - isDatetimeInDefaultFormat := true // in 99% requests, format is "strict_date_optional_time", which we can parse with time.Parse(time.RFC3339Nano, ..) - if format, ok := v.(QueryMap)["format"]; ok && format == "epoch_millis" { - isDatetimeInDefaultFormat = false - } keysSorted := util.MapKeysSorted(v.(QueryMap)) for _, op := range keysSorted { - v := v.(QueryMap)[op] - var timeFormatFuncName string - var finalLHS, valueToCompare model.Expr - fieldType := cw.Table.GetDateTimeType(cw.Ctx, cw.ResolveField(cw.Ctx, field)) - vToPrint := sprint(v) - valueToCompare = model.NewLiteral(vToPrint) - finalLHS = model.NewColumnRef(field) - if !isDatetimeInDefaultFormat { - timeFormatFuncName = "toUnixTimestamp64Milli" - finalLHS = model.NewFunction(timeFormatFuncName, model.NewColumnRef(field)) - } else { - switch fieldType { - case clickhouse.DateTime64, clickhouse.DateTime: - if dateTime, ok := v.(string); ok { - // if it's a date, we need to parse it to Clickhouse's DateTime format - // how to check if it does not contain date math expression? 
-						if _, err := iso8601.ParseString(dateTime); err == nil {
-							_, timeFormatFuncName = cw.parseDateTimeString(cw.Table, field, dateTime)
-							// TODO Investigate the quotation below
-							valueToCompare = model.NewFunction(timeFormatFuncName, model.NewLiteral(fmt.Sprintf("'%s'", dateTime)))
-						} else if op == "gte" || op == "lte" || op == "gt" || op == "lt" {
-							vToPrint, err = cw.parseDateMathExpression(vToPrint)
-							valueToCompare = model.NewLiteral(vToPrint)
-							if err != nil {
-								logger.WarnWithCtx(cw.Ctx).Msgf("error parsing date math expression: %s", vToPrint)
-								return model.NewSimpleQuery(nil, false)
-							}
-						}
-					} else if v == nil {
-						vToPrint = "NULL"
-						valueToCompare = model.NewLiteral("NULL")
+			valueRaw := v.(QueryMap)[op]
+			value := sprint(valueRaw)
+			defaultValue := model.NewLiteral(value)
+			dateManager := kibana.NewDateManager()
+
+			// Three stages:
+			// 1. dateManager.ParseRange
+			// 2. cw.parseDateMathExpression
+			// 3. just a number
+			// Dates go through stages 1-3 and finish as soon as one succeeds.
+			// Numbers use only stage 3.
+
+			var finalValue model.Expr
+			doneParsing, isQuoted := false, len(value) > 2 && value[0] == '\'' && value[len(value)-1] == '\''
+			switch fieldType {
+			case clickhouse.DateTime, clickhouse.DateTime64:
+				// TODO add support for "time_zone" parameter in ParseRange
+				finalValue, doneParsing = dateManager.ParseRange(value) // stage 1
+
+				if !doneParsing && (op == "gte" || op == "lte" || op == "gt" || op == "lt") { // stage 2
+					parsed, err := cw.parseDateMathExpression(value)
+					if err == nil {
+						doneParsing = true
+						finalValue = model.NewLiteral(parsed)
 					}
-				case clickhouse.Invalid: // assumes it is number that does not need formatting
-					if len(vToPrint) > 2 && vToPrint[0] == '\'' && vToPrint[len(vToPrint)-1] == '\'' {
-						isNumber := true
-						for _, c := range vToPrint[1 : len(vToPrint)-1] {
-							if !unicode.IsDigit(c) && c != '.' {
-								isNumber = false
-							}
-						}
-						if isNumber {
-							vToPrint = vToPrint[1 : len(vToPrint)-1]
-						} else {
-							logger.WarnWithCtx(cw.Ctx).Msgf("we use range with unknown literal %s, field %s", vToPrint, field)
+				}
+
+				if !doneParsing && isQuoted { // stage 3
+					finalValue, doneParsing = dateManager.ParseRange(value[1 : len(value)-1])
+				}
+			case clickhouse.Invalid:
+				if isQuoted {
+					isNumber, unquoted := true, value[1:len(value)-1]
+					for _, c := range unquoted {
+						if !unicode.IsDigit(c) && c != '.' 
{ + isNumber = false } - valueToCompare = model.NewLiteral(vToPrint) } - default: - logger.WarnWithCtx(cw.Ctx).Msgf("invalid DateTime type for field: %s, parsed dateTime value: %s", field, vToPrint) + if isNumber { + finalValue = model.NewLiteral(unquoted) + doneParsing = true + } } + default: + logger.ErrorWithCtx(cw.Ctx).Msgf("invalid DateTime type for field: %s, parsed dateTime value: %s", fieldName, value) + } + + if !doneParsing { + finalValue = defaultValue } + field := model.NewColumnRef(fieldName) switch op { case "gte": - stmt := model.NewInfixExpr(finalLHS, ">=", valueToCompare) + stmt := model.NewInfixExpr(field, ">=", finalValue) stmts = append(stmts, stmt) case "lte": - stmt := model.NewInfixExpr(finalLHS, "<=", valueToCompare) + stmt := model.NewInfixExpr(field, "<=", finalValue) stmts = append(stmts, stmt) case "gt": - stmt := model.NewInfixExpr(finalLHS, ">", valueToCompare) + stmt := model.NewInfixExpr(field, ">", finalValue) stmts = append(stmts, stmt) case "lt": - stmt := model.NewInfixExpr(finalLHS, "<", valueToCompare) + stmt := model.NewInfixExpr(field, "<", finalValue) stmts = append(stmts, stmt) case "format": // ignored @@ -885,21 +879,6 @@ func (cw *ClickhouseQueryTranslator) parseRange(queryMap QueryMap) model.SimpleQ return model.NewSimpleQuery(nil, false) } -// parseDateTimeString returns string used to parse DateTime in Clickhouse (depends on column type) - -func (cw *ClickhouseQueryTranslator) parseDateTimeString(table *clickhouse.Table, field, dateTime string) (string, string) { - typ := table.GetDateTimeType(cw.Ctx, cw.ResolveField(cw.Ctx, field)) - switch typ { - case clickhouse.DateTime64: - return "parseDateTime64BestEffort('" + dateTime + "')", "parseDateTime64BestEffort" - case clickhouse.DateTime: - return "parseDateTimeBestEffort('" + dateTime + "')", "parseDateTimeBestEffort" - default: - logger.Error().Msgf("invalid DateTime type: %T for field: %s, parsed dateTime value: %s", typ, field, dateTime) - return "", "" - } -} - // TODO: not supported: // - The field has "index" : false and "doc_values" : false set in the mapping // - The length of the field value exceeded an ignore_above setting in the mapping diff --git a/quesma/queryparser/query_parser_range_test.go b/quesma/queryparser/query_parser_range_test.go index c2b5760b5..9bb629a9d 100644 --- a/quesma/queryparser/query_parser_range_test.go +++ b/quesma/queryparser/query_parser_range_test.go @@ -32,7 +32,7 @@ var parseRangeTests = []parseRangeTest{ `CREATE TABLE ` + tableName + ` ( "message" String, "timestamp" DateTime64(3, 'UTC') ) ENGINE = Memory`, - `("timestamp">=parseDateTime64BestEffort('2024-02-02T13:47:16.029Z') AND "timestamp"<=parseDateTime64BestEffort('2024-02-09T13:47:16.029Z'))`, + `("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436029))`, }, { "parseDateTimeBestEffort", @@ -46,7 +46,7 @@ var parseRangeTests = []parseRangeTest{ `CREATE TABLE ` + tableName + ` ( "message" String, "timestamp" DateTime ) ENGINE = Memory`, - `("timestamp">=parseDateTimeBestEffort('2024-02-02T13:47:16.029Z') AND "timestamp"<=parseDateTimeBestEffort('2024-02-09T13:47:16.029Z'))`, + `("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436029))`, }, { "numeric range", @@ -72,7 +72,7 @@ var parseRangeTests = []parseRangeTest{ `CREATE TABLE ` + tableName + ` ( "message" String, "timestamp" DateTime64(3, 'UTC') ) ENGINE = Memory`, - `("timestamp">=parseDateTime64BestEffort('2024-02-02T13:47:16') AND 
"timestamp"<=parseDateTime64BestEffort('2024-02-09T13:47:16'))`, + `("timestamp">=fromUnixTimestamp64Milli(1706881636000) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436000))`, }, } diff --git a/quesma/quesma/functionality/terms_enum/terms_enum_test.go b/quesma/quesma/functionality/terms_enum/terms_enum_test.go index c707a921c..12378c02d 100644 --- a/quesma/quesma/functionality/terms_enum/terms_enum_test.go +++ b/quesma/quesma/functionality/terms_enum/terms_enum_test.go @@ -109,8 +109,8 @@ func testHandleTermsEnumRequest(t *testing.T, requestBody []byte) { } qt := &queryparser.ClickhouseQueryTranslator{ClickhouseLM: lm, Table: table, Ctx: context.Background(), Schema: s.Tables[schema.TableName(testTableName)]} // Here we additionally verify that terms for `_tier` are **NOT** included in the SQL query - expectedQuery1 := `SELECT DISTINCT "client_name" FROM ` + testTableName + ` WHERE ("epoch_time">=parseDateTimeBestEffort('2024-02-27T12:25:00.000Z') AND "epoch_time"<=parseDateTimeBestEffort('2024-02-27T12:40:59.999Z')) LIMIT 13` - expectedQuery2 := `SELECT DISTINCT "client_name" FROM ` + testTableName + ` WHERE ("epoch_time"<=parseDateTimeBestEffort('2024-02-27T12:40:59.999Z') AND "epoch_time">=parseDateTimeBestEffort('2024-02-27T12:25:00.000Z')) LIMIT 13` + expectedQuery1 := `SELECT DISTINCT "client_name" FROM ` + testTableName + ` WHERE ("epoch_time">=fromUnixTimestamp64Milli(1709036700000) AND "epoch_time"<=fromUnixTimestamp64Milli(1709037659999)) LIMIT 13` + expectedQuery2 := `SELECT DISTINCT "client_name" FROM ` + testTableName + ` WHERE ("epoch_time">=fromUnixTimestamp64Milli(1709036700000) AND "epoch_time"<=fromUnixTimestamp64Milli(1709037659999)) LIMIT 13` // Once in a while `AND` conditions could be swapped, so we match both cases mock.ExpectQuery(fmt.Sprintf("%s|%s", regexp.QuoteMeta(expectedQuery1), regexp.QuoteMeta(expectedQuery2))). 
diff --git a/quesma/quesma/search_test.go b/quesma/quesma/search_test.go index 71972b15c..693259619 100644 --- a/quesma/quesma/search_test.go +++ b/quesma/quesma/search_test.go @@ -425,38 +425,38 @@ func TestHandlingDateTimeFields(t *testing.T) { dateTimeTimestampField: `SELECT toInt64(toUnixTimestamp("timestamp") / 60) AS "aggr__0__key_0", count(*) AS "aggr__0__count" FROM __quesma_table_name - WHERE ((("timestamp64">=parseDateTime64BestEffort('2024-01-29T15:36:36.491Z') - AND "timestamp64"<=parseDateTime64BestEffort('2024-01-29T18:11:36.491Z')) AND - ("timestamp">=parseDateTimeBestEffort('2024-01-29T15:36:36.491Z') AND - "timestamp"<=parseDateTimeBestEffort('2024-01-29T18:11:36.491Z'))) AND NOT (( - "@timestamp">=parseDateTime64BestEffort('2024-01-29T15:36:36.491Z') AND - "@timestamp"<=parseDateTime64BestEffort('2024-01-29T18:11:36.491Z')))) + WHERE ((("timestamp64">=fromUnixTimestamp64Milli(1706542596491) AND + "timestamp64"<=fromUnixTimestamp64Milli(1706551896491)) AND ("timestamp">= + fromUnixTimestamp64Milli(1706542596491) AND "timestamp"<= + fromUnixTimestamp64Milli(1706551896491))) AND NOT (("@timestamp">= + fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<= + fromUnixTimestamp64Milli(1706551896491)))) GROUP BY toInt64(toUnixTimestamp("timestamp") / 60) AS "aggr__0__key_0" ORDER BY "aggr__0__key_0" ASC`, dateTime64TimestampField: `SELECT toInt64(toUnixTimestamp64Milli("timestamp64") / 60000) AS "aggr__0__key_0", count(*) AS "aggr__0__count" FROM __quesma_table_name - WHERE ((("timestamp64">=parseDateTime64BestEffort('2024-01-29T15:36:36.491Z') - AND "timestamp64"<=parseDateTime64BestEffort('2024-01-29T18:11:36.491Z')) AND - ("timestamp">=parseDateTimeBestEffort('2024-01-29T15:36:36.491Z') AND - "timestamp"<=parseDateTimeBestEffort('2024-01-29T18:11:36.491Z'))) AND NOT (( - "@timestamp">=parseDateTime64BestEffort('2024-01-29T15:36:36.491Z') AND - "@timestamp"<=parseDateTime64BestEffort('2024-01-29T18:11:36.491Z')))) + WHERE ((("timestamp64">=fromUnixTimestamp64Milli(1706542596491) AND + "timestamp64"<=fromUnixTimestamp64Milli(1706551896491)) AND ("timestamp">= + fromUnixTimestamp64Milli(1706542596491) AND "timestamp"<= + fromUnixTimestamp64Milli(1706551896491))) AND NOT (("@timestamp">= + fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<= + fromUnixTimestamp64Milli(1706551896491)))) GROUP BY toInt64(toUnixTimestamp64Milli("timestamp64") / 60000) AS "aggr__0__key_0" ORDER BY "aggr__0__key_0" ASC`, dateTime64OurTimestampField: `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS "aggr__0__key_0" - , count(*) AS "aggr__0__count" - FROM __quesma_table_name - WHERE ((("timestamp64">=parseDateTime64BestEffort('2024-01-29T15:36:36.491Z') - AND "timestamp64"<=parseDateTime64BestEffort('2024-01-29T18:11:36.491Z')) AND - ("timestamp">=parseDateTimeBestEffort('2024-01-29T15:36:36.491Z') AND - "timestamp"<=parseDateTimeBestEffort('2024-01-29T18:11:36.491Z'))) AND NOT (( - "@timestamp">=parseDateTime64BestEffort('2024-01-29T15:36:36.491Z') AND - "@timestamp"<=parseDateTime64BestEffort('2024-01-29T18:11:36.491Z')))) - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS - "aggr__0__key_0" - ORDER BY "aggr__0__key_0" ASC`, + , count(*) AS "aggr__0__count" + FROM __quesma_table_name + WHERE ((("timestamp64">=fromUnixTimestamp64Milli(1706542596491) AND + "timestamp64"<=fromUnixTimestamp64Milli(1706551896491)) AND ("timestamp">= + fromUnixTimestamp64Milli(1706542596491) AND "timestamp"<= + fromUnixTimestamp64Milli(1706551896491))) AND NOT (("@timestamp">= + 
fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<= + fromUnixTimestamp64Milli(1706551896491)))) + GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS + "aggr__0__key_0" + ORDER BY "aggr__0__key_0" ASC`, } db, mock := util.InitSqlMockWithPrettySqlAndPrint(t, false) diff --git a/quesma/testdata/aggregation_requests.go b/quesma/testdata/aggregation_requests.go index b5a1be085..24793f61a 100644 --- a/quesma/testdata/aggregation_requests.go +++ b/quesma/testdata/aggregation_requests.go @@ -121,8 +121,7 @@ var AggregationTests = []AggregationTestCase{ ExpectedPancakeSQL: `SELECT maxOrNull("AvgTicketPrice") AS "metric__maxAgg_col_0", ` + `minOrNull("AvgTicketPrice") AS "metric__minAgg_col_0" ` + `FROM ` + TableName + ` ` + - `WHERE ("timestamp">=parseDateTime64BestEffort('2024-02-02T13:47:16.029Z') ` + - `AND "timestamp"<=parseDateTime64BestEffort('2024-02-09T13:47:16.029Z'))`, + `WHERE ("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436029))`, }, { // [1] TestName: "2 sibling count aggregations", @@ -313,8 +312,7 @@ var AggregationTests = []AggregationTestCase{ countIf("Cancelled"==true) AS "metric__0__3-bucket_col_0", countIf("FlightDelay"==true) AS "aggr__0__1-bucket__count" FROM ` + TableName + ` - WHERE ("timestamp">=parseDateTime64BestEffort('2024-02-02T13:47:16.029Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-02-09T13:47:16.029Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436029)) GROUP BY "OriginCityName" AS "aggr__0__key_0" ORDER BY "aggr__0__key_0" ASC LIMIT 1001`, @@ -521,8 +519,7 @@ var AggregationTests = []AggregationTestCase{ "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__1__key_0", count(*) AS "aggr__0__1__count" FROM ` + TableName + ` - WHERE ("timestamp">=parseDateTime64BestEffort('2024-02-02T13:47:16.029Z') - AND "timestamp"<=parseDateTime64BestEffort('2024-02-09T13:47:16.029Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436029)) GROUP BY "FlightDelayType" AS "aggr__0__key_0", toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( "timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__1__key_0")) @@ -622,8 +619,7 @@ var AggregationTests = []AggregationTestCase{ }, ExpectedPancakeSQL: `SELECT sumOrNull("taxful_total_price") AS "metric__0_col_0" ` + `FROM ` + TableName + ` ` + - `WHERE ("order_date">=parseDateTime64BestEffort('2024-02-06T09:59:57.034Z') ` + - `AND "order_date"<=parseDateTime64BestEffort('2024-02-13T09:59:57.034Z'))`, + `WHERE ("order_date">=fromUnixTimestamp64Milli(1707213597034) AND "order_date"<=fromUnixTimestamp64Milli(1707818397034))`, }, { // [4] TestName: "cardinality", @@ -755,8 +751,7 @@ var AggregationTests = []AggregationTestCase{ "OriginCityName" AS "aggr__suggestions__key_0", count(*) AS "aggr__suggestions__count" FROM ` + TableName + ` - WHERE ("timestamp">=parseDateTime64BestEffort('2024-02-02T13:47:16.029Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-02-09T13:47:16.029Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436029)) GROUP BY "OriginCityName" AS "aggr__suggestions__key_0" ORDER BY "aggr__suggestions__count" DESC, "aggr__suggestions__key_0" ASC LIMIT 11`, @@ -872,8 +867,7 @@ var AggregationTests = []AggregationTestCase{ ExpectedPancakeSQL: ` SELECT countIf("FlightDelay"==true) AS "aggr__0-bucket__count" FROM ` 
+ TableName + ` - WHERE ("timestamp">=parseDateTime64BestEffort('2024-02-02T13:47:16.029Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-02-09T13:47:16.029Z'))`, + WHERE ("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436029))`, }, { // [6] TestName: "filters", @@ -1027,18 +1021,17 @@ var AggregationTests = []AggregationTestCase{ }}, }, ExpectedPancakeSQL: ` - SELECT countIf(("timestamp">=parseDateTime64BestEffort( - '2024-02-02T13:47:16.029Z') AND "timestamp"<=parseDateTime64BestEffort( - '2024-02-09T13:47:16.029Z'))) AS "filter_0__aggr__time_offset_split__count", - countIf(("timestamp">=parseDateTime64BestEffort('2024-01-26T13:47:16.029Z') - AND "timestamp"<=parseDateTime64BestEffort('2024-02-02T13:47:16.029Z'))) AS + SELECT countIf(("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND + "timestamp"<=fromUnixTimestamp64Milli(1707486436029))) AS + "filter_0__aggr__time_offset_split__count", + countIf(("timestamp">=fromUnixTimestamp64Milli(1706276836029) AND "timestamp" + <=fromUnixTimestamp64Milli(1706881636029))) AS "filter_1__aggr__time_offset_split__count" - FROM ` + TableName + ` - WHERE ("FlightDelay"==true AND (("timestamp">=parseDateTime64BestEffort( - '2024-02-02T13:47:16.029Z') AND "timestamp"<=parseDateTime64BestEffort( - '2024-02-09T13:47:16.029Z')) OR ("timestamp">=parseDateTime64BestEffort( - '2024-01-26T13:47:16.029Z') AND "timestamp"<=parseDateTime64BestEffort( - '2024-02-02T13:47:16.029Z'))))`, + FROM __quesma_table_name + WHERE ("FlightDelay"==true AND (("timestamp">=fromUnixTimestamp64Milli( + 1706881636029) AND "timestamp"<=fromUnixTimestamp64Milli(1707486436029)) OR ( + "timestamp">=fromUnixTimestamp64Milli(1706276836029) AND "timestamp"<= + fromUnixTimestamp64Milli(1706881636029))))`, }, { // [7] TestName: "top hits, quite complex", @@ -1628,9 +1621,8 @@ var AggregationTests = []AggregationTestCase{ ExpectedPancakeSQL: ` SELECT "FlightDelayMin" AS "aggr__0__key_0", count(*) AS "aggr__0__count" FROM ` + TableName + ` - WHERE (("timestamp">=parseDateTime64BestEffort('2024-02-02T13:47:16.029Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-02-09T13:47:16.029Z')) AND NOT ( - "FlightDelayMin"==0)) + WHERE (("timestamp">=fromUnixTimestamp64Milli(1706881636029) AND "timestamp"<= + fromUnixTimestamp64Milli(1707486436029)) AND NOT ("FlightDelayMin"==0)) GROUP BY "FlightDelayMin" AS "aggr__0__key_0" ORDER BY "aggr__0__key_0" ASC`, }, @@ -1853,8 +1845,8 @@ var AggregationTests = []AggregationTestCase{ count(*) AS "aggr__0__1__count" FROM __quesma_table_name WHERE ("host.name" iLIKE '%prometheus%' AND ("@timestamp">= - parseDateTime64BestEffort('2024-02-02T16:36:49.940Z') AND "@timestamp"<= - parseDateTime64BestEffort('2024-02-09T16:36:49.940Z'))) + fromUnixTimestamp64Milli(1706891809940) AND "@timestamp"<= + fromUnixTimestamp64Milli(1707496609940))) GROUP BY "severity" AS "aggr__0__key_0", toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset(toTimezone( "@timestamp", 'Europe/Warsaw'))*1000) / 10800000) AS "aggr__0__1__key_0")) @@ -2202,8 +2194,7 @@ var AggregationTests = []AggregationTestCase{ "order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__1__2__key_0", countIf("taxful_total_price" > '250') AS "aggr__1__2__count" FROM __quesma_table_name - WHERE ("order_date">=parseDateTime64BestEffort('2024-02-06T09:59:57.034Z') AND - "order_date"<=parseDateTime64BestEffort('2024-02-13T09:59:57.034Z')) + WHERE ("order_date">=fromUnixTimestamp64Milli(1707213597034) AND 
"order_date"<=fromUnixTimestamp64Milli(1707818397034)) GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone ("order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__1__2__key_0" ORDER BY "aggr__1__2__key_0" ASC`, @@ -2214,8 +2205,7 @@ var AggregationTests = []AggregationTestCase{ "order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__1__2__key_0", countIf("taxful_total_price" > '250') AS "aggr__1__2__count" FROM __quesma_table_name - WHERE ("order_date">=parseDateTime64BestEffort('2024-02-06T09:59:57.034Z') AND - "order_date"<=parseDateTime64BestEffort('2024-02-13T09:59:57.034Z')) + WHERE ("order_date">=fromUnixTimestamp64Milli(1707213597034) AND "order_date"<=fromUnixTimestamp64Milli(1707818397034)) GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset( toTimezone("order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__1__2__key_0" @@ -2232,9 +2222,8 @@ var AggregationTests = []AggregationTestCase{ __quesma_table_name AS "hit_table" ON ("group_table"."aggr__1__2__key_0"= toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( "order_date", 'Europe/Warsaw'))*1000) / 43200000)) - WHERE ("taxful_total_price" > '250' AND ("order_date">= - parseDateTime64BestEffort('2024-02-06T09:59:57.034Z') AND "order_date"<= - parseDateTime64BestEffort('2024-02-13T09:59:57.034Z')))) + WHERE ("taxful_total_price" > '250' AND ("order_date">=fromUnixTimestamp64Milli(1707213597034) + AND "order_date"<=fromUnixTimestamp64Milli(1707818397034)))) SELECT "aggr__1__count", "aggr__1__2__key_0", "aggr__1__2__count", "top_metrics__1__2__4_col_0", "top_metrics__1__2__4_col_1", "top_hits_rank" FROM "quesma_top_hits_join" @@ -2247,8 +2236,7 @@ var AggregationTests = []AggregationTestCase{ "order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__1__2__key_0", countIf("taxful_total_price" > '250') AS "aggr__1__2__count" FROM __quesma_table_name - WHERE ("order_date">=parseDateTime64BestEffort('2024-02-06T09:59:57.034Z') AND - "order_date"<=parseDateTime64BestEffort('2024-02-13T09:59:57.034Z')) + WHERE ("order_date">=fromUnixTimestamp64Milli(1707213597034) AND "order_date"<=fromUnixTimestamp64Milli(1707818397034)) GROUP BY toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset( toTimezone("order_date", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__1__2__key_0" @@ -2265,9 +2253,8 @@ var AggregationTests = []AggregationTestCase{ __quesma_table_name AS "hit_table" ON ("group_table"."aggr__1__2__key_0"= toInt64((toUnixTimestamp64Milli("order_date")+timeZoneOffset(toTimezone( "order_date", 'Europe/Warsaw'))*1000) / 43200000)) - WHERE ("taxful_total_price" > '250' AND ("order_date">= - parseDateTime64BestEffort('2024-02-06T09:59:57.034Z') AND "order_date"<= - parseDateTime64BestEffort('2024-02-13T09:59:57.034Z')))) + WHERE ("taxful_total_price" > '250' AND + ("order_date">=fromUnixTimestamp64Milli(1707213597034) AND "order_date"<=fromUnixTimestamp64Milli(1707818397034)))) SELECT "aggr__1__count", "aggr__1__2__key_0", "aggr__1__2__count", "top_metrics__1__2__5_col_0", "top_metrics__1__2__5_col_1", "top_hits_rank" FROM "quesma_top_hits_join" @@ -2465,10 +2452,10 @@ var AggregationTests = []AggregationTestCase{ count(*) AS "aggr__sample__top_values__count" FROM ( SELECT "host.name" - FROM ` + TableName + ` - WHERE (("@timestamp">=parseDateTime64BestEffort('2024-01-23T11:27:16.820Z') - AND "@timestamp"<=parseDateTime64BestEffort('2024-01-23T11:42:16.820Z')) AND - ` + fullTextFieldName + ` iLIKE '%user%') + FROM __quesma_table_name + WHERE 
(("@timestamp">=fromUnixTimestamp64Milli(1706009236820) AND "@timestamp" + <=fromUnixTimestamp64Milli(1706010136820)) AND + "__quesma_fulltext_field_name" iLIKE '%user%') LIMIT 8000) GROUP BY "host.name" AS "aggr__sample__top_values__key_0" ORDER BY "aggr__sample__top_values__count" DESC, @@ -2611,9 +2598,8 @@ var AggregationTests = []AggregationTestCase{ , count(*) AS "aggr__0__count" FROM ` + TableName + ` - WHERE (` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort( - '2024-01-23T14:43:19.481Z') AND "@timestamp"<=parseDateTime64BestEffort( - '2024-01-23T14:58:19.481Z'))) + WHERE (` + fullTextFieldName + ` iLIKE '%user%' AND + ("@timestamp">=fromUnixTimestamp64Milli(1706020999481) AND "@timestamp"<=fromUnixTimestamp64Milli(1706021899481))) GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS "aggr__0__key_0" ORDER BY "aggr__0__key_0" ASC`, @@ -2752,8 +2738,7 @@ var AggregationTests = []AggregationTestCase{ toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS "aggr__stats__series__key_0", count(*) AS "aggr__stats__series__count" FROM ` + TableName + ` - WHERE ("@timestamp">parseDateTime64BestEffort('2024-01-25T14:53:59.033Z') - AND "@timestamp"<=parseDateTime64BestEffort('2024-01-25T15:08:59.033Z')) + WHERE ("@timestamp">fromUnixTimestamp64Milli(1706194439033) AND "@timestamp"<=fromUnixTimestamp64Milli(1706195339033)) GROUP BY COALESCE("event.dataset", 'unknown') AS "aggr__stats__key_0", toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS "aggr__stats__series__key_0")) @@ -2992,8 +2977,7 @@ var AggregationTests = []AggregationTestCase{ "aggr__0__key_0", count(*) AS "aggr__0__count", sumOrNull("taxful_total_price") AS "metric__0__1_col_0" FROM ` + TableName + ` - WHERE ("order_date">=parseDateTime64BestEffort('2024-02-19T17:40:56.351Z') AND - "order_date"<=parseDateTime64BestEffort('2024-02-26T17:40:56.351Z')) + WHERE ("order_date">=fromUnixTimestamp64Milli(1708364456351) AND "order_date"<=fromUnixTimestamp64Milli(1708969256351)) GROUP BY toInt64(toUnixTimestamp64Milli("order_date") / 86400000) AS "aggr__0__key_0" ORDER BY "aggr__0__key_0" ASC`, @@ -3102,9 +3086,9 @@ var AggregationTests = []AggregationTestCase{ ExpectedPancakeSQL: ` SELECT sum(count(*)) OVER () AS "aggr__0__parent_count", "message" AS "aggr__0__key_0", count(*) AS "aggr__0__count" - FROM ` + TableName + ` - WHERE ("timestamp">=parseDateTime64BestEffort('2024-02-20T19:13:33.795Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-02-21T04:01:14.920Z')) + FROM __quesma_table_name + WHERE ("timestamp">=fromUnixTimestamp64Milli(1708456413795) AND "timestamp"<= + fromUnixTimestamp64Milli(1708488074920)) GROUP BY "message" AS "aggr__0__key_0" ORDER BY "aggr__0__count" DESC, "aggr__0__key_0" ASC LIMIT 4`, @@ -3274,8 +3258,7 @@ var AggregationTests = []AggregationTestCase{ sumOrNullIf("taxful_total_price", "products.product_name" ILIKE '%watch%') AS "metric__0__1-bucket__1-metric_col_0" FROM ` + TableName + ` - WHERE ("order_date">=parseDateTime64BestEffort('2024-02-22T18:47:34.149Z') AND - "order_date"<=parseDateTime64BestEffort('2024-02-29T18:47:34.149Z')) + WHERE ("order_date">=fromUnixTimestamp64Milli(1708627654149) AND "order_date"<=fromUnixTimestamp64Milli(1709232454149)) GROUP BY toInt64(toUnixTimestamp64Milli("order_date") / 43200000) AS "aggr__0__key_0" ORDER BY "aggr__0__key_0" ASC`, @@ -3507,45 +3490,39 @@ var AggregationTests = []AggregationTestCase{ }}, }, ExpectedPancakeSQL: ` - SELECT sum(countIf(("order_date">=parseDateTime64BestEffort( - 
'2024-02-22T21:57:36.376Z') AND "order_date"<=parseDateTime64BestEffort( - '2024-02-29T21:57:36.376Z')))) OVER () AS + SELECT sum(countIf(("order_date">=fromUnixTimestamp64Milli(1708639056376) AND + "order_date"<=fromUnixTimestamp64Milli(1709243856376)))) OVER () AS "filter_0__aggr__time_offset_split__count", toInt64(toUnixTimestamp64Milli("order_date") / 86400000) AS "filter_0__aggr__time_offset_split__0__key_0", - countIf(("order_date">=parseDateTime64BestEffort('2024-02-22T21:57:36.376Z') - AND "order_date"<=parseDateTime64BestEffort('2024-02-29T21:57:36.376Z'))) AS + countIf(("order_date">=fromUnixTimestamp64Milli(1708639056376) AND + "order_date"<=fromUnixTimestamp64Milli(1709243856376))) AS "filter_0__aggr__time_offset_split__0__count", - sumOrNullIf("taxful_total_price", ("order_date">=parseDateTime64BestEffort( - '2024-02-22T21:57:36.376Z') AND "order_date"<=parseDateTime64BestEffort( - '2024-02-29T21:57:36.376Z'))) AS + sumOrNullIf("taxful_total_price", ("order_date">=fromUnixTimestamp64Milli( + 1708639056376) AND "order_date"<=fromUnixTimestamp64Milli(1709243856376))) AS "filter_0__metric__time_offset_split__0__1_col_0", - sumOrNullIf("taxful_total_price", ("order_date">=parseDateTime64BestEffort( - '2024-02-22T21:57:36.376Z') AND "order_date"<=parseDateTime64BestEffort( - '2024-02-29T21:57:36.376Z'))) AS + sumOrNullIf("taxful_total_price", ("order_date">=fromUnixTimestamp64Milli( + 1708639056376) AND "order_date"<=fromUnixTimestamp64Milli(1709243856376))) AS "filter_0__metric__time_offset_split__0__2_col_0", - sum(countIf(("order_date">=parseDateTime64BestEffort( - '2024-02-15T21:57:36.376Z') AND "order_date"<=parseDateTime64BestEffort( - '2024-02-22T21:57:36.376Z')))) OVER () AS + sum(countIf(("order_date">=fromUnixTimestamp64Milli(1708034256376) AND + "order_date"<=fromUnixTimestamp64Milli(1708639056376)))) OVER () AS "filter_1__aggr__time_offset_split__count", toInt64(toUnixTimestamp64Milli("order_date") / 86400000) AS "filter_1__aggr__time_offset_split__0__key_0", - countIf(("order_date">=parseDateTime64BestEffort('2024-02-15T21:57:36.376Z') - AND "order_date"<=parseDateTime64BestEffort('2024-02-22T21:57:36.376Z'))) AS + countIf(("order_date">=fromUnixTimestamp64Milli(1708034256376) AND + "order_date"<=fromUnixTimestamp64Milli(1708639056376))) AS "filter_1__aggr__time_offset_split__0__count", - sumOrNullIf("taxful_total_price", ("order_date">=parseDateTime64BestEffort( - '2024-02-15T21:57:36.376Z') AND "order_date"<=parseDateTime64BestEffort( - '2024-02-22T21:57:36.376Z'))) AS + sumOrNullIf("taxful_total_price", ("order_date">=fromUnixTimestamp64Milli( + 1708034256376) AND "order_date"<=fromUnixTimestamp64Milli(1708639056376))) AS "filter_1__metric__time_offset_split__0__1_col_0", - sumOrNullIf("taxful_total_price", ("order_date">=parseDateTime64BestEffort( - '2024-02-15T21:57:36.376Z') AND "order_date"<=parseDateTime64BestEffort( - '2024-02-22T21:57:36.376Z'))) AS + sumOrNullIf("taxful_total_price", ("order_date">=fromUnixTimestamp64Milli( + 1708034256376) AND "order_date"<=fromUnixTimestamp64Milli(1708639056376))) AS "filter_1__metric__time_offset_split__0__2_col_0" - FROM ` + TableName + ` - WHERE (("order_date">=parseDateTime64BestEffort('2024-02-22T21:57:36.376Z') AND - "order_date"<=parseDateTime64BestEffort('2024-02-29T21:57:36.376Z')) OR ( - "order_date">=parseDateTime64BestEffort('2024-02-15T21:57:36.376Z') AND - "order_date"<=parseDateTime64BestEffort('2024-02-22T21:57:36.376Z'))) + FROM __quesma_table_name + WHERE 
(("order_date">=fromUnixTimestamp64Milli(1708639056376) AND "order_date"<= + fromUnixTimestamp64Milli(1709243856376)) OR ("order_date">= + fromUnixTimestamp64Milli(1708034256376) AND "order_date"<= + fromUnixTimestamp64Milli(1708639056376))) GROUP BY toInt64(toUnixTimestamp64Milli("order_date") / 86400000) AS "aggr__time_offset_split__0__key_0" ORDER BY "aggr__time_offset_split__0__key_0" ASC`, @@ -3667,8 +3644,7 @@ var AggregationTests = []AggregationTestCase{ FROM ( SELECT "@timestamp" FROM ` + TableName + ` - WHERE (toUnixTimestamp64Milli("@timestamp")>=1.709815794995e+12 AND - toUnixTimestamp64Milli("@timestamp")<=1.709816694995e+12) + WHERE ("@timestamp">=fromUnixTimestamp64Milli(1709815794995) AND "@timestamp"<=fromUnixTimestamp64Milli(1709816694995)) LIMIT 20000) GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 15000) AS "aggr__sampler__eventRate__key_0" @@ -4043,8 +4019,7 @@ var AggregationTests = []AggregationTestCase{ FROM ( SELECT "bytes_gauge" FROM __quesma_table_name - WHERE (toUnixTimestamp64Milli("timestamp")>=1.709932426749e+12 AND - toUnixTimestamp64Milli("timestamp")<=1.711228426749e+12) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1709932426749) AND "timestamp"<=fromUnixTimestamp64Milli(1711228426749)) LIMIT 20000)`, ExpectedAdditionalPancakeSQLs: []string{ `SELECT sum(count(*)) OVER () AS "aggr__sample__count", @@ -4054,8 +4029,7 @@ var AggregationTests = []AggregationTestCase{ FROM ( SELECT "bytes_gauge" FROM __quesma_table_name - WHERE (toUnixTimestamp64Milli("timestamp")>=1.709932426749e+12 AND - toUnixTimestamp64Milli("timestamp")<=1.711228426749e+12) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1709932426749) AND "timestamp"<=fromUnixTimestamp64Milli(1711228426749)) LIMIT 20000) GROUP BY "bytes_gauge" AS "aggr__sample__bytes_gauge_top__key_0" ORDER BY "aggr__sample__bytes_gauge_top__count" DESC, @@ -4266,9 +4240,9 @@ var AggregationTests = []AggregationTestCase{ countIf("bytes_gauge">=-5.5) AS "range_2__aggr__2__count", countIf("bytes_gauge"<6.555) AS "range_3__aggr__2__count", countIf("bytes_gauge" IS NOT NULL) AS "range_4__aggr__2__count" - FROM ` + TableName + ` - WHERE ("timestamp">=parseDateTime64BestEffort('2024-04-16T12:15:11.790Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-04-16T12:30:11.790Z'))`, + FROM __quesma_table_name + WHERE ("timestamp">=fromUnixTimestamp64Milli(1713269711790) AND "timestamp"<= + fromUnixTimestamp64Milli(1713270611790))`, ExpectedAdditionalPancakeSQLs: []string{` SELECT countIf(("bytes_gauge">=0 AND "bytes_gauge"<1000)) AS "range_0__aggr__3__count", @@ -4277,9 +4251,9 @@ var AggregationTests = []AggregationTestCase{ countIf("bytes_gauge">=-5.5) AS "range_2__aggr__3__count", countIf("bytes_gauge"<6.555) AS "range_3__aggr__3__count", countIf("bytes_gauge" IS NOT NULL) AS "range_4__aggr__3__count" - FROM ` + TableName + ` - WHERE ("timestamp">=parseDateTime64BestEffort('2024-04-16T12:15:11.790Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-04-16T12:30:11.790Z'))`, + FROM __quesma_table_name + WHERE ("timestamp">=fromUnixTimestamp64Milli(1713269711790) AND "timestamp"<= + fromUnixTimestamp64Milli(1713270611790))`, }, }, { // [22] @@ -4419,8 +4393,7 @@ var AggregationTests = []AggregationTestCase{ countIf("timestamp">=toInt64(toUnixTimestamp('2024-04-14'))) AS "range_2__aggr__2__count" FROM ` + TableName + ` - WHERE ("timestamp">=parseDateTime64BestEffort('2024-04-06T07:28:50.059Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-04-16T17:28:50.059Z'))`, + WHERE 
("timestamp">=fromUnixTimestamp64Milli(1712388530059) AND "timestamp"<=fromUnixTimestamp64Milli(1713288530059))`, }, { // [23] TestName: "significant terms aggregation: same as terms for now", @@ -4756,8 +4729,7 @@ var AggregationTests = []AggregationTestCase{ SELECT floor("bytes"/100)*100 AS "aggr__2__key_0", count(*) AS "aggr__2__count" FROM ` + TableName + ` - WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-10T13:47:56.077Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-05-10T14:02:56.077Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1715348876077) AND "timestamp"<=fromUnixTimestamp64Milli(1715349776077)) GROUP BY floor("bytes"/100)*100 AS "aggr__2__key_0" ORDER BY "aggr__2__key_0" ASC`, }, @@ -4894,9 +4866,9 @@ var AggregationTests = []AggregationTestCase{ ExpectedPancakeSQL: ` SELECT toInt64(toUnixTimestamp64Milli("timestamp") / 30000) AS "aggr__2__key_0", count(*) AS "aggr__2__count" - FROM ` + TableName + ` - WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-10T14:29:02.900Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-05-10T14:44:02.900Z')) + FROM __quesma_table_name + WHERE ("timestamp">=fromUnixTimestamp64Milli(1715351342900) AND "timestamp"<= + fromUnixTimestamp64Milli(1715352242900)) GROUP BY toInt64(toUnixTimestamp64Milli("timestamp") / 30000) AS "aggr__2__key_0" ORDER BY "aggr__2__key_0" ASC`, @@ -5613,8 +5585,7 @@ var AggregationTests = []AggregationTestCase{ "machine.os" AS "aggr__2__key_0", count(*) AS "aggr__2__count", uniq("clientip") AS "metric__2__1_col_0" FROM __quesma_table_name - WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-10T06:22:39.037Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-05-10T21:22:39.037Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1715322159037) AND "timestamp"<=fromUnixTimestamp64Milli(1715376159037)) GROUP BY "machine.os" AS "aggr__2__key_0" ORDER BY "metric__2__1_col_0" DESC, "aggr__2__key_0" ASC LIMIT 6`, @@ -6104,7 +6075,7 @@ var AggregationTests = []AggregationTestCase{ }, ExpectedPancakeSQL: ` SELECT toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( - "timestamp", 'Europe/Warsaw'))*1000) / 600000) AS "aggr__0__key_0", + "timestamp", 'Europe/Warsaw'))*1000) / 600000) AS "aggr__0__key_0", count(*) AS "aggr__0__count", count("bytes") AS "metric__0__1_col_0", minOrNull("bytes") AS "metric__0__1_col_1", maxOrNull("bytes") AS "metric__0__1_col_2", @@ -6125,9 +6096,9 @@ var AggregationTests = []AggregationTestCase{ varSamp("bytes") AS "metric__0__2_col_7", stddevPop("bytes") AS "metric__0__2_col_8", stddevSamp("bytes") AS "metric__0__2_col_9" - FROM ` + TableName + ` - WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-21T21:35:34.210Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-05-22T12:35:34.210Z')) + FROM __quesma_table_name + WHERE ("timestamp">=fromUnixTimestamp64Milli(1716327334210) AND "timestamp"<= + fromUnixTimestamp64Milli(1716381334210)) GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset(toTimezone( "timestamp", 'Europe/Warsaw'))*1000) / 600000) AS "aggr__0__key_0" ORDER BY "aggr__0__key_0" ASC`, diff --git a/quesma/testdata/aggregation_requests_2.go b/quesma/testdata/aggregation_requests_2.go index 93626e360..102a9621d 100644 --- a/quesma/testdata/aggregation_requests_2.go +++ b/quesma/testdata/aggregation_requests_2.go @@ -605,8 +605,7 @@ var AggregationTests2 = []AggregationTestCase{ quantiles(0.020000)("timestamp") AS "metric__2__1_col_1", sumOrNull("count") AS "metric__2__2_col_0" FROM __quesma_table_name - WHERE 
("timestamp">=parseDateTime64BestEffort('2024-04-18T00:51:15.845Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-05-03T00:51:15.845Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1713401475845) AND "timestamp"<=fromUnixTimestamp64Milli(1714697475845)) GROUP BY "response" AS "aggr__2__key_0" ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC LIMIT 4`, @@ -1676,8 +1675,7 @@ var AggregationTests2 = []AggregationTestCase{ floor("bytes2"/5)*5 AS "aggr__2__3__key_0", count(*) AS "aggr__2__3__count" FROM ` + TableName + ` - WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-10T13:47:56.077Z') - AND "timestamp"<=parseDateTime64BestEffort('2024-05-10T14:02:56.077Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1715348876077) AND "timestamp"<=fromUnixTimestamp64Milli(1715349776077)) GROUP BY floor("bytes"/100)*100 AS "aggr__2__key_0", floor("bytes2"/5)*5 AS "aggr__2__3__key_0")) ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__3__order_1_rank" ASC`, @@ -1856,8 +1854,7 @@ var AggregationTests2 = []AggregationTestCase{ floor("bytes2"/5)*5 AS "aggr__2__3__key_0", count(*) AS "aggr__2__3__count" FROM ` + TableName + ` - WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-10T13:47:56.077Z') - AND "timestamp"<=parseDateTime64BestEffort('2024-05-10T14:02:56.077Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1715348876077) AND "timestamp"<=fromUnixTimestamp64Milli(1715349776077)) GROUP BY floor("bytes"/100)*100 AS "aggr__2__key_0", floor("bytes2"/5)*5 AS "aggr__2__3__key_0")) ORDER BY "aggr__2__order_1_rank" ASC, "aggr__2__3__order_1_rank" ASC`, @@ -2459,8 +2456,8 @@ var AggregationTests2 = []AggregationTestCase{ quantiles(0.750000)("docker.cpu.total.pct") AS "metric__0__1__2_col_0" FROM __quesma_table_name WHERE ("data_stream.dataset"='docker.cpu' AND ("@timestamp">= - parseDateTime64BestEffort('2024-08-18T07:54:12.291Z') AND "@timestamp"<= - parseDateTime64BestEffort('2024-09-02T07:54:12.291Z'))) + fromUnixTimestamp64Milli(1723967652291) AND "@timestamp"<= + fromUnixTimestamp64Milli(1725263652291))) GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 43200000) AS "aggr__0__key_0", "container.name" AS "aggr__0__1__key_0")) WHERE "aggr__0__1__order_1_rank"<=6 @@ -3712,7 +3709,8 @@ var AggregationTests2 = []AggregationTestCase{ "aggr__histo__0__order_1_rank" FROM ( SELECT toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", - toDateTime(1706021760000))) / 30000) AS "aggr__histo__key_0", + fromUnixTimestamp64Milli(1706021760000))) / 30000) AS "aggr__histo__key_0" + , sum(count(*)) OVER (PARTITION BY "aggr__histo__key_0") AS "aggr__histo__count", sum(count(*)) OVER (PARTITION BY "aggr__histo__key_0") AS @@ -3720,8 +3718,8 @@ var AggregationTests2 = []AggregationTestCase{ count(*) AS "aggr__histo__0__count" FROM __quesma_table_name GROUP BY toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", - toDateTime(1706021760000))) / 30000) AS "aggr__histo__key_0", - "type" AS "aggr__histo__0__key_0")) + fromUnixTimestamp64Milli(1706021760000))) / 30000) AS "aggr__histo__key_0" + , "type" AS "aggr__histo__0__key_0")) WHERE "aggr__histo__0__order_1_rank"<=11 ORDER BY "aggr__histo__order_1_rank" ASC, "aggr__histo__0__order_1_rank" ASC`, }, @@ -3861,41 +3859,41 @@ var AggregationTests2 = []AggregationTestCase{ }}}, }, ExpectedPancakeSQL: ` - SELECT toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", toDateTime - (1706878800000))) / 90000) AS "aggr__histo1__key_0", + SELECT toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", + 
fromUnixTimestamp64Milli(1706878800000))) / 90000) AS "aggr__histo1__key_0", count(*) AS "aggr__histo1__count" FROM __quesma_table_name GROUP BY toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", - toDateTime(1706878800000))) / 90000) AS "aggr__histo1__key_0" + fromUnixTimestamp64Milli(1706878800000))) / 90000) AS "aggr__histo1__key_0" ORDER BY "aggr__histo1__key_0" ASC`, ExpectedAdditionalPancakeSQLs: []string{ - `SELECT toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", toDateTime - (1706878800000))) / 90000) AS "aggr__histo2__key_0", + `SELECT toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", + fromUnixTimestamp64Milli(1706878800000))) / 90000) AS "aggr__histo2__key_0", count(*) AS "aggr__histo2__count" FROM __quesma_table_name GROUP BY toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", - toDateTime(1706878800000))) / 90000) AS "aggr__histo2__key_0" + fromUnixTimestamp64Milli(1706878800000))) / 90000) AS "aggr__histo2__key_0" ORDER BY "aggr__histo2__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", toDateTime - (1706878800000))) / 90000) AS "aggr__histo3__key_0", + `SELECT toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", + fromUnixTimestamp64Milli(1706878800000))) / 90000) AS "aggr__histo3__key_0", count(*) AS "aggr__histo3__count" FROM __quesma_table_name GROUP BY toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", - toDateTime(1706878800000))) / 90000) AS "aggr__histo3__key_0" + fromUnixTimestamp64Milli(1706878800000))) / 90000) AS "aggr__histo3__key_0" ORDER BY "aggr__histo3__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", toDateTime - (1706853600000))) / 90000) AS "aggr__histo4__key_0", + `SELECT toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", + fromUnixTimestamp64Milli(1706853600000))) / 90000) AS "aggr__histo4__key_0", count(*) AS "aggr__histo4__count" FROM __quesma_table_name GROUP BY toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", - toDateTime(1706853600000))) / 90000) AS "aggr__histo4__key_0" + fromUnixTimestamp64Milli(1706853600000))) / 90000) AS "aggr__histo4__key_0" ORDER BY "aggr__histo4__key_0" ASC`, - `SELECT toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", toDateTime - (1706853600000))) / 90000) AS "aggr__histo5__key_0", + `SELECT toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", + fromUnixTimestamp64Milli(1706853600000))) / 90000) AS "aggr__histo5__key_0", count(*) AS "aggr__histo5__count" FROM __quesma_table_name GROUP BY toInt64(toUnixTimestamp64Milli(COALESCE("customer_birth_date", - toDateTime(1706853600000))) / 90000) AS "aggr__histo5__key_0" + fromUnixTimestamp64Milli(1706853600000))) / 90000) AS "aggr__histo5__key_0" ORDER BY "aggr__histo5__key_0" ASC`, }, }, diff --git a/quesma/testdata/kibana-visualize/aggregation_requests.go b/quesma/testdata/kibana-visualize/aggregation_requests.go index 6611ba5ed..715e74bfb 100644 --- a/quesma/testdata/kibana-visualize/aggregation_requests.go +++ b/quesma/testdata/kibana-visualize/aggregation_requests.go @@ -238,8 +238,8 @@ var AggregationTests = []testdata.AggregationTestCase{ "aggr__0__1__parent_count", "severity" AS "aggr__0__1__key_0", "source" AS "aggr__0__1__key_1", count(*) AS "aggr__0__1__count" FROM __quesma_table_name - WHERE ("@timestamp">=parseDateTime64BestEffort('2024-05-27T11:59:56.627Z') - AND "@timestamp"<=parseDateTime64BestEffort('2024-05-27T12:14:56.627Z')) + WHERE 
("@timestamp">=fromUnixTimestamp64Milli(1716811196627) AND + "@timestamp"<=fromUnixTimestamp64Milli(1716812096627)) GROUP BY toInt64((toUnixTimestamp64Milli("@timestamp")+timeZoneOffset( toTimezone("@timestamp", 'Europe/Warsaw'))*1000) / 30000) AS "aggr__0__key_0", "severity" AS "aggr__0__1__key_0", @@ -1320,8 +1320,8 @@ var AggregationTests = []testdata.AggregationTestCase{ avgOrNull("FlightDelayMin") AS "metric__0__1_col_3", sumOrNull("FlightDelayMin") AS "metric__0__1_col_4" FROM __quesma_table_name - WHERE ("timestamp">=parseDateTime64BestEffort('2024-09-07T15:30:24.239Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-09-22T15:30:24.239Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1725723024239) AND "timestamp"<= + fromUnixTimestamp64Milli(1727019024239)) GROUP BY "Carrier" AS "aggr__0__key_0" ORDER BY "metric__0__1_col_1" DESC, "metric__0__1_col_0" DESC, "metric__0__1_col_3" DESC, "metric__0__1_col_2" ASC, @@ -1488,8 +1488,8 @@ var AggregationTests = []testdata.AggregationTestCase{ stddevPop("FlightDelayMin") AS "metric__0__1_col_8", stddevSamp("FlightDelayMin") AS "metric__0__1_col_9" FROM __quesma_table_name - WHERE ("timestamp">=parseDateTime64BestEffort('2024-09-07T15:30:24.239Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-09-22T15:30:24.239Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1725723024239) AND "timestamp"<= + fromUnixTimestamp64Milli(1727019024239)) GROUP BY "Carrier" AS "aggr__0__key_0" ORDER BY "metric__0__1_col_1" DESC, "metric__0__1_col_0" DESC, "metric__0__1_col_3" DESC, "metric__0__1_col_2" ASC, diff --git a/quesma/testdata/kibana-visualize/pipeline_aggregation_requests.go b/quesma/testdata/kibana-visualize/pipeline_aggregation_requests.go index 3e45a4316..ccda65c50 100644 --- a/quesma/testdata/kibana-visualize/pipeline_aggregation_requests.go +++ b/quesma/testdata/kibana-visualize/pipeline_aggregation_requests.go @@ -198,8 +198,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ "aggr__2__1-bucket__key_0", count(*) AS "aggr__2__1-bucket__count", maxOrNull("timestamp") AS "metric__2__1-bucket__1-metric_col_0" FROM __quesma_table_name - WHERE ("timestamp">=parseDateTime64BestEffort('2024-09-20T16:16:03.807Z') - AND "timestamp"<=parseDateTime64BestEffort('2024-10-05T16:16:03.807Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1726848963807) AND "timestamp" + <=fromUnixTimestamp64Milli(1728144963807)) GROUP BY toInt64((toUnixTimestamp64Milli("timestamp")+timeZoneOffset( toTimezone("timestamp", 'Europe/Warsaw'))*1000) / 43200000) AS "aggr__2__key_0", diff --git a/quesma/testdata/opensearch-visualize/aggregation_requests.go b/quesma/testdata/opensearch-visualize/aggregation_requests.go index a71462e98..fd25e2cb9 100644 --- a/quesma/testdata/opensearch-visualize/aggregation_requests.go +++ b/quesma/testdata/opensearch-visualize/aggregation_requests.go @@ -771,8 +771,8 @@ var AggregationTests = []testdata.AggregationTestCase{ "response" AS "aggr__2__key_0", count(*) AS "aggr__2__count", maxOrNull("timestamp") AS "metric__2__1_col_0" FROM __quesma_table_name - WHERE ("timestamp">=parseDateTime64BestEffort('2024-04-18T00:49:59.517Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-05-03T00:49:59.517Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1713401399517) AND "timestamp"<= + fromUnixTimestamp64Milli(1714697399517)) GROUP BY "response" AS "aggr__2__key_0" ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC LIMIT 4`, @@ -915,8 +915,8 @@ var AggregationTests = []testdata.AggregationTestCase{ "response" AS 
"aggr__2__key_0", count(*) AS "aggr__2__count", minOrNull("timestamp") AS "metric__2__1_col_0" FROM __quesma_table_name - WHERE ("timestamp">=parseDateTime64BestEffort('2024-04-18T00:51:00.471Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-05-03T00:51:00.471Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1713401460471) AND "timestamp"<= + fromUnixTimestamp64Milli(1714697460471)) GROUP BY "response" AS "aggr__2__key_0" ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC LIMIT 4`, @@ -1092,8 +1092,8 @@ var AggregationTests = []testdata.AggregationTestCase{ quantiles(0.950000)("timestamp") AS "metric__2__1_col_5", quantiles(0.990000)("timestamp") AS "metric__2__1_col_6" FROM __quesma_table_name - WHERE ("timestamp">=parseDateTime64BestEffort('2024-04-18T00:51:15.845Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-05-03T00:51:15.845Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1713401475845) AND "timestamp"<= + fromUnixTimestamp64Milli(1714697475845)) GROUP BY "response" AS "aggr__2__key_0" ORDER BY "aggr__2__count" DESC, "aggr__2__key_0" ASC LIMIT 4`, diff --git a/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go b/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go index 2f528b4de..1e38926be 100644 --- a/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go +++ b/quesma/testdata/opensearch-visualize/pipeline_aggregation_requests.go @@ -128,8 +128,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ ExpectedPancakeSQL: ` SELECT "day_of_week_i" AS "aggr__2__key_0", count(*) AS "aggr__2__count" FROM __quesma_table_name - WHERE ("order_date">=parseDateTime64BestEffort('2024-01-24T11:23:10.802Z') AND - "order_date"<=parseDateTime64BestEffort('2024-05-08T10:23:10.802Z')) + WHERE ("order_date">=fromUnixTimestamp64Milli(1706095390802) AND "order_date"<= + fromUnixTimestamp64Milli(1715163790802)) GROUP BY "day_of_week_i" AS "aggr__2__key_0" ORDER BY "aggr__2__key_0" ASC`, }, @@ -2841,8 +2841,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ SELECT sum(count(*)) OVER () AS "aggr__1-bucket__parent_count", "clientip" AS "aggr__1-bucket__key_0", count(*) AS "aggr__1-bucket__count" FROM __quesma_table_name - WHERE ("timestamp">=parseDateTime64BestEffort('2024-05-11T07:40:13.606Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-05-11T22:40:13.606Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1715413213606) AND "timestamp"<= + fromUnixTimestamp64Milli(1715467213606)) GROUP BY "clientip" AS "aggr__1-bucket__key_0" ORDER BY "aggr__1-bucket__key_0" DESC LIMIT 6`, @@ -3371,8 +3371,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ SELECT sum(count(*)) OVER () AS "aggr__1-bucket__parent_count", "Cancelled" AS "aggr__1-bucket__key_0", count(*) AS "aggr__1-bucket__count" FROM __quesma_table_name - WHERE ("timestamp">=parseDateTime64BestEffort('2024-04-27T21:56:51.264Z') AND - "timestamp"<=parseDateTime64BestEffort('2024-05-12T21:56:51.264Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1714255011264) AND "timestamp"<= + fromUnixTimestamp64Milli(1715551011264)) GROUP BY "Cancelled" AS "aggr__1-bucket__key_0" ORDER BY "aggr__1-bucket__key_0" DESC LIMIT 6`, @@ -4333,8 +4333,8 @@ var PipelineAggregationTests = []testdata.AggregationTestCase{ SELECT sum(count(*)) OVER () AS "aggr__1-bucket__parent_count", "extension" AS "aggr__1-bucket__key_0", count(*) AS "aggr__1-bucket__count" FROM __quesma_table_name - WHERE ("timestamp">=parseDateTime64BestEffort('2024-04-27T22:16:26.906Z') 
AND - "timestamp"<=parseDateTime64BestEffort('2024-05-12T22:16:26.906Z')) + WHERE ("timestamp">=fromUnixTimestamp64Milli(1714256186906) AND "timestamp"<= + fromUnixTimestamp64Milli(1715552186906)) GROUP BY "extension" AS "aggr__1-bucket__key_0" ORDER BY "aggr__1-bucket__key_0" DESC LIMIT 6`, diff --git a/quesma/testdata/opensearch_requests.go b/quesma/testdata/opensearch_requests.go index 6e0e1f6d0..eed74f52a 100644 --- a/quesma/testdata/opensearch_requests.go +++ b/quesma/testdata/opensearch_requests.go @@ -80,22 +80,20 @@ var OpensearchSearchTests = []SearchTestCase{ "track_total_hits": true }`, WantedSql: []string{ - `("__timestamp">=parseDateTime64BestEffort('2024-04-04T13:18:18.149Z') AND "__timestamp"<=parseDateTime64BestEffort('2024-04-04T13:33:18.149Z'))`, + `("__timestamp">=fromUnixTimestamp64Milli(1712236698149) AND "__timestamp"<=fromUnixTimestamp64Milli(1712237598149))`, }, WantedQueryType: model.ListAllFields, WantedQueries: []string{ `SELECT "__bytes", "__timestamp", "message_____" FROM __quesma_table_name - WHERE ("__timestamp">=parseDateTime64BestEffort('2024-04-04T13:18:18.149Z') - AND "__timestamp"<=parseDateTime64BestEffort('2024-04-04T13:33:18.149Z')) + WHERE ("__timestamp">=fromUnixTimestamp64Milli(1712236698149) AND "__timestamp"<=fromUnixTimestamp64Milli(1712237598149)) ORDER BY "__timestamp" DESC LIMIT 500`, `SELECT sum(count(*)) OVER () AS "metric____quesma_total_count_col_0", toInt64((toUnixTimestamp64Milli("__timestamp")+timeZoneOffset(toTimezone( "__timestamp", 'Europe/Warsaw'))*1000) / 30000) AS "aggr__2__key_0", count(*) AS "aggr__2__count" FROM __quesma_table_name - WHERE ("__timestamp">=parseDateTime64BestEffort('2024-04-04T13:18:18.149Z') AND - "__timestamp"<=parseDateTime64BestEffort('2024-04-04T13:33:18.149Z')) + WHERE ("__timestamp">=fromUnixTimestamp64Milli(1712236698149) AND "__timestamp"<=fromUnixTimestamp64Milli(1712237598149)) GROUP BY toInt64((toUnixTimestamp64Milli("__timestamp")+timeZoneOffset( toTimezone("__timestamp", 'Europe/Warsaw'))*1000) / 30000) AS "aggr__2__key_0" ORDER BY "aggr__2__key_0" ASC`, @@ -173,7 +171,7 @@ var OpensearchSearchTests = []SearchTestCase{ "track_total_hits": true }`, WantedSql: []string{ - `("__timestamp">=parseDateTime64BestEffort('2024-04-04T13:18:18.149Z') AND "__timestamp"<=parseDateTime64BestEffort('2024-04-04T13:33:18.149Z'))`, + `("__timestamp">=fromUnixTimestamp64Milli(1712236698149) AND "__timestamp"<=fromUnixTimestamp64Milli(1712237598149))`, }, WantedQueryType: model.Normal, WantedQueries: []string{ @@ -181,8 +179,7 @@ var OpensearchSearchTests = []SearchTestCase{ toInt64(toUnixTimestamp64Milli("__timestamp") / 30000) AS "aggr__2__key_0", count(*) AS "aggr__2__count" FROM __quesma_table_name - WHERE ("__timestamp">=parseDateTime64BestEffort('2024-04-04T13:18:18.149Z') - AND "__timestamp"<=parseDateTime64BestEffort('2024-04-04T13:33:18.149Z')) + WHERE ("__timestamp">=fromUnixTimestamp64Milli(1712236698149) AND "__timestamp"<=fromUnixTimestamp64Milli(1712237598149)) GROUP BY toInt64(toUnixTimestamp64Milli("__timestamp") / 30000) AS "aggr__2__key_0" ORDER BY "aggr__2__key_0" ASC`, }, diff --git a/quesma/testdata/requests.go b/quesma/testdata/requests.go index f09b3e305..1bf565feb 100644 --- a/quesma/testdata/requests.go +++ b/quesma/testdata/requests.go @@ -156,9 +156,8 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ FROM ( SELECT "host_name" FROM __quesma_table_name - WHERE (("@timestamp">=parseDateTime64BestEffort('2024-01-23T11:27:16.820Z') - AND 
"@timestamp"<=parseDateTime64BestEffort('2024-01-23T11:42:16.820Z')) AND - "message" iLIKE '%user%') + WHERE (("@timestamp">=fromUnixTimestamp64Milli(1706009236820) AND "@timestamp" + <=fromUnixTimestamp64Milli(1706010136820)) AND "message" iLIKE '%user%') LIMIT 20000) GROUP BY "host_name" AS "aggr__sample__top_values__key_0" ORDER BY "aggr__sample__top_values__count" DESC, @@ -307,16 +306,14 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ []string{ `SELECT "message" FROM __quesma_table_name - WHERE ((("@timestamp">=parseDateTime64BestEffort('2024-01-23T14:43:19.481Z') AND - "@timestamp"<=parseDateTime64BestEffort('2024-01-23T14:58:19.481Z')) AND - "message" iLIKE '%user%') AND "message" IS NOT NULL) + WHERE ((("@timestamp">=fromUnixTimestamp64Milli(1706020999481) AND "@timestamp"<=fromUnixTimestamp64Milli(1706021899481)) + AND "message" iLIKE '%user%') AND "message" IS NOT NULL) ORDER BY "@timestamp" DESC LIMIT 100`, `SELECT count(*) FROM __quesma_table_name - WHERE ((("@timestamp">=parseDateTime64BestEffort('2024-01-23T14:43:19.481Z') - AND "@timestamp"<=parseDateTime64BestEffort('2024-01-23T14:58:19.481Z')) - AND "message" iLIKE '%user%') AND "message" IS NOT NULL)`, + WHERE ((("@timestamp">=fromUnixTimestamp64Milli(1706020999481) AND "@timestamp"<=fromUnixTimestamp64Milli(1706021899481)) + AND "message" iLIKE '%user%') AND "message" IS NOT NULL)`, }, false, }, @@ -557,12 +554,10 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ }`, "Truncated most results. TODO Check what's at the end of response, probably count?", model.HitsCountInfo{Typ: model.ListAllFields, RequestedFields: []string{"*"}, Size: 500}, - []string{ - `SELECT "@timestamp", "host_name", "message", "properties_isreg" + []string{` + SELECT "@timestamp", "host_name", "message", "properties_isreg" FROM __quesma_table_name - WHERE ("message" iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort( - '2024-01-23T14:43:19.481Z') AND "@timestamp"<=parseDateTime64BestEffort( - '2024-01-23T14:58:19.481Z'))) + WHERE ("message" iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1706020999481) AND "@timestamp"<=fromUnixTimestamp64Milli(1706021899481))) ORDER BY "@timestamp" DESC LIMIT 500`, }, @@ -702,17 +697,12 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS "aggr__0__key_0", count(*) AS "aggr__0__count" FROM __quesma_table_name - WHERE ("message" iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort( - '2024-01-23T14:43:19.481Z') AND "@timestamp"<=parseDateTime64BestEffort( - '2024-01-23T14:58:19.481Z'))) - GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS - "aggr__0__key_0" + WHERE ("message" iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1706020999481) AND "@timestamp"<=fromUnixTimestamp64Milli(1706021899481))) + GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS "aggr__0__key_0" ORDER BY "aggr__0__key_0" ASC`, `SELECT "@timestamp" FROM __quesma_table_name - WHERE ("message" iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort( - '2024-01-23T14:43:19.481Z') AND "@timestamp"<=parseDateTime64BestEffort( - '2024-01-23T14:58:19.481Z'))) + WHERE ("message" iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1706020999481) AND "@timestamp"<=fromUnixTimestamp64Milli(1706021899481))) LIMIT 100`, }, true, @@ -771,8 +761,7 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS "aggr__stats__series__key_0", count(*) AS "aggr__stats__series__count" FROM 
__quesma_table_name - WHERE ("@timestamp">parseDateTime64BestEffort('2024-01-25T14:53:59.033Z') - AND "@timestamp"<=parseDateTime64BestEffort('2024-01-25T15:08:59.033Z')) + WHERE ("@timestamp">fromUnixTimestamp64Milli(1706194439033) AND "@timestamp"<=fromUnixTimestamp64Milli(1706195339033)) GROUP BY COALESCE("event.dataset", 'unknown') AS "aggr__stats__key_0", toInt64(toUnixTimestamp64Milli("@timestamp") / 60000) AS "aggr__stats__series__key_0")) @@ -902,7 +891,7 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ "filter": [ { "range": { - "epoch_time": { + "@timestamp": { "format": "epoch_millis", "gte": 1710171234276, "lte": 1710172134276 @@ -914,7 +903,7 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ "filter": [ { "range": { - "epoch_time": { + "@timestamp": { "format": "epoch_millis", "gte": 1710171234276, "lte": 1710172134276 @@ -949,15 +938,14 @@ var TestsAsyncSearch = []AsyncSearchTestCase{ ``, "happens e.g. in Explorer > Field Statistics view", model.HitsCountInfo{Typ: model.ListByField, RequestedFields: []string{"properties::isreg"}, Size: 100}, - []string{ - `SELECT "properties_isreg" - FROM __quesma_table_name - WHERE (((toUnixTimestamp64Milli("epoch_time")>=1.710171234276e+12 AND - toUnixTimestamp64Milli("epoch_time")<=1.710172134276e+12) AND ( - toUnixTimestamp64Milli("epoch_time")>=1.710171234276e+12 AND - toUnixTimestamp64Milli("epoch_time")<=1.710172134276e+12)) AND - "properties_isreg" IS NOT NULL) - LIMIT 100`, + []string{` + SELECT "properties_isreg" + FROM __quesma_table_name + WHERE ((("@timestamp">=fromUnixTimestamp64Milli(1710171234276) AND "@timestamp" + <=fromUnixTimestamp64Milli(1710172134276)) AND ("@timestamp">= + fromUnixTimestamp64Milli(1710171234276) AND "@timestamp"<= + fromUnixTimestamp64Milli(1710172134276))) AND "properties_isreg" IS NOT NULL) + LIMIT 100`, }, false, }, @@ -975,7 +963,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{""}, model.ListAllFields, - ////[]model.Query{newSimplestQuery()}, []string{ `SELECT "message" FROM ` + TableName + ` LIMIT 10`, }, @@ -999,7 +986,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`"type"='task'`}, model.ListAllFields, - ////[]model.Query{justSimplestWhere(`"type"='task'`)}, []string{ `SELECT "message" FROM ` + TableName + ` WHERE "type"='task' LIMIT 10`, `SELECT count(*) FROM ` + TableName, @@ -1030,9 +1016,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`("type"='task' AND "task.enabled" IN (true,54))`}, model.ListAllFields, - //[]model.Query{ - // justSimplestWhere(`("type"='task' AND "task.enabled" IN (true,54))`), - //}, []string{ `SELECT "message" FROM ` + TableName + ` WHERE ("type"='task' AND "task.enabled" IN (true,54)) LIMIT 10`, `SELECT count(*) FROM ` + TableName, @@ -1070,16 +1053,12 @@ var TestsSearch = []SearchTestCase{ "track_total_hits": true }`, []string{ - `(` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort('2024-01-17T10:28:18.815Z') AND "@timestamp"<=parseDateTime64BestEffort('2024-01-17T10:43:18.815Z')))`, + `(` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705487298815) AND "@timestamp"<=fromUnixTimestamp64Milli(1705488198815)))`, }, model.ListAllFields, - //[]model.Query{ - // justSimplestWhere(`("message" iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort('2024-01-17T10:28:18.815Z') AND "@timestamp"<=parseDateTime64BestEffort('2024-01-17T10:43:18.815Z')))`), - //}, []string{ `SELECT "message" FROM ` + TableName + ` WHERE ("message" iLIKE '%user%' ` + - `AND 
("@timestamp".=parseDateTime64BestEffort('2024-01-17T10:..:18.815Z') ` + - `AND "@timestamp".=parseDateTime64BestEffort('2024-01-17T10:..:18.815Z'))) ` + + `AND ("@timestamp">=fromUnixTimestamp64Milli(1705487298815) AND "@timestamp"<=fromUnixTimestamp64Milli(1705488198815))) ` + `LIMIT 10`, `SELECT count(*) FROM ` + TableName, }, @@ -1116,10 +1095,6 @@ var TestsSearch = []SearchTestCase{ `((("user.id"='kimchy' AND "tags"='production') AND ("tags"='env1' OR "tags"='deployed')) AND NOT (("age">=10 AND "age"<=20)))`, }, model.ListAllFields, - //[]model.Query{ - // justSimplestWhere(`((("user.id"='kimchy' AND "tags"='production') AND ("tags"='env1' OR "tags"='deployed')) AND NOT (("age">=10 AND "age"<=20)))`), - // justSimplestWhere(`((("user.id"='kimchy' AND "tags"='production') AND ("tags"='env1' OR "tags"='deployed')) AND NOT (("age">=10 AND "age"<=20)))`), - //}, []string{ `SELECT "message" FROM ` + TableName + ` WHERE ((("user.id"='kimchy' AND "tags"='production') ` + `AND ("tags"='env1' OR "tags"='deployed')) AND NOT (("age".=.0 AND "age".=.0))) ` + @@ -1158,7 +1133,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`"host_name" iLIKE '%prometheus%'`}, model.ListAllFields, - ////[]model.Query{justSimplestWhere(`"host_name" iLIKE '%prometheus%'`)}, []string{`SELECT "message" FROM ` + TableName + ` WHERE "host_name" iLIKE '%prometheus%' LIMIT 10`}, []string{}, }, @@ -1176,7 +1150,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`((("message" iLIKE '%this%' OR "message" iLIKE '%is%') OR "message" iLIKE '%a%') OR "message" iLIKE '%test%')`}, model.ListAllFields, - ////[]model.Query{justSimplestWhere(`((("message" iLIKE '%this%' OR "message" iLIKE '%is%') OR "message" iLIKE '%a%') OR "message" iLIKE '%test%')`)}, []string{ `SELECT "message" FROM ` + TableName + ` WHERE ((("message" iLIKE '%this%' OR "message" iLIKE '%is%') ` + `OR "message" iLIKE '%a%') OR "message" iLIKE '%test%') ` + @@ -1203,7 +1176,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`"status"='pending'`}, model.ListAllFields, - ////[]model.Query{justSimplestWhere(`"status"='pending'`)}, []string{`SELECT "message" FROM ` + TableName + ` WHERE "status"='pending'`}, []string{}, }, @@ -1254,9 +1226,6 @@ var TestsSearch = []SearchTestCase{ `(((has("attributes_string_key","namespace") AND "attributes_string_value"[indexOf("attributes_string_key","namespace")] IS NOT NULL) ` + `OR (has("attributes_string_key","namespaces") AND "attributes_string_value"[indexOf("attributes_string_key","namespaces")] IS NOT NULL))))`}, model.ListAllFields, - ////[]model.Query{ - // justSimplestWhere(`("type"='upgrade-assistant-reindex-operation' AND (NOT ((has("attributes_string_key","namespace") AND "attributes_string_value"[indexOf("attributes_string_key","namespace")] IS NOT NULL)) OR NOT ((has("attributes_string_key","namespaces") AND "attributes_string_value"[indexOf("attributes_string_key","namespaces")] IS NOT NULL))))`), - //}, []string{ `SELECT "message" ` + `FROM ` + TableName + ` ` + @@ -1291,7 +1260,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`"exception-list-agnostic.list_id" = 'endpoint_event_filters'`}, model.ListAllFields, - //[]model.Query{justSimplestWhere(`"exception-list-agnostic.list_id" = 'endpoint_event_filters'`)}, []string{`SELECT "message" FROM ` + TableName + ` WHERE "exception-list-agnostic.list_id" = 'endpoint_event_filters'`}, []string{}, }, @@ -1319,7 +1287,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{fullTextFieldName + ` = 'ingest-agent-policies'`}, model.ListAllFields, - 
//[]model.Query{justSimplestWhere(`"message" = 'ingest-agent-policies'`)}, []string{`SELECT "message" FROM ` + TableName + ` WHERE ` + fullTextFieldName + ` = 'ingest-agent-policies'`}, []string{}, }, @@ -1344,7 +1311,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`"task.taskType" iLIKE 'alerting:%'`}, model.ListAllFields, - //[]model.Query{justSimplestWhere(`"task.taskType" iLIKE 'alerting:%'`)}, []string{`SELECT "message" FROM ` + TableName + ` WHERE "task.taskType" iLIKE 'alerting:%'`}, []string{}, }, @@ -1369,7 +1335,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`"alert.actions.actionRef" iLIKE 'preconfigured:%'`}, model.ListAllFields, - //[]model.Query{justSimplestWhere(`"alert.actions.actionRef" iLIKE 'preconfigured:%'`)}, []string{`SELECT "message" FROM ` + TableName + ` WHERE "alert.actions.actionRef" iLIKE 'preconfigured:%'`}, []string{}, }, @@ -1385,7 +1350,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`"user" iLIKE 'ki%'`}, model.ListAllFields, - //[]model.Query{justSimplestWhere(`"user" iLIKE 'ki%'`)}, []string{`SELECT "message" FROM ` + TableName + ` WHERE "user" iLIKE 'ki%'`}, []string{}, }, @@ -1406,7 +1370,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`"message" ILIKE '% logged'`}, model.ListAllFields, - //[]model.Query{justSimplestWhere(`"message" ILIKE '% logged'`)}, []string{`SELECT "message" FROM ` + TableName + ` WHERE "message" ILIKE '% logged'`}, []string{}, }, @@ -1426,7 +1389,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{""}, model.ListAllFields, - //[]model.Query{newSimplestQuery()}, []string{ `SELECT count(*) FROM ` + TableName, `SELECT "message" FROM ` + TableName, @@ -1445,7 +1407,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`"message" iLIKE '%this is a test%'`}, model.ListAllFields, - //[]model.Query{justSimplestWhere(`"message" iLIKE '%this is a test%'`)}, []string{`SELECT "message" FROM ` + TableName + ` WHERE "message" iLIKE '%this is a test%'`}, []string{}, }, @@ -1464,7 +1425,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`"message" iLIKE '%this is a test%'`}, model.ListAllFields, - //[]model.Query{justSimplestWhere(`"message" iLIKE '%this is a test%'`)}, []string{`SELECT "message" FROM ` + TableName + ` WHERE "message" iLIKE '%this is a test%'`}, []string{}, }, @@ -1498,7 +1458,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`"references.type"='tag'`}, model.ListAllFields, - ////[]model.Query{justSimplestWhere(`"references.type"='tag'`)}, []string{`SELECT "message" FROM ` + TableName + ` WHERE "references.type"='tag'`}, []string{}, }, @@ -1562,16 +1521,11 @@ var TestsSearch = []SearchTestCase{ } `, []string{ - `(` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort('2024-01-22T09:26:10.299Z') ` + - `AND "@timestamp"<=parseDateTime64BestEffort('2024-01-22T09:41:10.299Z')))`, - `((` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort('2024-01-22T09:26:10.299Z') ` + - `AND "@timestamp"<=parseDateTime64BestEffort('2024-01-22T09:41:10.299Z'))) ` + + `(` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299)))`, + `((` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299))) ` + `AND "stream.namespace" IS NOT NULL)`, }, model.Normal, - ////[]model.Query{ - // justSimplestWhere(`("message" iLIKE '%user%' AND 
("@timestamp">=parseDateTime64BestEffort('2024-01-22T09:26:10.299Z') AND "@timestamp"<=parseDateTime64BestEffort('2024-01-22T09:41:10.299Z')))`), - //}, []string{}, []string{ `SELECT uniqMerge(uniqState("stream.namespace")) OVER () AS @@ -1581,9 +1535,7 @@ var TestsSearch = []SearchTestCase{ "stream.namespace" AS "aggr__suggestions__key_0", count(*) AS "aggr__suggestions__count" FROM __quesma_table_name - WHERE ("message" iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort( - '2024-01-22T09:26:10.299Z') AND "@timestamp"<=parseDateTime64BestEffort( - '2024-01-22T09:41:10.299Z'))) + WHERE ("message" iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299))) GROUP BY "stream.namespace" AS "aggr__suggestions__key_0" ORDER BY "aggr__suggestions__count" DESC, "aggr__suggestions__key_0" ASC LIMIT 11`, @@ -1656,16 +1608,11 @@ var TestsSearch = []SearchTestCase{ } `, []string{ - `(("service.name"='admin' AND ("@timestamp">=parseDateTime64BestEffort('2024-01-22T14:34:35.873Z') ` + - `AND "@timestamp"<=parseDateTime64BestEffort('2024-01-22T14:49:35.873Z'))) ` + + `(("service.name"='admin' AND ("@timestamp">=fromUnixTimestamp64Milli(1705934075873) AND "@timestamp"<=fromUnixTimestamp64Milli(1705934975873))) ` + `AND "namespace" IS NOT NULL)`, - `("service.name"='admin' AND ("@timestamp">=parseDateTime64BestEffort('2024-01-22T14:34:35.873Z') ` + - `AND "@timestamp"<=parseDateTime64BestEffort('2024-01-22T14:49:35.873Z')))`, + `("service.name"='admin' AND ("@timestamp">=fromUnixTimestamp64Milli(1705934075873) AND "@timestamp"<=fromUnixTimestamp64Milli(1705934975873)))`, }, model.Normal, - ////[]model.Query{ - // justSimplestWhere(`("service.name"='admin' AND ("@timestamp">=parseDateTime64BestEffort('2024-01-22T14:34:35.873Z') AND "@timestamp"<=parseDateTime64BestEffort('2024-01-22T14:49:35.873Z')))`), - //}, []string{}, []string{ `SELECT uniqMerge(uniqState("namespace")) OVER () AS "metric__unique_terms_col_0" @@ -1673,9 +1620,7 @@ var TestsSearch = []SearchTestCase{ "namespace" AS "aggr__suggestions__key_0", count(*) AS "aggr__suggestions__count" FROM __quesma_table_name - WHERE ("service.name"='admin' AND ("@timestamp">=parseDateTime64BestEffort( - '2024-01-22T14:34:35.873Z') AND "@timestamp"<=parseDateTime64BestEffort( - '2024-01-22T14:49:35.873Z'))) + WHERE ("service.name"='admin' AND ("@timestamp">=fromUnixTimestamp64Milli(1705934075873) AND "@timestamp"<=fromUnixTimestamp64Milli(1705934975873))) GROUP BY "namespace" AS "aggr__suggestions__key_0" ORDER BY "aggr__suggestions__count" DESC, "aggr__suggestions__key_0" ASC LIMIT 11`, @@ -1743,17 +1688,12 @@ var TestsSearch = []SearchTestCase{ }`, []string{ `(("message" iLIKE '%User logged out%' AND "host.name" iLIKE '%poseidon%') ` + - `AND ("@timestamp">=parseDateTime64BestEffort('2024-01-29T15:36:36.491Z') ` + - `AND "@timestamp"<=parseDateTime64BestEffort('2024-01-29T18:11:36.491Z')))`, + `AND ("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<=fromUnixTimestamp64Milli(1706551896491)))`, `((("message" iLIKE '%User logged out%' AND "host.name" iLIKE '%poseidon%') ` + - `AND ("@timestamp">=parseDateTime64BestEffort('2024-01-29T15:36:36.491Z') ` + - `AND "@timestamp"<=parseDateTime64BestEffort('2024-01-29T18:11:36.491Z'))) ` + + `AND ("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<=fromUnixTimestamp64Milli(1706551896491))) ` + `AND "stream.namespace" IS NOT NULL)`, }, model.Normal, - ////[]model.Query{ - // 
justSimplestWhere(`(("message" iLIKE '%User logged out%' AND "host.name" iLIKE '%poseidon%') AND ("@timestamp">=parseDateTime64BestEffort('2024-01-29T15:36:36.491Z') AND "@timestamp"<=parseDateTime64BestEffort('2024-01-29T18:11:36.491Z')))`), - //}, []string{}, []string{ `SELECT uniqMerge(uniqState("stream.namespace")) OVER () AS @@ -1764,8 +1704,7 @@ var TestsSearch = []SearchTestCase{ count(*) AS "aggr__suggestions__count" FROM __quesma_table_name WHERE (("message" iLIKE '%User logged out%' AND "host.name" iLIKE '%poseidon%') - AND ("@timestamp">=parseDateTime64BestEffort('2024-01-29T15:36:36.491Z') AND - "@timestamp"<=parseDateTime64BestEffort('2024-01-29T18:11:36.491Z'))) + AND ("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<=fromUnixTimestamp64Milli(1706551896491))) GROUP BY "stream.namespace" AS "aggr__suggestions__key_0" ORDER BY "aggr__suggestions__count" DESC, "aggr__suggestions__key_0" ASC LIMIT 11`, @@ -1829,16 +1768,11 @@ var TestsSearch = []SearchTestCase{ "timeout": "1000ms" }`, []string{ - `((` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort('2024-01-22T09:26:10.299Z') ` + - `AND "@timestamp"<=parseDateTime64BestEffort('2024-01-22T09:41:10.299Z'))) ` + + `((` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299))) ` + `AND "namespace" IS NOT NULL)`, - `(` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort('2024-01-22T09:26:10.299Z') ` + - `AND "@timestamp"<=parseDateTime64BestEffort('2024-01-22T09:41:10.299Z')))`, + `(` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299)))`, }, model.Normal, - ////[]model.Query{ - // justSimplestWhere(`("message" iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort('2024-01-22T09:26:10.299Z') AND "@timestamp"<=parseDateTime64BestEffort('2024-01-22T09:41:10.299Z')))`), - //}, []string{}, []string{ `SELECT uniqMerge(uniqState("namespace")) OVER () AS "metric__unique_terms_col_0" @@ -1846,9 +1780,7 @@ var TestsSearch = []SearchTestCase{ "namespace" AS "aggr__suggestions__key_0", count(*) AS "aggr__suggestions__count" FROM __quesma_table_name - WHERE ("message" iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort( - '2024-01-22T09:26:10.299Z') AND "@timestamp"<=parseDateTime64BestEffort( - '2024-01-22T09:41:10.299Z'))) + WHERE ("message" iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299))) GROUP BY "namespace" AS "aggr__suggestions__key_0" ORDER BY "aggr__suggestions__count" DESC, "aggr__suggestions__key_0" ASC LIMIT 11`, @@ -1916,17 +1848,12 @@ var TestsSearch = []SearchTestCase{ }`, []string{ `((("message" iLIKE '%User logged out%' AND "host.name" iLIKE '%poseidon%') ` + - `AND ("@timestamp">=parseDateTime64BestEffort('2024-01-29T15:36:36.491Z') ` + - `AND "@timestamp"<=parseDateTime64BestEffort('2024-01-29T18:11:36.491Z'))) ` + + `AND ("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<=fromUnixTimestamp64Milli(1706551896491))) ` + `AND "namespace" IS NOT NULL)`, `(("message" iLIKE '%User logged out%' AND "host.name" iLIKE '%poseidon%') ` + - `AND ("@timestamp">=parseDateTime64BestEffort('2024-01-29T15:36:36.491Z') ` + - `AND "@timestamp"<=parseDateTime64BestEffort('2024-01-29T18:11:36.491Z')))`, + `AND 
("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<=fromUnixTimestamp64Milli(1706551896491)))`, }, model.Normal, - ////[]model.Query{ - // justSimplestWhere(`(("message" iLIKE '%User logged out%' AND "host.name" iLIKE '%poseidon%') AND ("@timestamp">=parseDateTime64BestEffort('2024-01-29T15:36:36.491Z') AND "@timestamp"<=parseDateTime64BestEffort('2024-01-29T18:11:36.491Z')))`), - //}, []string{}, []string{ `SELECT uniqMerge(uniqState("namespace")) OVER () AS "metric__unique_terms_col_0" @@ -1935,8 +1862,7 @@ var TestsSearch = []SearchTestCase{ count(*) AS "aggr__suggestions__count" FROM __quesma_table_name WHERE (("message" iLIKE '%User logged out%' AND "host.name" iLIKE '%poseidon%') - AND ("@timestamp">=parseDateTime64BestEffort('2024-01-29T15:36:36.491Z') AND - "@timestamp"<=parseDateTime64BestEffort('2024-01-29T18:11:36.491Z'))) + AND ("@timestamp">=fromUnixTimestamp64Milli(1706542596491) AND "@timestamp"<=fromUnixTimestamp64Milli(1706551896491))) GROUP BY "namespace" AS "aggr__suggestions__key_0" ORDER BY "aggr__suggestions__count" DESC, "aggr__suggestions__key_0" ASC LIMIT 11`, @@ -2000,16 +1926,10 @@ var TestsSearch = []SearchTestCase{ "timeout": "1000ms" }`, []string{ - `((` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort('2024-01-22T09:26:10.299Z') ` + - `AND "@timestamp"<=parseDateTime64BestEffort('2024-01-22T09:41:10.299Z'))) ` + - `AND "namespace" IS NOT NULL)`, - `(` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort('2024-01-22T09:26:10.299Z') ` + - `AND "@timestamp"<=parseDateTime64BestEffort('2024-01-22T09:41:10.299Z')))`, + `((` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299))) AND "namespace" IS NOT NULL)`, + `(` + fullTextFieldName + ` iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299)))`, }, model.Normal, - ////[]model.Query{ - // justSimplestWhere(`("message" iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort('2024-01-22T09:26:10.299Z') AND "@timestamp"<=parseDateTime64BestEffort('2024-01-22T09:41:10.299Z')))`), - //}, []string{}, []string{ `SELECT uniqMerge(uniqState("namespace")) OVER () AS "metric__unique_terms_col_0" @@ -2017,9 +1937,7 @@ var TestsSearch = []SearchTestCase{ "namespace" AS "aggr__suggestions__key_0", count(*) AS "aggr__suggestions__count" FROM __quesma_table_name - WHERE ("message" iLIKE '%user%' AND ("@timestamp">=parseDateTime64BestEffort( - '2024-01-22T09:26:10.299Z') AND "@timestamp"<=parseDateTime64BestEffort( - '2024-01-22T09:41:10.299Z'))) + WHERE ("message" iLIKE '%user%' AND ("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp"<=fromUnixTimestamp64Milli(1705916470299))) GROUP BY "namespace" AS "aggr__suggestions__key_0" ORDER BY "aggr__suggestions__count" DESC, "aggr__suggestions__key_0" ASC LIMIT 11`, @@ -2061,7 +1979,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{""}, model.ListByField, - //[]model.Query{withLimit(newSimplestQuery(), 500)}, []string{ `SELECT count(*) FROM ` + TableName, `SELECT "message" FROM ` + TableName + ` LIMIT 500`, @@ -2081,7 +1998,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{``}, model.ListAllFields, - //[]model.Query{justSimplestWhere(``)}, []string{ `SELECT count(*) FROM ` + TableName, `SELECT "message" FROM ` + TableName + ` LIMIT 10`, @@ -2101,7 +2017,6 @@ var TestsSearch = []SearchTestCase{ 
}`, []string{``}, model.ListAllFields, - //[]model.Query{justSimplestWhere(``)}, []string{ `SELECT "message" FROM ` + TableName + ` LIMIT 10`, }, @@ -2120,7 +2035,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{``}, model.ListAllFields, - //[]model.Query{justSimplestWhere(``)}, []string{`SELECT "message" FROM ` + TableName}, []string{}, }, @@ -2140,7 +2054,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{``}, model.ListAllFields, - //[]model.Query{justSimplestWhere(``)}, []string{ `SELECT count(*) FROM ` + TableName, `SELECT "message" FROM ` + TableName, @@ -2174,7 +2087,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`("message" iLIKE '%User logged out%' AND "message" iLIKE '%User logged out%')`}, model.ListAllFields, - //[]model.Query{justSimplestWhere(`("message" iLIKE '%User logged out%' AND "message" iLIKE '%User logged out%')`)}, []string{ `SELECT "message" ` + `FROM ` + TableName + ` ` + @@ -2187,7 +2099,6 @@ var TestsSearch = []SearchTestCase{ `{}`, []string{""}, model.ListAllFields, - //[]model.Query{newSimplestQuery()}, []string{ `SELECT count(*) FROM (SELECT 1 FROM ` + TableName + ` LIMIT 10000)`, `SELECT "message" FROM __quesma_table_name LIMIT 10`, @@ -2209,7 +2120,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`"user.id"='kimchy'`}, model.ListAllFields, - //[]model.Query{justSimplestWhere(`"user.id"='kimchy'`)}, []string{`SELECT "message" FROM ` + TableName + ` WHERE "user.id"='kimchy'`}, []string{}, }, @@ -2238,15 +2148,12 @@ var TestsSearch = []SearchTestCase{ "track_total_hits": false }`, []string{ - `("@timestamp">=parseDateTime64BestEffort('2024-01-22T09:26:10.299Z') AND "@timestamp" = toDateTime64('2024-05-24 13:32:47.307',3))`, + `("@timestamp">=fromUnixTimestamp64Milli(1705915570299) AND "@timestamp" = toDateTime64('2024-05-24 13:32:47.307',3))`, }, model.ListAllFields, - //[]model.Query{ - // justSimplestWhere(`("@timestamp">=parseDateTime64BestEffort('2024-01-22T09:26:10.299Z') AND "@timestamp" = toDateTime64('2024-05-24 13:32:47.307',3))`), - //}, // TestSearchHandler is pretty blunt with config loading so the test below can't be used. 
// We will probably refactor it as we move forwards with schema which will get even more side-effecting - []string{`SELECT "message" FROM ` + TableName + ` WHERE "@timestamp".=parseDateTime64BestEffort('2024-01-22T09:..:10.299Z')`}, + []string{`SELECT "message" FROM ` + TableName + ` WHERE "@timestamp">=fromUnixTimestamp64Milli(1705915570299)`}, []string{}, }, { // [34] Comments in queries @@ -2263,7 +2170,6 @@ var TestsSearch = []SearchTestCase{ }`, []string{`"user.id"='kimchy'`}, model.ListAllFields, - //[]model.Query{justSimplestWhere(`"user.id"='kimchy'`)}, []string{`SELECT "message" FROM ` + TableName + ` WHERE "user.id"='kimchy'`}, []string{}, }, @@ -2296,15 +2202,14 @@ var TestsSearch = []SearchTestCase{ }, "track_total_hits": false }`, - []string{`("cliIP" IN ('2601:204:c503:c240:9c41:5531:ad94:4d90','50.116.43.98','75.246.0.64') AND ("@timestamp">=parseDateTime64BestEffort('2024-05-16T00:00:00') AND "@timestamp"<=parseDateTime64BestEffort('2024-05-17T23:59:59')))`}, + []string{`("cliIP" IN ('2601:204:c503:c240:9c41:5531:ad94:4d90','50.116.43.98','75.246.0.64') AND ("@timestamp">=fromUnixTimestamp64Milli(1715817600000) AND "@timestamp"<=fromUnixTimestamp64Milli(1715990399000)))`}, model.ListAllFields, //[]model.Query{withLimit(justSimplestWhere(`("cliIP" IN ('2601:204:c503:c240:9c41:5531:ad94:4d90','50.116.43.98','75.246.0.64') AND ("@timestamp">=parseDateTime64BestEffort('2024-05-16T00:00:00') AND "@timestamp"<=parseDateTime64BestEffort('2024-05-17T23:59:59')))`), 1)}, []string{ `SELECT "message" ` + `FROM ` + TableName + ` ` + `WHERE ("cliIP" IN ('2601:204:c503:c240:9c41:5531:ad94:4d90','50.116.43.98','75.246.0.64') ` + - `AND ("@timestamp">=parseDateTime64BestEffort('2024-05-16T00:00:00') ` + - `AND "@timestamp"<=parseDateTime64BestEffort('2024-05-17T23:59:59'))) ` + + `AND ("@timestamp">=fromUnixTimestamp64Milli(1715817600000) AND "@timestamp"<=fromUnixTimestamp64Milli(1715990399000))) ` + `LIMIT 1`, }, []string{}, @@ -2491,16 +2396,12 @@ var TestsSearchNoAttrs = []SearchTestCase{ "track_total_hits": false }`, []string{ - `("@timestamp">=parseDateTime64BestEffort('2024-01-25T13:22:45.968Z') AND "@timestamp"<=parseDateTime64BestEffort('2024-01-25T13:37:45.968Z'))`, + `("@timestamp">=fromUnixTimestamp64Milli(1706188965968) AND "@timestamp"<=fromUnixTimestamp64Milli(1706189865968))`, }, model.ListAllFields, - //[]model.Query{ - // justSimplestWhere(`("@timestamp">=parseDateTime64BestEffort('2024-01-25T13:22:45.968Z') AND "@timestamp"<=parseDateTime64BestEffort('2024-01-25T13:37:45.968Z'))`), - //}, []string{ `SELECT "message" FROM ` + TableName + ` ` + - `WHERE ((("@timestamp".=parseDateTime64BestEffort('2024-01-25T13:22:45.968Z') ` + - `AND "@timestamp".=parseDateTime64BestEffort('2024-01-25T13:37:45.968Z')) ` + + `WHERE ((("@timestamp">=fromUnixTimestamp64Milli(1706188965968) AND "@timestamp"<=fromUnixTimestamp64Milli(1706189865968)) ` + `AND (has("attributes_string_key","summary") AND "attributes_string_value"[indexOf("attributes_string_key","summary")] IS NOT NULL)) ` + `AND NOT ((has("attributes_string_key","run_once") AND "attributes_string_value"[indexOf("attributes_string_key","run_once")] IS NOT NULL))) ` + `LIMIT 10`, @@ -2548,15 +2449,8 @@ var TestSearchFilter = []SearchTestCase{ "*" ] }`, - []string{ - ``, - ``, - }, + []string{}, model.Normal, - //[]model.Query{ - // justSimplestWhere(``), - // justSimplestWhere(``), - //}, []string{}, []string{ `SELECT toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS "aggr__0__key_0" @@ -2612,15 +2506,8 @@ var 
TestSearchFilter = []SearchTestCase{ ], "track_total_hits": true }`, - []string{ - ``, - ``, - }, + []string{}, model.Normal, - //[]model.Query{ - // justSimplestWhere(``), - // justSimplestWhere(``), - //}, []string{}, []string{ `SELECT sum(count(*)) OVER () AS "metric____quesma_total_count_col_0", @@ -2634,6 +2521,67 @@ var TestSearchFilter = []SearchTestCase{ }, }, { // [2] + "Range with int timestamps", + `{ + "_source": { + "excludes": [] + }, + "aggs": { + "0": { + "date_histogram": { + "field": "@timestamp", + "fixed_interval": "30s", + "min_doc_count": 1 + } + } + }, + "fields": [ + { + "field": "@timestamp", + "format": "date_time" + } + ], + "query": { + "bool": { + "filter": [ + { + "range": { + "@timestamp": { + "format": "epoch_millis||strict_date_optional_time", + "gte": 1727858503270, + "lte": 1727859403270 + } + } + } + ], + "must": [], + "must_not": [], + "should": [] + } + }, + "runtime_mappings": {}, + "script_fields": {}, + "size": 0, + "stored_fields": [ + "*" + ], + "track_total_hits": true + }`, + []string{}, + model.Normal, + []string{}, + []string{ + `SELECT sum(count(*)) OVER () AS "metric____quesma_total_count_col_0", + toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS "aggr__0__key_0", + count(*) AS "aggr__0__count" + FROM __quesma_table_name + WHERE ("@timestamp">=fromUnixTimestamp64Milli(1727858503270) AND "@timestamp"<=fromUnixTimestamp64Milli(1727859403270)) + GROUP BY toInt64(toUnixTimestamp64Milli("@timestamp") / 30000) AS + "aggr__0__key_0" + ORDER BY "aggr__0__key_0" ASC`, + }, + }, + { // [3] "Empty filter", ` { @@ -2644,13 +2592,12 @@ var TestSearchFilter = []SearchTestCase{ }, "track_total_hits": false }`, - []string{``}, + []string{}, model.Normal, - //[]model.Query{justSimplestWhere(``)}, []string{`SELECT "message" FROM ` + TableName + ` LIMIT 10`}, []string{}, }, - { // [3] + { // [4] "Empty filter with other clauses", ` { @@ -2680,10 +2627,6 @@ var TestSearchFilter = []SearchTestCase{ `("user.id"='kimchy' AND ("tags"='env1' OR "tags"='deployed')) AND NOT ("age">=10 AND "age"<=20)`, }, model.Normal, - //[]model.Query{ - // justSimplestWhere(`("user.id"='kimchy' AND ("tags"='env1' OR "tags"='deployed')) AND NOT ("age"<=20 AND "age">=10)`), - // justSimplestWhere(`("user.id"='kimchy' AND ("tags"='env1' OR "tags"='deployed')) AND NOT ("age">=10 AND "age"<=20)`), - //}, []string{ `SELECT "message" ` + `FROM ` + TableName + ` ` +
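Note: the expectation changes above all follow one pattern: range bounds that used to be rendered as parseDateTime64BestEffort('<ISO-8601>') are now rendered as fromUnixTimestamp64Milli(<epoch millis>), which denotes the same UTC instant (e.g. 2024-09-07T15:30:24.239Z is 1725723024239 ms since the Unix epoch, and the 15-day window ends at 1727019024239 ms, exactly 1,296,000,000 ms later). A minimal Go sketch of that rendering step follows; it is illustrative only, and the helper name renderTimeBound and its exact dispatch logic are assumptions for this sketch, not code from this PR:

    package main

    import (
        "fmt"
        "strconv"
        "time"
    )

    // renderTimeBound renders a range bound as the kind of ClickHouse expression
    // the updated expected SQL uses. A bound may arrive as epoch millis (JSON
    // numbers decode to float64), as a numeric string, or as an ISO-8601 string;
    // each form is normalized to epoch millis first, so 1716811196627 and
    // "2024-05-27T11:59:56.627Z" render to the same expression.
    func renderTimeBound(bound any) (string, error) {
        switch v := bound.(type) {
        case int64:
            return fmt.Sprintf("fromUnixTimestamp64Milli(%d)", v), nil
        case float64:
            return fmt.Sprintf("fromUnixTimestamp64Milli(%d)", int64(v)), nil
        case string:
            if ms, err := strconv.ParseInt(v, 10, 64); err == nil {
                return fmt.Sprintf("fromUnixTimestamp64Milli(%d)", ms), nil
            }
            // time.Parse accepts fractional seconds even though the RFC3339
            // layout string does not spell them out.
            if t, err := time.Parse(time.RFC3339, v); err == nil {
                return fmt.Sprintf("fromUnixTimestamp64Milli(%d)", t.UnixMilli()), nil
            }
        }
        return "", fmt.Errorf("unsupported range bound: %v", bound)
    }

    func main() {
        // Both forms of the same instant render identically, which is why the
        // expected SQL could drop the parseDateTime64BestEffort('...') literals.
        for _, b := range []any{int64(1716811196627), "2024-05-27T11:59:56.627Z"} {
            expr, _ := renderTimeBound(b)
            fmt.Println(expr) // fromUnixTimestamp64Milli(1716811196627)
        }
    }

One practical benefit of the numeric form: the "Range with int timestamps" case added in the last hunk sends gte/lte as plain integers (format "epoch_millis||strict_date_optional_time"), and emitting fromUnixTimestamp64Milli lets such bounds pass through as-is instead of round-tripping through a timezone-sensitive string parse.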