diff --git a/pkg/unify-query/Makefile b/pkg/unify-query/Makefile index a0ddf37ad..9e1723ec5 100644 --- a/pkg/unify-query/Makefile +++ b/pkg/unify-query/Makefile @@ -45,8 +45,8 @@ lint: .PHONY: addlicense addlicense: - find ./ -type f \( -iname \*.go -o -iname \*.py -iname \*.sh \)|xargs addlicense -v -f ../../scripts/license.txt -ignore vendor/* + find ./ -type f \( -iname \*.go -o -iname \*.py -o -iname \*.sh \) | xargs addlicense -v -f ../../scripts/license.txt -ignore vendor/* .PHONY: imports imports: addlicense - goimports-reviser -rm-unused -set-alias -format ./... + goimports-reviser -rm-unused -set-alias -format -project-name "github.com/TencentBlueKing/bkmonitor-datalink/pkg" ./... diff --git a/pkg/unify-query/influxdb/router_mock.go b/pkg/unify-query/influxdb/router_mock.go index f555189c8..31c42425c 100644 --- a/pkg/unify-query/influxdb/router_mock.go +++ b/pkg/unify-query/influxdb/router_mock.go @@ -49,6 +49,12 @@ func MockSpaceRouter(ctx context.Context) { "false": false }, "targeting": [ + { + "query": "spaceUID in [\"bkdata\"]", + "percentage": { + "false": 100 + } + } ], "defaultRule": { "variation": "true" diff --git a/pkg/unify-query/internal/function/function.go b/pkg/unify-query/internal/function/function.go index d7d492f11..907672b10 100644 --- a/pkg/unify-query/internal/function/function.go +++ b/pkg/unify-query/internal/function/function.go @@ -10,6 +10,8 @@ package function import ( + "time" + "github.com/prometheus/prometheus/model/labels" ) @@ -24,3 +26,40 @@ func MatcherToMetricName(matchers ...*labels.Matcher) string { return "" } + +func RangeDateWithUnit(unit string, start, end time.Time, step int) (dates []string) { + var ( + addYear int + addMonth int + addDay int + toDate func(t time.Time) time.Time + format string + ) + + switch unit { + case "year": + addYear = step + format = "2006" + toDate = func(t time.Time) time.Time { + return time.Date(t.Year(), 1, 1, 0, 0, 0, 0, t.Location()) + } + case "month": + addMonth = step + format = 
"200601" + toDate = func(t time.Time) time.Time { + return time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location()) + } + default: + addDay = step + format = "20060102" + toDate = func(t time.Time) time.Time { + return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location()) + } + } + + for d := toDate(start); !d.After(toDate(end)); d = d.AddDate(addYear, addMonth, addDay) { + dates = append(dates, d.Format(format)) + } + + return dates +} diff --git a/pkg/unify-query/metadata/featureFlag.go b/pkg/unify-query/metadata/featureFlag.go index e5d8b1166..fa4ad6d08 100644 --- a/pkg/unify-query/metadata/featureFlag.go +++ b/pkg/unify-query/metadata/featureFlag.go @@ -83,7 +83,8 @@ func GetMustVmQueryFeatureFlag(ctx context.Context, tableID string) bool { span.Set("ff-user-custom", ffUser.GetCustom()) - status := featureFlag.BoolVariation(ctx, ffUser, "must-vm-query", false) + // 如果匹配不到,则默认查询 vm + status := featureFlag.BoolVariation(ctx, ffUser, "must-vm-query", true) // 根据查询时间范围判断是否满足当前时间配置 vmDataTime := featureFlag.IntVariation(ctx, ffUser, "range-vm-query", 0) diff --git a/pkg/unify-query/metadata/featureFlag_test.go b/pkg/unify-query/metadata/featureFlag_test.go index 394c286ab..9b4d1d68f 100644 --- a/pkg/unify-query/metadata/featureFlag_test.go +++ b/pkg/unify-query/metadata/featureFlag_test.go @@ -19,6 +19,51 @@ import ( "github.com/TencentBlueKing/bkmonitor-datalink/pkg/unify-query/log" ) +func TestGetBkDataTableIDCheck(t *testing.T) { + ctx := InitHashID(context.Background()) + InitMetadata() + + featureFlag.MockFeatureFlag(ctx, `{ + "bk-data-table-id-auth": { + "variations": { + "Default": true, + "true": true, + "false": false + }, + "targeting": [ + { + "query": "spaceUid in [\"bkdata\", \"bkdata_1\"]", + "percentage": { + "false": 100, + "true": 0 + } + } + ], + "defaultRule": { + "variation": "Default" + } + } +}`) + + var actual bool + + SetUser(ctx, "", "bkdata_1", "") + actual = GetBkDataTableIDCheck(ctx) + assert.Equal(t, false, actual) + + 
SetUser(ctx, "", "bkmonitor", "") + actual = GetBkDataTableIDCheck(ctx) + assert.Equal(t, true, actual) + + SetUser(ctx, "", "bkdata_1_1", "") + actual = GetBkDataTableIDCheck(ctx) + assert.Equal(t, true, actual) + + SetUser(ctx, "", "bkdata", "") + actual = GetBkDataTableIDCheck(ctx) + assert.Equal(t, false, actual) +} + func TestGetMustVmQueryFeatureFlag(t *testing.T) { ctx := context.Background() diff --git a/pkg/unify-query/tsdb/bksql/format.go b/pkg/unify-query/tsdb/bksql/format.go index b3b91f342..74dde1170 100644 --- a/pkg/unify-query/tsdb/bksql/format.go +++ b/pkg/unify-query/tsdb/bksql/format.go @@ -19,6 +19,7 @@ import ( "github.com/prometheus/prometheus/prompb" + "github.com/TencentBlueKing/bkmonitor-datalink/pkg/unify-query/internal/function" "github.com/TencentBlueKing/bkmonitor-datalink/pkg/unify-query/metadata" ) @@ -131,6 +132,25 @@ func (f *QueryFactory) ParserQuery() (err error) { return } +func (f *QueryFactory) getTheDateFilters() (theDateFilter string) { + // bkbase 使用 时区东八区 转换为 thedate + loc, _ := time.LoadLocation("Asia/Shanghai") + start := f.start.In(loc) + end := f.end.In(loc) + + dates := function.RangeDateWithUnit("day", start, end, 1) + + if len(dates) == 0 { + return "" + } + + if len(dates) == 1 { + return fmt.Sprintf("`%s` = '%s'", theDate, dates[0]) + } + + return fmt.Sprintf("`%s` >= '%s' AND `%s` <= '%s'", theDate, dates[0], theDate, dates[len(dates)-1]) +} + func (f *QueryFactory) SQL() (sql string, err error) { f.sql.Reset() err = f.ParserQuery() @@ -151,6 +171,13 @@ func (f *QueryFactory) SQL() (sql string, err error) { f.write(db) f.write("WHERE") f.write(fmt.Sprintf("`%s` >= %d AND `%s` < %d", f.timeField, f.start.UnixMilli(), f.timeField, f.end.UnixMilli())) + + theDateFilter := f.getTheDateFilters() + if theDateFilter != "" { + f.write("AND") + f.write(theDateFilter) + } + if f.query.BkSqlCondition != "" { f.write("AND") f.write("(" + f.query.BkSqlCondition + ")") diff --git a/pkg/unify-query/tsdb/bksql/format_test.go 
b/pkg/unify-query/tsdb/bksql/format_test.go index f5069279d..b9aa19043 100644 --- a/pkg/unify-query/tsdb/bksql/format_test.go +++ b/pkg/unify-query/tsdb/bksql/format_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/assert" + "github.com/TencentBlueKing/bkmonitor-datalink/pkg/unify-query/log" "github.com/TencentBlueKing/bkmonitor-datalink/pkg/unify-query/metadata" ) @@ -26,6 +27,9 @@ func TestNewSqlFactory(t *testing.T) { for name, c := range map[string]struct { query *metadata.Query expected string + + start time.Time + end time.Time }{ "sum-count_over_time-with-promql-1": { query: &metadata.Query{ @@ -46,7 +50,7 @@ func TestNewSqlFactory(t *testing.T) { Size: 0, Orders: metadata.Orders{"ip": true}, }, - expected: "SELECT `ip`, COUNT(`gseIndex`) AS `_value_`, MAX((`dtEventTimeStamp` - (`dtEventTimeStamp` % 60000))) AS `_timestamp_` FROM `100133_ieod_logsearch4_errorlog_p`.doris WHERE `dtEventTimeStamp` >= 1717144141000 AND `dtEventTimeStamp` < 1717147741000 AND (gseIndex > 0) GROUP BY `ip`, (`dtEventTimeStamp` - (`dtEventTimeStamp` % 60000)) ORDER BY `_timestamp_` ASC, `ip` ASC", + expected: "SELECT `ip`, COUNT(`gseIndex`) AS `_value_`, MAX((`dtEventTimeStamp` - (`dtEventTimeStamp` % 60000))) AS `_timestamp_` FROM `100133_ieod_logsearch4_errorlog_p`.doris WHERE `dtEventTimeStamp` >= 1717144141000 AND `dtEventTimeStamp` < 1717147741000 AND `thedate` = '20240531' AND (gseIndex > 0) GROUP BY `ip`, (`dtEventTimeStamp` - (`dtEventTimeStamp` % 60000)) ORDER BY `_timestamp_` ASC, `ip` ASC", }, "sum-with-promql-1": { query: &metadata.Query{ @@ -66,9 +70,33 @@ func TestNewSqlFactory(t *testing.T) { Size: 10, Orders: nil, }, - expected: "SELECT `ip`, SUM(`gseIndex`) AS `_value_` FROM `100133_ieod_logsearch4_errorlog_p`.doris WHERE `dtEventTimeStamp` >= 1717144141000 AND `dtEventTimeStamp` < 1717147741000 AND (gseIndex > 0) GROUP BY `ip` LIMIT 10", + expected: "SELECT `ip`, SUM(`gseIndex`) AS `_value_` FROM `100133_ieod_logsearch4_errorlog_p`.doris WHERE 
`dtEventTimeStamp` >= 1717144141000 AND `dtEventTimeStamp` < 1717147741000 AND `thedate` = '20240531' AND (gseIndex > 0) GROUP BY `ip` LIMIT 10", + }, + "count-with-count-promql-1": { + query: &metadata.Query{ + DB: "100133_ieod_logsearch4_errorlog_p", + Measurement: "doris", + Field: "gseIndex", + Aggregates: metadata.Aggregates{ + { + Name: "count", + Dimensions: []string{ + "ip", + }, + Window: time.Minute, + }, + }, + BkSqlCondition: "gseIndex > 0", + }, + expected: "SELECT `ip`, COUNT(`gseIndex`) AS `_value_`, MAX((`dtEventTimeStamp` - (`dtEventTimeStamp` % 60000))) AS `_timestamp_` FROM `100133_ieod_logsearch4_errorlog_p`.doris WHERE `dtEventTimeStamp` >= 1717144141000 AND `dtEventTimeStamp` < 1717147741000 AND `thedate` = '20240531' AND (gseIndex > 0) GROUP BY `ip`, (`dtEventTimeStamp` - (`dtEventTimeStamp` % 60000)) ORDER BY `_timestamp_` ASC", }, - "count-without-promql-1": { + "count-with-count-promql-2": { + // 2024-12-07 21:36:40 UTC + // 2024-12-08 05:36:40 Asia/ShangHai + start: time.Unix(1733607400, 0), + // 2024-12-11 17:49:35 UTC + // 2024-12-12 01:49:35 Asia/ShangHai + end: time.Unix(1733939375, 0), query: &metadata.Query{ DB: "100133_ieod_logsearch4_errorlog_p", Measurement: "doris", @@ -84,12 +112,20 @@ func TestNewSqlFactory(t *testing.T) { }, BkSqlCondition: "gseIndex > 0", }, - expected: "SELECT `ip`, COUNT(`gseIndex`) AS `_value_`, MAX((`dtEventTimeStamp` - (`dtEventTimeStamp` % 60000))) AS `_timestamp_` FROM `100133_ieod_logsearch4_errorlog_p`.doris WHERE `dtEventTimeStamp` >= 1717144141000 AND `dtEventTimeStamp` < 1717147741000 AND (gseIndex > 0) GROUP BY `ip`, (`dtEventTimeStamp` - (`dtEventTimeStamp` % 60000)) ORDER BY `_timestamp_` ASC", + expected: "SELECT `ip`, COUNT(`gseIndex`) AS `_value_`, MAX((`dtEventTimeStamp` - (`dtEventTimeStamp` % 60000))) AS `_timestamp_` FROM `100133_ieod_logsearch4_errorlog_p`.doris WHERE `dtEventTimeStamp` >= 1733607400000 AND `dtEventTimeStamp` < 1733939375000 AND `thedate` >= '20241208' AND `thedate` <= 
'20241212' AND (gseIndex > 0) GROUP BY `ip`, (`dtEventTimeStamp` - (`dtEventTimeStamp` % 60000)) ORDER BY `_timestamp_` ASC", }, } { t.Run(name, func(t *testing.T) { ctx := metadata.InitHashID(context.Background()) - fact := NewQueryFactory(ctx, c.query).WithRangeTime(start, end) + if c.start.IsZero() { + c.start = start + } + if c.end.IsZero() { + c.end = end + } + + log.Infof(ctx, "start: %s, end: %s", c.start, c.end) + fact := NewQueryFactory(ctx, c.query).WithRangeTime(c.start, c.end) sql, err := fact.SQL() assert.Nil(t, err) assert.Equal(t, c.expected, sql) diff --git a/pkg/unify-query/tsdb/bksql/instance_test.go b/pkg/unify-query/tsdb/bksql/instance_test.go index 194d0a6ea..080a2caae 100644 --- a/pkg/unify-query/tsdb/bksql/instance_test.go +++ b/pkg/unify-query/tsdb/bksql/instance_test.go @@ -126,6 +126,8 @@ func TestInstance_bkSql(t *testing.T) { end := time.UnixMilli(1718193555000) testCases := []struct { + start time.Time + end time.Time query *metadata.Query expected string @@ -143,7 +145,7 @@ func TestInstance_bkSql(t *testing.T) { }, }, }, - expected: "SELECT `namespace`, COUNT(`login_rate`) AS `_value_`, MAX((`dtEventTimeStamp` - (`dtEventTimeStamp` % 15000))) AS `_timestamp_` FROM `132_lol_new_login_queue_login_1min` WHERE `dtEventTimeStamp` >= 1718189940000 AND `dtEventTimeStamp` < 1718193555000 AND (namespace REGEXP '^(bgp2\\-new|gz100)$') GROUP BY `namespace`, (`dtEventTimeStamp` - (`dtEventTimeStamp` % 15000)) ORDER BY `_timestamp_` ASC", + expected: "SELECT `namespace`, COUNT(`login_rate`) AS `_value_`, MAX((`dtEventTimeStamp` - (`dtEventTimeStamp` % 15000))) AS `_timestamp_` FROM `132_lol_new_login_queue_login_1min` WHERE `dtEventTimeStamp` >= 1718189940000 AND `dtEventTimeStamp` < 1718193555000 AND `thedate` = '20240612' AND (namespace REGEXP '^(bgp2\\-new|gz100)$') GROUP BY `namespace`, (`dtEventTimeStamp` - (`dtEventTimeStamp` % 15000)) ORDER BY `_timestamp_` ASC", }, { query: &metadata.Query{ @@ -156,7 +158,7 @@ func 
TestInstance_bkSql(t *testing.T) { }, }, - expected: "SELECT SUM(`value`) AS `_value_` FROM `132_hander_opmon_avg` WHERE `dtEventTimeStamp` >= 1718189940000 AND `dtEventTimeStamp` < 1718193555000", + expected: "SELECT SUM(`value`) AS `_value_` FROM `132_hander_opmon_avg` WHERE `dtEventTimeStamp` >= 1718189940000 AND `dtEventTimeStamp` < 1718193555000 AND `thedate` = '20240612'", }, { query: &metadata.Query{ @@ -165,7 +167,7 @@ func TestInstance_bkSql(t *testing.T) { Field: "value", Size: 5, }, - expected: "SELECT *, `value` AS `_value_`, `dtEventTimeStamp` AS `_timestamp_` FROM `100133_ieod_logsearch4_errorlog_p`.doris WHERE `dtEventTimeStamp` >= 1718189940000 AND `dtEventTimeStamp` < 1718193555000 LIMIT 5", + expected: "SELECT *, `value` AS `_value_`, `dtEventTimeStamp` AS `_timestamp_` FROM `100133_ieod_logsearch4_errorlog_p`.doris WHERE `dtEventTimeStamp` >= 1718189940000 AND `dtEventTimeStamp` < 1718193555000 AND `thedate` = '20240612' LIMIT 5", }, { query: &metadata.Query{ @@ -176,7 +178,7 @@ func TestInstance_bkSql(t *testing.T) { "_time": false, }, }, - expected: "SELECT *, `value` AS `_value_`, `dtEventTimeStamp` AS `_timestamp_` FROM `100133_ieod_logsearch4_errorlog_p`.doris WHERE `dtEventTimeStamp` >= 1718189940000 AND `dtEventTimeStamp` < 1718193555000 ORDER BY `_timestamp_` DESC", + expected: "SELECT *, `value` AS `_value_`, `dtEventTimeStamp` AS `_timestamp_` FROM `100133_ieod_logsearch4_errorlog_p`.doris WHERE `dtEventTimeStamp` >= 1718189940000 AND `dtEventTimeStamp` < 1718193555000 AND `thedate` = '20240612' ORDER BY `_timestamp_` DESC", }, { query: &metadata.Query{ @@ -194,14 +196,52 @@ func TestInstance_bkSql(t *testing.T) { Size: 5, }, - expected: "SELECT `ip`, COUNT(`gseIndex`) AS `_value_` FROM `100133_ieod_logsearch4_errorlog_p`.doris WHERE `dtEventTimeStamp` >= 1718189940000 AND `dtEventTimeStamp` < 1718193555000 GROUP BY `ip` LIMIT 5", + expected: "SELECT `ip`, COUNT(`gseIndex`) AS `_value_` FROM `100133_ieod_logsearch4_errorlog_p`.doris 
WHERE `dtEventTimeStamp` >= 1718189940000 AND `dtEventTimeStamp` < 1718193555000 AND `thedate` = '20240612' GROUP BY `ip` LIMIT 5", + }, + { + start: time.Unix(1733756400, 0), + end: time.Unix(1733846399, 0), + query: &metadata.Query{ + DB: "101068_MatchFullLinkTimeConsumptionFlow_CostTime", + Field: "matchstep_start_to_fail_0_100", + Aggregates: metadata.Aggregates{ + { + Name: "count", + }, + }, + }, + + expected: "SELECT COUNT(`matchstep_start_to_fail_0_100`) AS `_value_` FROM `101068_MatchFullLinkTimeConsumptionFlow_CostTime` WHERE `dtEventTimeStamp` >= 1733756400000 AND `dtEventTimeStamp` < 1733846399000 AND `thedate` >= '20241209' AND `thedate` <= '20241210'", + }, + { + start: time.Unix(1733756400, 0), + end: time.Unix(1733846399, 0), + query: &metadata.Query{ + DB: "101068_MatchFullLinkTimeConsumptionFlow_CostTime", + Field: "matchstep_start_to_fail_0_100", + Aggregates: metadata.Aggregates{ + { + Name: "count", + Window: time.Hour, + }, + }, + }, + + expected: "SELECT COUNT(`matchstep_start_to_fail_0_100`) AS `_value_`, MAX((`dtEventTimeStamp` - (`dtEventTimeStamp` % 3600000))) AS `_timestamp_` FROM `101068_MatchFullLinkTimeConsumptionFlow_CostTime` WHERE `dtEventTimeStamp` >= 1733756400000 AND `dtEventTimeStamp` < 1733846399000 AND `thedate` >= '20241209' AND `thedate` <= '20241210' GROUP BY (`dtEventTimeStamp` - (`dtEventTimeStamp` % 3600000)) ORDER BY `_timestamp_` ASC", }, } for i, c := range testCases { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { ctx := metadata.InitHashID(context.Background()) - sql, err := NewQueryFactory(ctx, c.query).WithRangeTime(start, end).SQL() + if c.start.IsZero() { + c.start = start + } + if c.end.IsZero() { + c.end = end + } + + sql, err := NewQueryFactory(ctx, c.query).WithRangeTime(c.start, c.end).SQL() assert.Nil(t, err) if err == nil { assert.Equal(t, c.expected, sql) diff --git a/pkg/unify-query/tsdb/elasticsearch/instance.go b/pkg/unify-query/tsdb/elasticsearch/instance.go index 84cdc2092..4bfdc7ecf 100644 
--- a/pkg/unify-query/tsdb/elasticsearch/instance.go +++ b/pkg/unify-query/tsdb/elasticsearch/instance.go @@ -29,6 +29,7 @@ import ( "github.com/TencentBlueKing/bkmonitor-datalink/pkg/unify-query/consul" "github.com/TencentBlueKing/bkmonitor-datalink/pkg/unify-query/influxdb/decoder" + "github.com/TencentBlueKing/bkmonitor-datalink/pkg/unify-query/internal/function" "github.com/TencentBlueKing/bkmonitor-datalink/pkg/unify-query/log" "github.com/TencentBlueKing/bkmonitor-datalink/pkg/unify-query/metadata" "github.com/TencentBlueKing/bkmonitor-datalink/pkg/unify-query/metric" @@ -471,15 +472,6 @@ func (i *Instance) mergeTimeSeries(rets chan *TimeSeriesResult) (*prompb.QueryRe return qr, nil } -func timeToDate(t time.Time, unit string) time.Time { - switch unit { - case "month": - return time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location()) - default: - return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location()) - } -} - func (i *Instance) getAlias(ctx context.Context, db string, needAddTime bool, start, end time.Time, timezone string) ([]string, error) { var ( aliases []string @@ -512,12 +504,7 @@ func (i *Instance) getAlias(ctx context.Context, db string, needAddTime bool, st span.Set("left", left) var ( - dateFormat string - addDay int - addMonth int - - startTime time.Time - endTime time.Time + unit string ) if left > int64(time.Hour.Seconds()*24*14) { @@ -526,21 +513,17 @@ func (i *Instance) getAlias(ctx context.Context, db string, needAddTime bool, st start = end.Add(halfYear * -1) } - startTime = timeToDate(start, "month") - endTime = timeToDate(end, "month") - dateFormat = "200601" - addMonth = 1 + unit = "month" } else { - startTime = timeToDate(start, "day") - endTime = timeToDate(end, "day") - dateFormat = "20060102" - addDay = 1 + unit = "day" } newAliases := make([]string, 0) - for d := startTime; !d.After(endTime); d = d.AddDate(0, addMonth, addDay) { + dates := function.RangeDateWithUnit(unit, start, end, 1) + + for _, d := range 
dates { for _, alias := range aliases { - newAliases = append(newAliases, fmt.Sprintf("%s_%s*", alias, d.Format(dateFormat))) + newAliases = append(newAliases, fmt.Sprintf("%s_%s*", alias, d)) } }