From eda1bdeede797db6dc688512f06e6f625e57e0fe Mon Sep 17 00:00:00 2001
From: Krzysztof Kiewicz
Date: Mon, 20 May 2024 12:25:10 +0200
Subject: [PATCH] Cleanup

---
 quesma/model/pipeline_aggregations/max_bucket.go |  7 ++-----
 quesma/queryparser/aggregation_parser_test.go    | 13 +++++--------
 quesma/queryparser/lucene/lucene_parser.go       |  8 +-------
 3 files changed, 8 insertions(+), 20 deletions(-)

diff --git a/quesma/model/pipeline_aggregations/max_bucket.go b/quesma/model/pipeline_aggregations/max_bucket.go
index d67f706c8..f48ab472c 100644
--- a/quesma/model/pipeline_aggregations/max_bucket.go
+++ b/quesma/model/pipeline_aggregations/max_bucket.go
@@ -24,7 +24,8 @@ func (query MaxBucket) IsBucketAggregation() bool {
 }
 
 // FIXME I think we should return all rows, not just 1
-// dunno why it's working, maybe I'm wrong
+// Dunno why it's working, maybe I'm wrong.
+// Let's wait for this until all pipeline merges, when I'll perform some more thorough tests.
 func (query MaxBucket) TranslateSqlResponseToJson(rows []model.QueryResultRow, level int) []model.JsonMap {
 	if len(rows) == 0 {
 		logger.WarnWithCtx(query.ctx).Msg("no rows returned for max bucket aggregation")
@@ -41,10 +42,7 @@ func (query MaxBucket) TranslateSqlResponseToJson(rows []model.QueryResultRow, l
 	}
 }
 
-// TODO unify with min_bucket, move to common
 func (query MaxBucket) CalculateResultWhenMissing(qwa *model.Query, parentRows []model.QueryResultRow) []model.QueryResultRow {
-	fmt.Println("hoho")
-	fmt.Println(parentRows)
 	resultRows := make([]model.QueryResultRow, 0)
 	if len(parentRows) == 0 {
 		return resultRows // maybe null?
@@ -56,7 +54,6 @@ func (query MaxBucket) CalculateResultWhenMissing(qwa *model.Query, parentRows [
 	for _, parentRowsOneBucket := range qp.SplitResultSetIntoBuckets(parentRows, parentFieldsCnt) {
 		resultRows = append(resultRows, query.calculateSingleMaxBucket(parentRowsOneBucket))
 	}
-	fmt.Println("resultRows", resultRows)
 	return resultRows
 }
 
diff --git a/quesma/queryparser/aggregation_parser_test.go b/quesma/queryparser/aggregation_parser_test.go
index ac9749f8f..c5625e22f 100644
--- a/quesma/queryparser/aggregation_parser_test.go
+++ b/quesma/queryparser/aggregation_parser_test.go
@@ -3,13 +3,10 @@ package queryparser
 import (
 	"cmp"
 	"context"
-	"fmt"
 	"github.com/barkimedes/go-deepcopy"
-	"github.com/k0kubun/pp"
 	"github.com/stretchr/testify/assert"
 	"mitmproxy/quesma/clickhouse"
 	"mitmproxy/quesma/concurrent"
-	"mitmproxy/quesma/logger"
 	"mitmproxy/quesma/model"
 	"mitmproxy/quesma/quesma/config"
 	"mitmproxy/quesma/testdata"
@@ -545,7 +542,7 @@ func sortAggregations(aggregations []model.Query) {
 }
 
 func Test2AggregationParserExternalTestcases(t *testing.T) {
-	logger.InitSimpleLoggerForTests()
+	// logger.InitSimpleLoggerForTests()
 	table := clickhouse.Table{
 		Cols: map[string]*clickhouse.Column{
 			"@timestamp": {Name: "@timestamp", Type: clickhouse.NewBaseType("DateTime64")},
@@ -591,7 +588,7 @@ func Test2AggregationParserExternalTestcases(t *testing.T) {
 
 		// Let's leave those commented debugs for now, they'll be useful in next PRs
 		for j, aggregation := range aggregations {
-			fmt.Printf("--- Aggregation %d: %+v\n\n---SQL string: %s\n\n", j, aggregation, aggregation.String())
+			// fmt.Printf("--- Aggregation %d: %+v\n\n---SQL string: %s\n\n", j, aggregation, aggregation.String())
 			test.ExpectedResults[j] = aggregation.Type.PostprocessResults(test.ExpectedResults[j])
 			// fmt.Println("--- Group by: ", aggregation.GroupByFields)
 			if test.ExpectedSQLs[j] != "NoDBQuery" {
@@ -604,7 +601,7 @@ func Test2AggregationParserExternalTestcases(t *testing.T) {
 		expectedResultsCopy := deepcopy.MustAnything(test.ExpectedResults).([][]model.QueryResultRow)
 		// pp.Println("EXPECTED", expectedResultsCopy)
 		actualAggregationsPart := cw.MakeAggregationPartOfResponse(aggregations, test.ExpectedResults)
-		pp.Println("ACTUAL", actualAggregationsPart)
+		// pp.Println("ACTUAL", actualAggregationsPart)
 
 		fullResponse, err := cw.MakeResponseAggregationMarshalled(aggregations, expectedResultsCopy)
 		assert.NoError(t, err)
@@ -620,8 +617,8 @@ func Test2AggregationParserExternalTestcases(t *testing.T) {
 		// probability and seed are present in random_sampler aggregation. I'd assume they are not needed, thus let's not care about it for now.
 		acceptableDifference := []string{"doc_count_error_upper_bound", "sum_other_doc_count", "probability", "seed", "bg_count", "doc_count"}
 
-		pp.Println("ACTUAL", actualMinusExpected)
-		pp.Println("EXPECTED", expectedMinusActual)
+		// pp.Println("ACTUAL", actualMinusExpected)
+		// pp.Println("EXPECTED", expectedMinusActual)
 		assert.True(t, util.AlmostEmpty(actualMinusExpected, acceptableDifference))
 		assert.True(t, util.AlmostEmpty(expectedMinusActual, acceptableDifference))
 		assert.Contains(t, string(fullResponse), `"value":`+strconv.FormatUint(test.ExpectedResults[0][0].Cols[0].Value.(uint64), 10)) // checks if hits nr is OK
diff --git a/quesma/queryparser/lucene/lucene_parser.go b/quesma/queryparser/lucene/lucene_parser.go
index fd50ad156..b2218a39e 100644
--- a/quesma/queryparser/lucene/lucene_parser.go
+++ b/quesma/queryparser/lucene/lucene_parser.go
@@ -2,7 +2,6 @@ package lucene
 
 import (
 	"context"
-	"fmt"
 	"math"
 	"mitmproxy/quesma/logger"
 	"slices"
@@ -64,7 +63,6 @@ var specialOperators = map[string]token{
 }
 
 func TranslateToSQL(ctx context.Context, query string, fields []string) string {
-	fmt.Println(query)
 	parser := newLuceneParser(ctx, fields)
 	return parser.translateToSQL(query)
 }
@@ -207,11 +205,7 @@ func (p *luceneParser) parseRange(query string) (token token, remainingQuery str
 // e.g. when acceptableCharsAfterNumber = {']', '}'}, then 200} or 200] parses to 200, but parsing 200( fails.
 func (p *luceneParser) parseNumber(query string, reportErrors bool, acceptableCharsAfterNumber []rune) (number float64, remainingQuery string) {
 	var i, dotCount = 0, 0
-	if len(query) > 0 && query[0] == '-' {
-		i++
-	}
-	fmt.Println("q:", query)
-	for ; i < len(query); i++ {
+	for i = 0; i < len(query); i++ {
 		r := rune(query[i])
 		if r == '.' {
 			dotCount++