-
Notifications
You must be signed in to change notification settings - Fork 6
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Testing showed `filters` doesn't currently work with pipeline aggregations. I'll fix that in a separate PR. Some screenshots: I have no idea why those 2 different aggregations are of the same color, I didn't find a way to change it. But you can see there's a histogram with a) count b) max_bucket count. <img width="1727" alt="Screenshot 2024-05-20 at 12 45 31" src="https://github.com/QuesmaOrg/quesma/assets/5407146/7951ac97-a513-42b4-b9f6-6136568ef305">
- Loading branch information
Showing
11 changed files
with
1,067 additions
and
50 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,136 @@ | ||
package pipeline_aggregations | ||
|
||
import ( | ||
"context" | ||
"fmt" | ||
"mitmproxy/quesma/logger" | ||
"mitmproxy/quesma/model" | ||
"mitmproxy/quesma/queryprocessor" | ||
"mitmproxy/quesma/util" | ||
) | ||
|
||
// MaxBucket implements the "max_bucket" pipeline aggregation: it computes
// the maximum bucket value (and the keys of the buckets holding it) from
// the results of its parent aggregation.
type MaxBucket struct {
	ctx context.Context
	// Parent is the name of the parent aggregation, parsed from the
	// aggregation's buckets_path (see NewMaxBucket).
	Parent string
	// IsCount bool
}
|
||
func NewMaxBucket(ctx context.Context, bucketsPath string) MaxBucket { | ||
return MaxBucket{ctx: ctx, Parent: parseBucketsPathIntoParentAggregationName(ctx, bucketsPath)} | ||
} | ||
|
||
func (query MaxBucket) IsBucketAggregation() bool { | ||
return false | ||
} | ||
|
||
// FIXME I think we should return all rows, not just 1 | ||
// Dunno why it's working, maybe I'm wrong. | ||
// Let's wait for this until all pipeline merges, when I'll perform some more thorough tests. | ||
func (query MaxBucket) TranslateSqlResponseToJson(rows []model.QueryResultRow, level int) []model.JsonMap { | ||
if len(rows) == 0 { | ||
logger.WarnWithCtx(query.ctx).Msg("no rows returned for max bucket aggregation") | ||
return []model.JsonMap{nil} | ||
} | ||
if len(rows) > 1 { | ||
logger.WarnWithCtx(query.ctx).Msg("more than one row returned for max bucket aggregation") | ||
} | ||
if returnMap, ok := rows[0].LastColValue().(model.JsonMap); ok { | ||
return []model.JsonMap{returnMap} | ||
} else { | ||
logger.WarnWithCtx(query.ctx).Msgf("could not convert value to JsonMap: %v, type: %T", rows[0].LastColValue(), rows[0].LastColValue()) | ||
return []model.JsonMap{nil} | ||
} | ||
} | ||
|
||
func (query MaxBucket) CalculateResultWhenMissing(qwa *model.Query, parentRows []model.QueryResultRow) []model.QueryResultRow { | ||
resultRows := make([]model.QueryResultRow, 0) | ||
if len(parentRows) == 0 { | ||
return resultRows // maybe null? | ||
} | ||
qp := queryprocessor.NewQueryProcessor(query.ctx) | ||
parentFieldsCnt := len(parentRows[0].Cols) - 2 // -2, because row is [parent_cols..., current_key, current_value] | ||
// in calculateSingleAvgBucket we calculate avg all current_keys with the same parent_cols | ||
// so we need to split into buckets based on parent_cols | ||
for _, parentRowsOneBucket := range qp.SplitResultSetIntoBuckets(parentRows, parentFieldsCnt) { | ||
resultRows = append(resultRows, query.calculateSingleMaxBucket(qwa, parentRowsOneBucket)) | ||
} | ||
return resultRows | ||
} | ||
|
||
// we're sure len(parentRows) > 0 | ||
func (query MaxBucket) calculateSingleMaxBucket(qwa *model.Query, parentRows []model.QueryResultRow) model.QueryResultRow { | ||
var resultValue any | ||
var resultKeys []any | ||
|
||
firstNonNilIndex := -1 | ||
for i, row := range parentRows { | ||
if row.LastColValue() != nil { | ||
firstNonNilIndex = i | ||
break | ||
} | ||
} | ||
if firstNonNilIndex == -1 { | ||
resultRow := parentRows[0].Copy() | ||
resultRow.Cols[len(resultRow.Cols)-1].Value = model.JsonMap{ | ||
"value": resultValue, | ||
"keys": resultKeys, | ||
} | ||
return resultRow | ||
} | ||
|
||
if firstRowValueFloat, firstRowValueIsFloat := util.ExtractFloat64Maybe(parentRows[firstNonNilIndex].LastColValue()); firstRowValueIsFloat { | ||
// find max | ||
maxValue := firstRowValueFloat | ||
for _, row := range parentRows[firstNonNilIndex+1:] { | ||
value, ok := util.ExtractFloat64Maybe(row.LastColValue()) | ||
if ok { | ||
maxValue = max(maxValue, value) | ||
} else { | ||
logger.WarnWithCtx(query.ctx).Msgf("could not convert value to float: %v, type: %T. Skipping", row.LastColValue(), row.LastColValue()) | ||
} | ||
} | ||
resultValue = maxValue | ||
// find keys with max value | ||
for _, row := range parentRows[firstNonNilIndex:] { | ||
if value, ok := util.ExtractFloat64Maybe(row.LastColValue()); ok && value == maxValue { | ||
resultKeys = append(resultKeys, getKey(query.ctx, row, qwa)) | ||
} | ||
} | ||
} else if firstRowValueInt, firstRowValueIsInt := util.ExtractInt64Maybe(parentRows[firstNonNilIndex].LastColValue()); firstRowValueIsInt { | ||
// find max | ||
maxValue := firstRowValueInt | ||
for _, row := range parentRows[firstNonNilIndex+1:] { | ||
value, ok := util.ExtractInt64Maybe(row.LastColValue()) | ||
if ok { | ||
maxValue = max(maxValue, value) | ||
} else { | ||
logger.WarnWithCtx(query.ctx).Msgf("could not convert value to float: %v, type: %T. Skipping", row.LastColValue(), row.LastColValue()) | ||
} | ||
} | ||
resultValue = maxValue | ||
// find keys with max value | ||
for _, row := range parentRows[firstNonNilIndex:] { | ||
if value, ok := util.ExtractInt64Maybe(row.LastColValue()); ok && value == maxValue { | ||
resultKeys = append(resultKeys, getKey(query.ctx, row, qwa)) | ||
} | ||
} | ||
} else { | ||
logger.WarnWithCtx(query.ctx).Msgf("could not convert value to float or int: %v, type: %T. Returning nil.", | ||
parentRows[firstNonNilIndex].LastColValue(), parentRows[firstNonNilIndex].LastColValue()) | ||
} | ||
|
||
resultRow := parentRows[0].Copy() | ||
resultRow.Cols[len(resultRow.Cols)-1].Value = model.JsonMap{ | ||
"value": resultValue, | ||
"keys": resultKeys, | ||
} | ||
return resultRow | ||
} | ||
|
||
func (query MaxBucket) PostprocessResults(rowsFromDB []model.QueryResultRow) []model.QueryResultRow { | ||
return rowsFromDB | ||
} | ||
|
||
func (query MaxBucket) String() string { | ||
return fmt.Sprintf("max_bucket(%s)", query.Parent) | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.