Upgrading build-image to node 18 #5904

Closed
3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -1,6 +1,9 @@
# Changelog

## master / unreleased
* [CHANGE] Ruler: Remove `experimental.ruler.api-enable-rules-backup` flag and use `ruler.ring.replication-factor` to check if rules backup is enabled.
* [ENHANCEMENT] Query Frontend/Querier: Added store gateway touched postings count and touched postings size to Querier stats, logged by the Query Frontend. #5892
* [CHANGE] Upgrade build-image Dockerfile Node version from 14.x to 18.x. #5904

## 1.17.0 in progress

2 changes: 1 addition & 1 deletion build-image/Dockerfile
@@ -3,7 +3,7 @@ ARG goproxyValue
ENV GOPROXY=${goproxyValue}
RUN apt-get update && apt-get install -y curl file jq unzip protobuf-compiler libprotobuf-dev && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
RUN curl -sL https://deb.nodesource.com/setup_14.x | bash -
RUN curl -sL https://deb.nodesource.com/setup_18.x | bash -
RUN apt-get install -y nodejs && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

# Install website builder dependencies. Whenever you change these versions, please also change website/package.json
9 changes: 0 additions & 9 deletions docs/configuration/config-file-reference.md
@@ -4271,15 +4271,6 @@ ring:
# CLI flag: -experimental.ruler.enable-api
[enable_api: <boolean> | default = false]

# EXPERIMENTAL: Enable rulers to store a copy of rules owned by other rulers
# with default state (state before any evaluation) and send this copy in list
# API requests as backup in case the ruler who owns the rule fails to send its
# rules. This allows the rules API to handle ruler outage by returning rules
# with default state. Ring replication-factor needs to be set to 2 or more for
# this to be useful.
# CLI flag: -experimental.ruler.api-enable-rules-backup
[api_enable_rules_backup: <boolean> | default = false]

# EXPERIMENTAL: Remove duplicate rules in the prometheus rules and alerts API
# response. If there are duplicate rules the rule with the latest evaluation
# timestamp will be kept.
9 changes: 4 additions & 5 deletions integration/ruler_test.go
@@ -402,7 +402,7 @@ func TestRulerAPIShardingWithAPIRulesBackupEnabled(t *testing.T) {
testRulerAPIWithSharding(t, true)
}

func testRulerAPIWithSharding(t *testing.T, enableAPIRulesBackup bool) {
func testRulerAPIWithSharding(t *testing.T, enableRulesBackup bool) {
const numRulesGroups = 100

random := rand.New(rand.NewSource(time.Now().UnixNano()))
@@ -459,9 +459,8 @@ func testRulerAPIWithSharding(t *testing.T, enableAPIRulesBackup bool) {
// Enable the bucket index so we can skip the initial bucket scan.
"-blocks-storage.bucket-store.bucket-index.enabled": "true",
}
if enableAPIRulesBackup {
if enableRulesBackup {
overrides["-ruler.ring.replication-factor"] = "3"
overrides["-experimental.ruler.api-enable-rules-backup"] = "true"
}
rulerFlags := mergeFlags(
BlocksStorageFlags(),
@@ -556,8 +555,8 @@ func testRulerAPIWithSharding(t *testing.T, enableAPIRulesBackup bool) {
},
}
// For each test case, fetch the rules with configured filters, and ensure the results match.
if enableAPIRulesBackup {
err := ruler2.Kill() // if api-enable-rules-backup is enabled the APIs should be able to handle a ruler going down
if enableRulesBackup {
err := ruler2.Kill() // if rules backup is enabled the APIs should be able to handle a ruler going down
require.NoError(t, err)
}
for name, tc := range testCases {
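With the `-experimental.ruler.api-enable-rules-backup` flag gone, enablement is derived from the ring configuration instead. A minimal sketch of that check, based on the changelog and docs above (the helper name and wiring in the actual Cortex ruler may differ):

// rulesBackupEnabled is a hypothetical helper: per the changelog, rules
// backup is now considered enabled when the ruler ring replication factor
// allows rule groups to be replicated, i.e. is set to 2 or more.
func rulesBackupEnabled(replicationFactor int) bool {
	return replicationFactor > 1
}

This matches the test above, which now sets `-ruler.ring.replication-factor` to 3 instead of toggling a dedicated flag.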
17 changes: 5 additions & 12 deletions pkg/cortexpb/extensions.go
@@ -21,9 +21,8 @@ var signerPool = sync.Pool{
}

type signer struct {
h *xxhash.Digest
b []byte
optimized bool
h *xxhash.Digest
b []byte
}

func newSigner() *signer {
@@ -38,29 +37,23 @@ func newSigner() *signer {
func (s *signer) Reset() {
s.h.Reset()
s.b = s.b[:0]
s.optimized = true
}

func (s *signer) WriteString(val string) {
switch {
case !s.optimized:
_, _ = s.h.WriteString(val)
case len(s.b)+len(val) > cap(s.b):
// If the labels val does not fit in the []byte we fall back to not allocating the whole entry.
_, _ = s.h.Write(s.b)
_, _ = s.h.WriteString(val)
s.optimized = false
s.b = s.b[:0]
s.b = append(s.b, val...)
default:
// Use xxhash.Sum64(b) for the fast path, as it's faster than streaming writes.
s.b = append(s.b, val...)
}
}

func (s *signer) Sum64() uint64 {
if s.optimized {
return xxhash.Sum64(s.b)
}

_, _ = s.h.Write(s.b)
return s.h.Sum64()
}

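The `optimized` flag is no longer needed because buffered bytes can be flushed into the streaming digest at any point without changing the final hash: for xxhash, hashing a byte sequence in one shot with `xxhash.Sum64` and streaming the same bytes through a `Digest` produce identical results. A self-contained sketch of that property, using the same github.com/cespare/xxhash/v2 package this file imports:

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	labels := []string{"__name__", "up", "job", "node"}

	// One-shot path: append every value into a single buffer and hash once.
	b := make([]byte, 0, 1024)
	for _, l := range labels {
		b = append(b, l...)
	}
	oneShot := xxhash.Sum64(b)

	// Streaming path: feed the same bytes through a Digest incrementally.
	d := xxhash.New()
	for _, l := range labels {
		_, _ = d.WriteString(l)
	}

	// Both paths hash the same byte stream, so the results agree; this is
	// why the signer can buffer small values and spill large ones safely.
	fmt.Println(oneShot == d.Sum64()) // true
}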
26 changes: 26 additions & 0 deletions pkg/cortexpb/extensions_test.go
@@ -10,6 +10,32 @@ import (
"github.com/weaveworks/common/user"
)

func BenchmarkSignRequest(b *testing.B) {
ctx := context.Background()
ctx = user.InjectOrgID(ctx, "user-1")

tests := []struct {
size int
}{
{size: 10},
{size: 100},
{size: 1000},
{size: 10000},
}

for _, tc := range tests {
b.Run(fmt.Sprintf("WriteRequestSize: %v", tc.size), func(b *testing.B) {
wr := createWriteRequest(tc.size, true, "family1", "help1", "unit")
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
_, err := wr.Sign(ctx)
require.NoError(b, err)
}
})
}
}

func TestWriteRequest_Sign(t *testing.T) {
ctx := context.Background()
ctx = user.InjectOrgID(ctx, "user-1")
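For reference, the new benchmark can be run with the standard Go benchmark tooling; `-benchmem` reports the per-size allocation counts this change targets:

go test -bench=BenchmarkSignRequest -benchmem ./pkg/cortexpb/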
12 changes: 10 additions & 2 deletions pkg/distributor/distributor_test.go
@@ -57,6 +57,14 @@ var (
emptyResponse = &cortexpb.WriteResponse{}
)

var (
randomStrings = []string{}
)

func init() {
randomStrings = util.GenerateRandomStrings()
}

func TestConfig_Validate(t *testing.T) {
t.Parallel()
tests := map[string]struct {
@@ -2466,8 +2474,8 @@ func prepare(tb testing.TB, cfg prepConfig) ([]*Distributor, []*mockIngester, []
// Strings to be used for the label values/names requests
var unusedStrings []string
if cfg.lblValuesPerIngester > 0 {
unusedStrings = make([]string, min(len(util.RandomStrings), cfg.numIngesters*cfg.lblValuesPerIngester))
copy(unusedStrings, util.RandomStrings)
unusedStrings = make([]string, min(len(randomStrings), cfg.numIngesters*cfg.lblValuesPerIngester))
copy(unusedStrings, randomStrings)
}
s := &prepState{
unusedStrings: unusedStrings,
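Moving the strings from a package-level `util.RandomStrings` variable to a test-local `init` presumably confines the generation cost to test binaries rather than every importer of the util package. For illustration only, a hypothetical sketch of what such a generator could look like (the real `util.GenerateRandomStrings` in Cortex may differ in count and format):

package util

import (
	"fmt"
	"math/rand"
	"time"
)

// GenerateRandomStrings is a hypothetical sketch, not the actual Cortex
// implementation: it returns a fixed number of pseudo-random strings
// suitable for seeding label values/names in tests.
func GenerateRandomStrings() []string {
	randGen := rand.New(rand.NewSource(time.Now().UnixNano()))
	ss := make([]string, 1000)
	for i := range ss {
		ss[i] = fmt.Sprintf("random-%d", randGen.Int63())
	}
	return ss
}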
7 changes: 7 additions & 0 deletions pkg/frontend/transport/handler.go
@@ -296,6 +296,8 @@ func (f *Handler) reportQueryStats(r *http.Request, userID string, queryString u
numSamples := stats.LoadFetchedSamples()
numChunkBytes := stats.LoadFetchedChunkBytes()
numDataBytes := stats.LoadFetchedDataBytes()
numStoreGatewayTouchedPostings := stats.LoadStoreGatewayTouchedPostings()
numStoreGatewayTouchedPostingBytes := stats.LoadStoreGatewayTouchedPostingBytes()
splitQueries := stats.LoadSplitQueries()
dataSelectMaxTime := stats.LoadDataSelectMaxTime()
dataSelectMinTime := stats.LoadDataSelectMinTime()
@@ -334,6 +336,11 @@ func (f *Handler) reportQueryStats(r *http.Request, userID string, queryString u
"response_size", contentLength,
}, stats.LoadExtraFields()...)

if numStoreGatewayTouchedPostings > 0 {
logMessage = append(logMessage, "store_gateway_touched_postings_count", numStoreGatewayTouchedPostings)
logMessage = append(logMessage, "store_gateway_touched_posting_bytes", numStoreGatewayTouchedPostingBytes)
}

grafanaFields := formatGrafanaStatsFields(r)
if len(grafanaFields) > 0 {
logMessage = append(logMessage, grafanaFields...)
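The two new fields are appended only when the query actually touched postings in the store gateway, so queries that never reach it keep their existing log shape. When present, they render as in the test below, e.g. `store_gateway_touched_postings_count=20 store_gateway_touched_posting_bytes=200`.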
17 changes: 17 additions & 0 deletions pkg/frontend/transport/handler_test.go
@@ -471,6 +471,23 @@ func TestReportQueryStatsFormat(t *testing.T) {
},
expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=0 fetched_series_count=0 fetched_chunks_count=0 fetched_samples_count=0 fetched_chunks_bytes=0 fetched_data_bytes=0 split_queries=0 status_code=200 response_size=1000 data_select_max_time=1704153600 data_select_min_time=1704067200 query_length=2 param_query=up`,
},
"should include query stats with store gateway stats": {
queryStats: &querier_stats.QueryStats{
Stats: querier_stats.Stats{
WallTime: 3 * time.Second,
QueryStorageWallTime: 100 * time.Minute,
FetchedSeriesCount: 100,
FetchedChunksCount: 200,
FetchedSamplesCount: 300,
FetchedChunkBytes: 1024,
FetchedDataBytes: 2048,
SplitQueries: 10,
StoreGatewayTouchedPostingsCount: 20,
StoreGatewayTouchedPostingBytes: 200,
},
},
expectedLog: `level=info msg="query stats" component=query-frontend method=GET path=/prometheus/api/v1/query response_time=1s query_wall_time_seconds=3 fetched_series_count=100 fetched_chunks_count=200 fetched_samples_count=300 fetched_chunks_bytes=1024 fetched_data_bytes=2048 split_queries=10 status_code=200 response_size=1000 store_gateway_touched_postings_count=20 store_gateway_touched_posting_bytes=200 query_storage_wall_time_seconds=6000`,
},
}

for testName, testData := range tests {
2 changes: 2 additions & 0 deletions pkg/querier/blocks_store_queryable.go
@@ -756,6 +756,8 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(
reqStats.AddFetchedSamples(numSamples)
reqStats.AddFetchedChunkBytes(uint64(chunkBytes))
reqStats.AddFetchedDataBytes(uint64(dataBytes))
reqStats.AddStoreGatewayTouchedPostings(uint64(seriesQueryStats.PostingsTouched))
reqStats.AddStoreGatewayTouchedPostingBytes(uint64(seriesQueryStats.PostingsTouchedSizeSum))

level.Debug(spanLog).Log("msg", "received series from store-gateway",
"instance", c.RemoteAddress(),
34 changes: 34 additions & 0 deletions pkg/querier/stats/stats.go
@@ -270,6 +270,38 @@ func (s *QueryStats) LoadDataSelectMinTime() int64 {
return atomic.LoadInt64(&s.DataSelectMinTime)
}

func (s *QueryStats) AddStoreGatewayTouchedPostings(count uint64) {
if s == nil {
return
}

atomic.AddUint64(&s.StoreGatewayTouchedPostingsCount, count)
}

func (s *QueryStats) LoadStoreGatewayTouchedPostings() uint64 {
if s == nil {
return 0
}

return atomic.LoadUint64(&s.StoreGatewayTouchedPostingsCount)
}

func (s *QueryStats) AddStoreGatewayTouchedPostingBytes(bytes uint64) {
if s == nil {
return
}

atomic.AddUint64(&s.StoreGatewayTouchedPostingBytes, bytes)
}

func (s *QueryStats) LoadStoreGatewayTouchedPostingBytes() uint64 {
if s == nil {
return 0
}

return atomic.LoadUint64(&s.StoreGatewayTouchedPostingBytes)
}

// Merge the provided Stats into this one.
func (s *QueryStats) Merge(other *QueryStats) {
if s == nil || other == nil {
@@ -283,6 +315,8 @@ func (s *QueryStats) Merge(other *QueryStats) {
s.AddFetchedDataBytes(other.LoadFetchedDataBytes())
s.AddFetchedSamples(other.LoadFetchedSamples())
s.AddFetchedChunks(other.LoadFetchedChunks())
s.AddStoreGatewayTouchedPostings(other.LoadStoreGatewayTouchedPostings())
s.AddStoreGatewayTouchedPostingBytes(other.LoadStoreGatewayTouchedPostingBytes())
s.AddExtraFields(other.LoadExtraFields()...)
}

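The new accessors follow the file's existing nil-safe atomic pattern: every method checks for a nil receiver, so callers never need to guard a possibly-nil `*QueryStats`. A minimal self-contained sketch of the pattern (an illustrative type, not the actual Cortex one):

package main

import (
	"fmt"
	"sync/atomic"
)

// counterStats mirrors the nil-safe atomic counter pattern used by
// QueryStats above; it is an illustrative sketch only.
type counterStats struct {
	touchedPostings uint64
}

func (s *counterStats) add(n uint64) {
	if s == nil { // a nil receiver is a no-op, so callers skip nil checks
		return
	}
	atomic.AddUint64(&s.touchedPostings, n)
}

func (s *counterStats) load() uint64 {
	if s == nil {
		return 0
	}
	return atomic.LoadUint64(&s.touchedPostings)
}

func (s *counterStats) merge(other *counterStats) {
	if s == nil || other == nil {
		return
	}
	s.add(other.load())
}

func main() {
	a, b := &counterStats{}, &counterStats{}
	a.add(20)
	b.add(5)
	a.merge(b)
	fmt.Println(a.load()) // 25

	var nilStats *counterStats
	nilStats.add(1) // safe: no-op on a nil receiver
}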