From 796b35a6a40ac072570ccff7995f95f1a4b47d0a Mon Sep 17 00:00:00 2001
From: Jian Xiao <99709935+jianoaix@users.noreply.github.com>
Date: Wed, 17 Jul 2024 17:08:36 -0700
Subject: [PATCH] Add a metric for DB writing throughput (#644)

---
 node/metrics.go | 10 ++++++++++
 node/store.go   |  4 +++-
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/node/metrics.go b/node/metrics.go
index 940b0a1300..1b72e14069 100644
--- a/node/metrics.go
+++ b/node/metrics.go
@@ -45,6 +45,8 @@ type Metrics struct {
 	EigenMetrics eigenmetrics.Metrics
 	// Reachability gauge to monitoring the reachability of the node's retrieval/dispersal sockets
 	ReachabilityGauge *prometheus.GaugeVec
+	// The throughput (bytes per second) at which the data is written to the database.
+	DBWriteThroughput prometheus.Gauge
 
 	registry *prometheus.Registry
 	// socketAddr is the address at which the metrics server will be listening.
@@ -139,6 +141,14 @@ func NewMetrics(eigenMetrics eigenmetrics.Metrics, reg *prometheus.Registry, log
 			},
 			[]string{"service"},
 		),
+		DBWriteThroughput: promauto.With(reg).NewGauge(
+			prometheus.GaugeOpts{
+				Namespace: Namespace,
+				Name:      "db_write_throughput_bytes_per_second",
+				Help:      "the throughput (bytes per second) at which the data is written to the database",
+			},
+		),
+
 		EigenMetrics: eigenMetrics,
 		logger:       logger.With("component", "NodeMetrics"),
 		registry:     reg,
diff --git a/node/store.go b/node/store.go
index 06849a7848..0e38be43d1 100644
--- a/node/store.go
+++ b/node/store.go
@@ -311,7 +311,9 @@ func (s *Store) StoreBatch(ctx context.Context, header *core.BatchHeader, blobs
 		log.Error("Failed to write the batch into local database:", "err", err)
 		return nil, err
 	}
-	log.Debug("StoreBatch succeeded", "chunk serialization duration", serializationDuration, "bytes encoding duration", encodingDuration, "write batch duration", time.Since(start), "total store batch duration", time.Since(storeBatchStart), "total bytes", size)
+	throughput := float64(size) / time.Since(start).Seconds()
+	s.metrics.DBWriteThroughput.Set(throughput)
+	log.Debug("StoreBatch succeeded", "chunk serialization duration", serializationDuration, "bytes encoding duration", encodingDuration, "num blobs", len(blobs), "num of key-value pair entries", len(keys), "write batch duration", time.Since(start), "write throughput (MB/s)", throughput/1_000_000, "total store batch duration", time.Since(storeBatchStart), "total bytes", size)
 	return &keys, nil
 }
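
For context, here is a minimal, self-contained sketch of the pattern this patch wires up: a Prometheus gauge registered through `promauto` and set to a bytes-per-second value after a timed write. The `writeBatch` stub, the local registry, and the `"node"` namespace literal are illustrative assumptions, not code from this repository (the PR uses the package-level `Namespace` constant and the real `StoreBatch` write path).

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var (
	registry = prometheus.NewRegistry()

	// Mirrors the gauge added in node/metrics.go: the most recently
	// observed DB write throughput, in bytes per second.
	dbWriteThroughput = promauto.With(registry).NewGauge(
		prometheus.GaugeOpts{
			Namespace: "node", // assumption; the PR uses the Namespace constant
			Name:      "db_write_throughput_bytes_per_second",
			Help:      "the throughput (bytes per second) at which the data is written to the database",
		},
	)
)

// writeBatch is a hypothetical stand-in for the batched key-value write
// that StoreBatch performs against the node's local database.
func writeBatch(size int) {
	time.Sleep(10 * time.Millisecond) // simulate I/O latency
	_ = size
}

func main() {
	size := 4 * 1024 * 1024 // pretend we wrote a 4 MiB batch

	start := time.Now()
	writeBatch(size)

	// Same computation as in node/store.go: total bytes written divided
	// by the elapsed wall-clock time of the write.
	throughput := float64(size) / time.Since(start).Seconds()
	dbWriteThroughput.Set(throughput)

	fmt.Printf("write throughput: %.2f MB/s\n", throughput/1_000_000)
}
```

A gauge fits here because each batch overwrites the previous value, so the metric reads as the current write rate; a counter of total bytes written, combined with PromQL `rate()`, would be the alternative if averaged throughput over arbitrary time windows were needed.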